HBASE-19240 more error-prone results

Mike Drob 2017-11-10 16:32:25 -06:00
parent bc8048cf6c
commit cd681f26bc
27 changed files with 152 additions and 143 deletions

View File

@@ -22,14 +22,10 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.curator.shaded.com.google.common.collect.ConcurrentHashMultiset;
import org.apache.curator.shaded.com.google.common.collect.Multiset;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.CellScanner;
@@ -41,9 +37,13 @@ import org.apache.hadoop.hbase.coprocessor.ProtobufCoprocessorService;
import org.apache.hadoop.hbase.ipc.DelegatingHBaseRpcController;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.ConcurrentHashMultiset;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Multiset;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;

View File

@@ -198,24 +198,22 @@ public class IntegrationTestRpcClient {
@Override
public void run() {
while (running.get()) {
switch (random.nextInt() % 2) {
case 0: //start a server
if (random.nextBoolean()) {
//start a server
try {
cluster.startServer();
} catch (Exception e) {
LOG.warn(e);
exception.compareAndSet(null, e);
}
break;
case 1: // stop a server
} else {
// stop a server
try {
cluster.stopRandomServer();
} catch (Exception e) {
LOG.warn(e);
exception.compareAndSet(null, e);
}
default:
}
Threads.sleep(100);

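Note on the change above: `random.nextInt() % 2` is not a coin flip. `nextInt()` ranges over all ints, and Java's `%` keeps the sign of the dividend, so the expression yields -1, 0, or 1; the old switch had no case for -1 and fell through to the empty default roughly a quarter of the time, doing nothing. A minimal sketch of the trap (hypothetical demo class, not part of the commit):

    import java.util.Random;

    public class ModuloDemo {
        public static void main(String[] args) {
            Random random = new Random();
            int negatives = 0;
            for (int i = 0; i < 1_000_000; i++) {
                // nextInt() spans negative ints and % keeps the dividend's sign,
                // so the result is -1, 0, or 1 -- never just 0 or 1.
                if (random.nextInt() % 2 < 0) {
                    negatives++;
                }
            }
            // Roughly a quarter of the draws land on -1.
            System.out.println(negatives + " of 1000000 draws were negative");
            // random.nextBoolean() avoids the problem entirely.
        }
    }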
View File

@@ -75,8 +75,9 @@ public class VersionModel implements Serializable, ProtobufMessageHandler {
System.getProperty("os.version") + ' ' +
System.getProperty("os.arch");
serverVersion = context.getServerInfo();
jerseyVersion = ServletContainer.class.getClass().getPackage()
.getImplementationVersion();
jerseyVersion = ServletContainer.class.getPackage().getImplementationVersion();
// Currently, this will always be null because the manifest doesn't have any useful information
if (jerseyVersion == null) jerseyVersion = "";
}
/**

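Note on the change above: this is error-prone's GetClassOnClass pattern. `ServletContainer.class` is already a `Class<ServletContainer>`, so chaining `.getClass()` yields the class object of `java.lang.Class` itself, and the package queried is `java.lang`, not Jersey's. A minimal sketch using a JDK class (hypothetical demo, not part of the commit):

    import java.util.ArrayList;

    public class GetClassOnClassDemo {
        public static void main(String[] args) {
            // Buggy form: ArrayList.class.getClass() is java.lang.Class's own
            // class object, so the package reported is java.lang.
            System.out.println(ArrayList.class.getClass().getPackage().getName()); // java.lang
            // Fixed form: the package the class was actually declared in.
            System.out.println(ArrayList.class.getPackage().getName()); // java.util
        }
    }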
View File

@@ -38,6 +38,7 @@ import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.servlet.ServletContainer;
import javax.servlet.DispatcherType;
import java.util.Arrays;
import java.util.EnumSet;
public class HBaseRESTTestingUtility {
@@ -87,7 +88,7 @@ public class HBaseRESTTestingUtility {
filter = filter.trim();
ctxHandler.addFilter(filter, "/*", EnumSet.of(DispatcherType.REQUEST));
}
LOG.info("Loaded filter classes :" + filterClasses);
LOG.info("Loaded filter classes :" + Arrays.toString(filterClasses));
conf.set(RESTServer.REST_CSRF_BROWSER_USERAGENTS_REGEX_KEY, ".*");
RESTServer.addCSRFFilter(ctxHandler, conf);

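Note on the change above: `filterClasses` is a `String[]`, and arrays inherit `Object.toString()`, so the old statement logged an identity hash like `[Ljava.lang.String;@6d06d69c` instead of the contents; `Arrays.toString` renders the elements. A minimal sketch (hypothetical demo):

    import java.util.Arrays;

    public class ArrayToStringDemo {
        public static void main(String[] args) {
            String[] filters = {"AuthFilter", "GzipFilter"};
            // Implicit conversion prints the array's identity hash code.
            System.out.println("Loaded filter classes :" + filters);
            // Prints: Loaded filter classes :[AuthFilter, GzipFilter]
            System.out.println("Loaded filter classes :" + Arrays.toString(filters));
        }
    }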
View File

@@ -93,8 +93,8 @@ public class TestVersionResource {
assertNotNull(model.getServerVersion());
String jerseyVersion = model.getJerseyVersion();
assertNotNull(jerseyVersion);
assertEquals(jerseyVersion, ServletContainer.class.getClass().getPackage()
.getImplementationVersion());
// TODO: fix when we actually get a jersey version
// assertEquals(jerseyVersion, ServletContainer.class.getPackage().getImplementationVersion());
}
@Test
@@ -111,8 +111,8 @@
assertTrue(body.contains(System.getProperty("os.name")));
assertTrue(body.contains(System.getProperty("os.version")));
assertTrue(body.contains(System.getProperty("os.arch")));
assertTrue(body.contains(ServletContainer.class.getClass().getPackage()
.getImplementationVersion()));
// TODO: fix when we actually get a jersey version
// assertTrue(body.contains(ServletContainer.class.getPackage().getImplementationVersion()));
}
@Test

View File

@@ -21,7 +21,12 @@ package org.apache.hadoop.hbase.rest.model;
import org.apache.hadoop.hbase.testclassification.RestTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Assume;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import javax.servlet.ServletContext;
@Category({RestTests.class, SmallTests.class})
public class TestVersionModel extends TestModelBase<VersionModel> {

View File

@@ -69,6 +69,7 @@ import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -143,8 +144,8 @@ public class TestZooKeeper {
* @throws IOException
* @throws InterruptedException
*/
// fails frequently, disabled for now, see HBASE-6406
//@Test
@Ignore("fails frequently, disabled for now, see HBASE-6406")
@Test
public void testClientSessionExpired() throws Exception {
Configuration c = new Configuration(TEST_UTIL.getConfiguration());

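Note on the change above: an `@Ignore` with a reason beats a commented-out `@Test`, because the method keeps compiling against current APIs and shows up as skipped in test reports instead of silently rotting. A minimal sketch, assuming JUnit 4 on the classpath:

    import org.junit.Ignore;
    import org.junit.Test;

    public class IgnoreDemo {
        @Ignore("fails frequently, disabled for now, see HBASE-6406")
        @Test
        public void flakyTest() {
            // Still compiles against current APIs and is reported as skipped,
            // unlike a commented-out @Test, which disappears entirely.
        }
    }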
View File

@@ -4688,8 +4688,8 @@ public class TestFromClientSide {
NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY)
.get(QUALIFIER);
assertEquals("The number of versions of '" + FAMILY + ":" + QUALIFIER
+ " did not match " + versions, versions, navigableMap.size());
assertEquals("The number of versions of '" + Bytes.toString(FAMILY) + ":"
+ Bytes.toString(QUALIFIER) + " did not match", versions, navigableMap.size());
for (Map.Entry<Long, byte[]> entry : navigableMap.entrySet()) {
assertTrue("The value at time " + entry.getKey()
+ " did not match what was put",
@@ -4724,8 +4724,8 @@
NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY)
.get(QUALIFIER);
assertEquals("The number of versions of '" + FAMILY + ":" + QUALIFIER + " did not match " +
versions + "; " + put.toString() + ", " + get.toString(), versions, navigableMap.size());
assertEquals("The number of versions of '" + Bytes.toString(FAMILY) + ":"
+ Bytes.toString(QUALIFIER) + " did not match", versions, navigableMap.size());
for (Map.Entry<Long, byte[]> entry : navigableMap.entrySet()) {
assertTrue("The value at time " + entry.getKey()
+ " did not match what was put",

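Note on the change above: FAMILY and QUALIFIER are `byte[]`, so concatenating them into the assertion message printed `[B@<hash>` gibberish; wrapping them in HBase's `Bytes.toString` decodes the bytes as UTF-8. The same fix recurs in several files below. A minimal sketch, assuming hbase-common's `Bytes` utility on the classpath (hypothetical demo):

    import org.apache.hadoop.hbase.util.Bytes;

    public class BytesMessageDemo {
        public static void main(String[] args) {
            byte[] family = Bytes.toBytes("cf");
            byte[] qualifier = Bytes.toBytes("q1");
            // Prints something like: versions of '[B@1b6d3586:[B@4554617c
            System.out.println("versions of '" + family + ":" + qualifier);
            // Prints: versions of 'cf:q1
            System.out.println("versions of '" + Bytes.toString(family) + ":"
                + Bytes.toString(qualifier));
        }
    }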
View File

@@ -272,9 +272,7 @@ public class TestCoprocessorInterface {
@Override
public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
final Get get, final List<Cell> results) throws IOException {
if (1/0 == 1) {
e.complete();
}
throw new RuntimeException();
}
});
}

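Note on the change above: the removed `if (1/0 == 1)` appears to have been a trick to make the callback blow up at runtime (with an `ArithmeticException`) while keeping the compiler satisfied; error-prone rejects constant division by zero, and an explicit `throw new RuntimeException()` states the intent directly. A minimal sketch of the two forms (hypothetical demo):

    public class DivZeroDemo {
        static void oldStyle() {
            // Compiles, but error-prone flags the constant 1/0; at runtime it
            // throws ArithmeticException, which obscures the intent.
            if (1 / 0 == 1) {
                System.out.println("never reached");
            }
        }

        static void newStyle() {
            // Says what it means: this callback always fails.
            throw new RuntimeException();
        }

        public static void main(String[] args) {
            try { oldStyle(); } catch (ArithmeticException e) { System.out.println("old: " + e); }
            try { newStyle(); } catch (RuntimeException e) { System.out.println("new: " + e); }
        }
    }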
View File

@@ -212,7 +212,7 @@ public class TestRegionObserverForAddingMutationsFromCoprocessors {
new Put(row2).addColumn(test, dummy, cells.get(0).getTimestamp(), dummy),
new Put(row3).addColumn(test, dummy, cells.get(0).getTimestamp(), dummy),
};
LOG.info("Putting:" + puts);
LOG.info("Putting:" + Arrays.toString(puts));
miniBatchOp.addOperationsFromCP(0, puts);
}
}

View File

@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.wal.WAL;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -622,7 +623,7 @@ public class TestFilter {
* @throws Exception
*/
@Test
public void tes94FilterRowCompatibility() throws Exception {
public void test94FilterRowCompatibility() throws Exception {
Scan s = new Scan();
OldTestFilter filter = new OldTestFilter();
s.setFilter(filter);
@@ -2051,7 +2052,8 @@
}
}
// TODO: intentionally disabled?
@Test
@Ignore("TODO: intentionally disabled?")
public void testNestedFilterListWithSCVF() throws IOException {
byte[] columnStatus = Bytes.toBytes("S");
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));

View File

@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -239,7 +240,7 @@ public class TestMasterOperationsForRegionReplicas {
}
}
//@Test (TODO: enable when we have support for alter_table- HBASE-10361).
@Test @Ignore("Enable when we have support for alter_table- HBASE-10361")
public void testIncompleteMetaTableReplicaInformation() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
final int numRegions = 3;

View File

@@ -421,7 +421,8 @@ public class TestRegionPlacement {
for (Region region: rs.getRegions(TableName.valueOf("testRegionAssignment"))) {
InetSocketAddress[] favoredSocketAddress = rs.getFavoredNodesForRegion(
region.getRegionInfo().getEncodedName());
List<ServerName> favoredServerList = plan.getAssignmentMap().get(region.getRegionInfo());
String regionName = region.getRegionInfo().getRegionNameAsString();
List<ServerName> favoredServerList = plan.getAssignmentMap().get(regionName);
// All regions are supposed to have favored nodes,
// except for hbase:meta and ROOT

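Note on the change above: the assignment map is keyed by region name strings, but the old code looked it up with the `RegionInfo` object. `Map.get(Object)` accepts any type, so the mismatch compiles and simply returns null every time; error-prone flags such incompatible-type lookups. A minimal sketch (hypothetical demo):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class MapKeyTypeDemo {
        public static void main(String[] args) {
            Map<String, List<String>> assignments = new HashMap<>();
            assignments.put("region-1", Arrays.asList("server-a", "server-b"));
            Object wrongKeyType = Integer.valueOf(1);
            // Compiles because Map.get takes Object, but an Integer can never
            // equal a String key, so the lookup always returns null.
            System.out.println(assignments.get(wrongKeyType)); // null
            // Looking up with the map's actual key type works.
            System.out.println(assignments.get("region-1")); // [server-a, server-b]
        }
    }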
View File

@@ -343,6 +343,7 @@ public class TestSimpleRegionNormalizer {
assertEquals(hri4, ((SplitNormalizationPlan) plan).getRegionInfo());
}
@SuppressWarnings("MockitoCast")
protected void setupMocksForNormalizer(Map<byte[], Integer> regionSizes,
List<RegionInfo> RegionInfo) {
masterServices = Mockito.mock(MasterServices.class, RETURNS_DEEP_STUBS);
@@ -360,7 +361,10 @@ public class TestSimpleRegionNormalizer {
when(regionLoad.getName()).thenReturn(region.getKey());
when(regionLoad.getStorefileSizeMB()).thenReturn(region.getValue());
when(masterServices.getServerManager().getLoad(sn).
// this is possibly broken with jdk9, unclear if false positive or not
// suppress it for now, fix it when we get to running tests on 9
// see: http://errorprone.info/bugpattern/MockitoCast
when((Object) masterServices.getServerManager().getLoad(sn).
getRegionsLoad().get(region.getKey())).thenReturn(regionLoad);
}
try {

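Note on the change above: per the comment carried in the diff, error-prone's MockitoCast check trips on the implicit checked cast javac generates for the stubbed generic call; widening the call to `Object` inside `when(...)` avoids that cast. A minimal sketch, assuming Mockito on the classpath (hypothetical demo):

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.util.Map;

    public class MockitoCastDemo {
        @SuppressWarnings("unchecked")
        public static void main(String[] args) {
            Map<String, Integer> sizes = mock(Map.class);
            // Widening the stubbed call to Object keeps javac from inserting
            // the implicit checked cast that the MockitoCast check trips on.
            when((Object) sizes.get("region")).thenReturn(42);
            System.out.println(sizes.get("region")); // 42
        }
    }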
View File

@@ -207,6 +207,8 @@ public class TestMasterProcedureSchedulerConcurrency {
case READ:
queue.wakeTableSharedLock(proc, getTableName(proc));
break;
default:
throw new UnsupportedOperationException();
}
}

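Note on the change above: adding a `default` arm that throws makes the switch fail loudly if an unhandled value shows up later, instead of silently doing nothing; this is what error-prone's missing-default checks push for. A minimal sketch (hypothetical demo):

    public class MissingDefaultDemo {
        enum LockType { READ, WRITE }

        static void release(LockType type) {
            switch (type) {
                case READ:
                    System.out.println("release shared lock");
                    break;
                case WRITE:
                    System.out.println("release exclusive lock");
                    break;
                default:
                    // Fail loudly if an unhandled value is added later,
                    // instead of silently skipping the release.
                    throw new UnsupportedOperationException("unhandled: " + type);
            }
        }

        public static void main(String[] args) {
            release(LockType.READ);
            release(LockType.WRITE);
        }
    }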
View File

@@ -35,13 +35,14 @@ import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@Category({MasterTests.class, LargeTests.class})
public class TestWALProcedureStoreOnHDFS {
@@ -62,7 +63,10 @@ public class TestWALProcedureStoreOnHDFS {
}
};
private static void initConfig(Configuration conf) {
@Before
public void initConfig() {
Configuration conf = UTIL.getConfiguration();
conf.setInt("dfs.replication", 3);
conf.setInt("dfs.namenode.replication.min", 3);
@@ -72,7 +76,8 @@
conf.setInt(WALProcedureStore.MAX_SYNC_FAILURE_ROLL_CONF_KEY, 10);
}
public void setup() throws Exception {
// No @Before because some tests need to do additional config first
private void setupDFS() throws Exception {
MiniDFSCluster dfs = UTIL.startMiniDFSCluster(3);
Path logDir = new Path(new Path(dfs.getFileSystem().getUri()), "/test-logs");
@@ -82,6 +87,7 @@
store.recoverLease();
}
@After
public void tearDown() throws Exception {
store.stop(false);
UTIL.getDFSCluster().getFileSystem().delete(store.getWALDir(), true);
@@ -95,102 +101,85 @@
@Test(timeout=60000, expected=RuntimeException.class)
public void testWalAbortOnLowReplication() throws Exception {
initConfig(UTIL.getConfiguration());
setup();
try {
assertEquals(3, UTIL.getDFSCluster().getDataNodes().size());
setupDFS();
LOG.info("Stop DataNode");
UTIL.getDFSCluster().stopDataNode(0);
assertEquals(3, UTIL.getDFSCluster().getDataNodes().size());
LOG.info("Stop DataNode");
UTIL.getDFSCluster().stopDataNode(0);
assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());
store.insert(new TestProcedure(1, -1), null);
for (long i = 2; store.isRunning(); ++i) {
assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());
store.insert(new TestProcedure(1, -1), null);
for (long i = 2; store.isRunning(); ++i) {
assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());
store.insert(new TestProcedure(i, -1), null);
Thread.sleep(100);
}
assertFalse(store.isRunning());
fail("The store.insert() should throw an exeption");
} finally {
tearDown();
store.insert(new TestProcedure(i, -1), null);
Thread.sleep(100);
}
assertFalse(store.isRunning());
}
@Test(timeout=60000)
public void testWalAbortOnLowReplicationWithQueuedWriters() throws Exception {
initConfig(UTIL.getConfiguration());
setup();
try {
assertEquals(3, UTIL.getDFSCluster().getDataNodes().size());
store.registerListener(new ProcedureStore.ProcedureStoreListener() {
@Override
public void postSync() {
Threads.sleepWithoutInterrupt(2000);
setupDFS();
assertEquals(3, UTIL.getDFSCluster().getDataNodes().size());
store.registerListener(new ProcedureStore.ProcedureStoreListener() {
@Override
public void postSync() { Threads.sleepWithoutInterrupt(2000); }
@Override
public void abortProcess() {}
});
final AtomicInteger reCount = new AtomicInteger(0);
Thread[] thread = new Thread[store.getNumThreads() * 2 + 1];
for (int i = 0; i < thread.length; ++i) {
final long procId = i + 1;
thread[i] = new Thread(() -> {
try {
LOG.debug("[S] INSERT " + procId);
store.insert(new TestProcedure(procId, -1), null);
LOG.debug("[E] INSERT " + procId);
} catch (RuntimeException e) {
reCount.incrementAndGet();
LOG.debug("[F] INSERT " + procId + ": " + e.getMessage());
}
@Override
public void abortProcess() {}
});
final AtomicInteger reCount = new AtomicInteger(0);
Thread[] thread = new Thread[store.getNumThreads() * 2 + 1];
for (int i = 0; i < thread.length; ++i) {
final long procId = i + 1;
thread[i] = new Thread() {
public void run() {
try {
LOG.debug("[S] INSERT " + procId);
store.insert(new TestProcedure(procId, -1), null);
LOG.debug("[E] INSERT " + procId);
} catch (RuntimeException e) {
reCount.incrementAndGet();
LOG.debug("[F] INSERT " + procId + ": " + e.getMessage());
}
}
};
thread[i].start();
}
Thread.sleep(1000);
LOG.info("Stop DataNode");
UTIL.getDFSCluster().stopDataNode(0);
assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());
for (int i = 0; i < thread.length; ++i) {
thread[i].join();
}
assertFalse(store.isRunning());
assertTrue(reCount.toString(), reCount.get() >= store.getNumThreads() &&
reCount.get() < thread.length);
} finally {
tearDown();
thread[i].start();
}
Thread.sleep(1000);
LOG.info("Stop DataNode");
UTIL.getDFSCluster().stopDataNode(0);
assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());
for (int i = 0; i < thread.length; ++i) {
thread[i].join();
}
assertFalse(store.isRunning());
assertTrue(reCount.toString(), reCount.get() >= store.getNumThreads() &&
reCount.get() < thread.length);
}
@Test(timeout=60000)
public void testWalRollOnLowReplication() throws Exception {
initConfig(UTIL.getConfiguration());
UTIL.getConfiguration().setInt("dfs.namenode.replication.min", 1);
setup();
try {
int dnCount = 0;
store.insert(new TestProcedure(1, -1), null);
UTIL.getDFSCluster().restartDataNode(dnCount);
for (long i = 2; i < 100; ++i) {
store.insert(new TestProcedure(i, -1), null);
waitForNumReplicas(3);
Thread.sleep(100);
if ((i % 30) == 0) {
LOG.info("Restart Data Node");
UTIL.getDFSCluster().restartDataNode(++dnCount % 3);
}
setupDFS();
int dnCount = 0;
store.insert(new TestProcedure(1, -1), null);
UTIL.getDFSCluster().restartDataNode(dnCount);
for (long i = 2; i < 100; ++i) {
store.insert(new TestProcedure(i, -1), null);
waitForNumReplicas(3);
Thread.sleep(100);
if ((i % 30) == 0) {
LOG.info("Restart Data Node");
UTIL.getDFSCluster().restartDataNode(++dnCount % 3);
}
assertTrue(store.isRunning());
} finally {
tearDown();
}
assertTrue(store.isRunning());
}
public void waitForNumReplicas(int numReplicas) throws Exception {

View File

@@ -72,6 +72,7 @@ public class TestCachedMobFile extends TestCase{
Assert.assertEquals(EXPECTED_REFERENCE_ZERO, cachedMobFile.getReferenceCount());
}
@SuppressWarnings("SelfComparison")
@Test
public void testCompare() throws Exception {
String caseName = getName();

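Note on the change above: error-prone's SelfComparison check flags `x.compareTo(x)`, but comparator tests do it on purpose to verify the reflexive part of the `Comparable` contract, hence the targeted suppression. A minimal sketch (hypothetical demo):

    public class SelfComparisonDemo {
        @SuppressWarnings("SelfComparison")
        public static void main(String[] args) {
            String s = "mobfile-1";
            // Intentional self-comparison: Comparable requires that
            // x.compareTo(x) == 0, which is exactly what such tests assert.
            System.out.println(s.compareTo(s) == 0); // true
        }
    }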
View File

@@ -366,8 +366,8 @@ public class TestEndToEndSplitTransaction {
}
if (daughterA == null || daughterB == null) {
throw new IOException("Failed to get daughters, daughterA=" + daughterA + ", daughterB=" +
daughterB + ", timeout=" + timeout + ", result=" + result + ", regionName=" + regionName +
", region=" + region);
daughterB + ", timeout=" + timeout + ", result=" + result + ", regionName=" +
Bytes.toString(regionName) + ", region=" + region);
}
//if we are here, this means the region split is complete or timed out

View File

@@ -162,6 +162,7 @@ public class TestHRegionInfo {
assertTrue(HRegionInfo.FIRST_META_REGIONINFO.isMetaRegion());
}
@SuppressWarnings("SelfComparison")
@Test
public void testComparator() {
final TableName tableName = TableName.valueOf(name.getMethodName());

View File

@@ -764,6 +764,7 @@ public class TestKeepDeletes {
/**
* Verify scenarios with multiple CFs and columns
*/
@Test
public void testWithMixedCFs() throws Exception {
HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 1,
HConstants.FOREVER, KeepDeletedCells.TRUE);

View File

@@ -27,8 +27,8 @@ import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -69,7 +69,7 @@ public class TestMemStoreChunkPool {
ChunkCreator.chunkPoolDisabled = chunkPoolDisabledBeforeTest;
}
@Before
@After
public void tearDown() throws Exception {
chunkCreator.clearChunksInPool();
}

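Note on the change above: the cleanup method was annotated `@Before`, so the chunk pool was cleared before each test rather than after it; `@After` runs the method at the right end of the lifecycle. A minimal sketch of the two hooks, assuming JUnit 4 (hypothetical demo):

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class LifecycleDemo {
        @Before
        public void setUp() {
            System.out.println("runs before each @Test: acquire resources");
        }

        @After
        public void tearDown() {
            // Runs after each @Test, even if it throws. Annotating a cleanup
            // method with @Before, as the old code did, cleans up at the
            // wrong end of the test.
            System.out.println("runs after each @Test: release resources");
        }

        @Test
        public void example() {
            System.out.println("test body");
        }
    }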
View File

@@ -237,8 +237,8 @@ public class TestRegionServerMetrics {
ResultScanner scanner = table.getScanner(scan);
for (int i = 0; i < n; i++) {
Result res = scanner.next();
LOG.debug(
"Result row: " + Bytes.toString(res.getRow()) + ", value: " + res.getValue(cf, qualifier));
LOG.debug("Result row: " + Bytes.toString(res.getRow()) + ", value: " +
Bytes.toString(res.getValue(cf, qualifier)));
}
}

View File

@@ -101,7 +101,7 @@ public class TestServerNonceManager {
ServerNonceManager nm = createManager();
try {
nm.endOperation(NO_NONCE, 1, true);
fail("Should have thrown");
throw new Error("Should have thrown");
} catch (AssertionError err) {}
}

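Note on the change above: JUnit's `fail()` throws an `AssertionError`, so calling it inside a `try` whose `catch (AssertionError err)` is the expected path means the test could never fail; it would pass vacuously even when `endOperation` does not throw. Throwing a plain `Error` escapes that catch. A minimal sketch (hypothetical demo):

    public class FailInCatchDemo {
        static void endOperation() {
            // Stand-in for production code that is expected to fail an assert.
            throw new AssertionError("expected failure");
        }

        public static void main(String[] args) {
            try {
                endOperation();
                // JUnit's fail() would throw an AssertionError right here and
                // be swallowed by the catch below; a plain Error escapes it.
                throw new Error("Should have thrown");
            } catch (AssertionError err) {
                System.out.println("got the expected AssertionError");
            }
        }
    }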
View File

@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -50,7 +51,6 @@ import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.StoppableImplementation;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -133,6 +133,19 @@ public class TestStoreFileRefresherChore {
}
}
private void verifyDataExpectFail(Region newReg, int startRow, int numRows, byte[] qf,
byte[]... families) throws IOException {
boolean threw = false;
try {
verifyData(newReg, startRow, numRows, qf, families);
} catch (AssertionError e) {
threw = true;
}
if (!threw) {
fail("Expected data verification to fail");
}
}
private void verifyData(Region newReg, int startRow, int numRows, byte[] qf, byte[]... families)
throws IOException {
for (int i = startRow; i < startRow + numRows; i++) {
@@ -189,17 +202,12 @@ public class TestStoreFileRefresherChore {
primary.flush(true);
verifyData(primary, 0, 100, qf, families);
try {
verifyData(replica1, 0, 100, qf, families);
Assert.fail("should have failed");
} catch(AssertionError ex) {
// expected
}
verifyDataExpectFail(replica1, 0, 100, qf, families);
chore.chore();
verifyData(replica1, 0, 100, qf, families);
// simulate an fs failure where we cannot refresh the store files for the replica
((FailingHRegionFileSystem)((HRegion)replica1).getRegionFileSystem()).fail = true;
((FailingHRegionFileSystem)replica1.getRegionFileSystem()).fail = true;
// write some more data to primary and flush
putData(primary, 100, 100, qf, families);
@@ -209,18 +217,13 @@
chore.chore(); // should not throw ex, but we cannot refresh the store files
verifyData(replica1, 0, 100, qf, families);
try {
verifyData(replica1, 100, 100, qf, families);
Assert.fail("should have failed");
} catch(AssertionError ex) {
// expected
}
verifyDataExpectFail(replica1, 100, 100, qf, families);
chore.isStale = true;
chore.chore(); //now after this, we cannot read back any value
try {
verifyData(replica1, 0, 100, qf, families);
Assert.fail("should have failed with IOException");
fail("should have failed with IOException");
} catch(IOException ex) {
// expected
}

View File

@@ -490,7 +490,7 @@ public class TestReplicationEndpoint extends TestReplicationBase {
}
super.replicate(replicateContext);
LOG.info("Replicated " + row + ", count=" + replicateCount.get());
LOG.info("Replicated " + Bytes.toString(row) + ", count=" + replicateCount.get());
replicated.set(replicateCount.get() > COUNT); // first 10 times, we return false
return replicated.get();

View File

@@ -189,9 +189,9 @@ public class TestTablePermissions {
permission = userPerms.get(0);
assertEquals("Permission should be for " + TEST_TABLE,
TEST_TABLE, permission.getTableName());
assertTrue("Permission should be for family " + TEST_FAMILY,
assertTrue("Permission should be for family " + Bytes.toString(TEST_FAMILY),
Bytes.equals(TEST_FAMILY, permission.getFamily()));
assertTrue("Permission should be for qualifier " + TEST_QUALIFIER,
assertTrue("Permission should be for qualifier " + Bytes.toString(TEST_QUALIFIER),
Bytes.equals(TEST_QUALIFIER, permission.getQualifier()));
// check actions

View File

@@ -125,8 +125,8 @@ public class MultiThreadedUpdaterWithACL extends MultiThreadedUpdater {
res = localTable.get(get);
}
} catch (IOException ie) {
LOG.warn("Failed to get the row for key = [" + get.getRow() + "], column family = ["
+ Bytes.toString(cf) + "]", ie);
LOG.warn("Failed to get the row for key = [" + Bytes.toString(get.getRow()) +
"], column family = [" + Bytes.toString(cf) + "]", ie);
}
return res;
}
@@ -151,8 +151,8 @@ public class MultiThreadedUpdaterWithACL extends MultiThreadedUpdater {
Result result = (Result) user.runAs(action);
return result;
} catch (Exception ie) {
LOG.warn("Failed to get the row for key = [" + get.getRow() + "], column family = ["
+ Bytes.toString(cf) + "]", ie);
LOG.warn("Failed to get the row for key = [" + Bytes.toString(get.getRow()) +
"], column family = [" + Bytes.toString(cf) + "]", ie);
}
}
// This means that no users were present