HBASE-14999 Remove ref to org.mortbay.log.Log.

anoopsjohn 2015-12-17 18:01:16 +05:30
parent d78eddfdc8
commit cf458d3023
8 changed files with 70 additions and 40 deletions
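The change is mechanical and repeated across all eight files below: drop the stray compile-time dependency on Jetty's bundled logger, org.mortbay.log.Log, whose static Log.info(...) calls had crept in alongside the commons-logging imports, and route those messages through a per-class commons-logging LOG field instead. A minimal before/after sketch of the pattern (the class name ExampleCallable is hypothetical, standing in for the classes patched below):

// Before: messages went through Jetty's static utility logger,
// pulled in by an accidental import.
//
//   import org.mortbay.log.Log;
//   ...
//   Log.info("Skipping flush region ...");

// After: a per-class commons-logging logger, the pattern used throughout HBase.
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class ExampleCallable { // hypothetical stand-in class

  // One static logger per class, named after the class, so output is
  // attributed to the right class and levels can be tuned per logger.
  private static final Log LOG = LogFactory.getLog(ExampleCallable.class);

  void doWork() {
    LOG.info("Skipping flush region ...");
  }
}

Besides restoring the intended logging backend, this keeps Jetty internals out of the compile-time classpath, where a stray reference would break as soon as the bundled Jetty is upgraded or shaded.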

FlushRegionCallable.java

@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.client;
 import java.io.IOException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -31,7 +33,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.mortbay.log.Log;
 import com.google.protobuf.ServiceException;
@@ -41,6 +42,8 @@ import com.google.protobuf.ServiceException;
 @InterfaceAudience.Private
 public class FlushRegionCallable extends RegionAdminServiceCallable<FlushRegionResponse> {
+private static final Log LOG = LogFactory.getLog(FlushRegionCallable.class);
 private final byte[] regionName;
 private final boolean writeFlushWalMarker;
 private boolean reload;
@@ -78,7 +81,7 @@ public class FlushRegionCallable extends RegionAdminServiceCallable<FlushRegionR
 if (!reload) {
 throw new IOException("Cached location seems to be different than requested region.");
 }
-Log.info("Skipping flush region, because the located region "
+LOG.info("Skipping flush region, because the located region "
 + Bytes.toStringBinary(location.getRegionInfo().getRegionName()) + " is different than "
 + " requested region " + Bytes.toStringBinary(regionName));
 return FlushRegionResponse.newBuilder()

TestClassFinder.java

@@ -42,6 +42,8 @@ import java.util.jar.Manifest;
 import javax.tools.JavaCompiler;
 import javax.tools.ToolProvider;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.AfterClass;
@@ -50,10 +52,12 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
-import org.mortbay.log.Log;
 @Category({MiscTests.class, SmallTests.class})
 public class TestClassFinder {
+private static final Log LOG = LogFactory.getLog(TestClassFinder.class);
 @Rule public TestName name = new TestName();
 private static final HBaseCommonTestingUtility testUtil = new HBaseCommonTestingUtility();
 private static final String BASEPKG = "tfcpkg";
@@ -79,7 +83,7 @@ public class TestClassFinder {
 deleteTestDir();
 }
 assertTrue(testDir.mkdirs());
-Log.info("Using new, clean directory=" + testDir);
+LOG.info("Using new, clean directory=" + testDir);
 }
 @AfterClass
@@ -142,7 +146,7 @@ public class TestClassFinder {
 public void testClassFinderFiltersByNameInJar() throws Exception {
 final long counter = testCounter.incrementAndGet();
 final String classNamePrefix = name.getMethodName();
-Log.info("Created jar " + createAndLoadJar("", classNamePrefix, counter));
+LOG.info("Created jar " + createAndLoadJar("", classNamePrefix, counter));
 ClassFinder.FileNameFilter notExcNameFilter = new ClassFinder.FileNameFilter() {
 @Override
@@ -162,7 +166,7 @@ public class TestClassFinder {
 public void testClassFinderFiltersByClassInJar() throws Exception {
 final long counter = testCounter.incrementAndGet();
 final String classNamePrefix = name.getMethodName();
-Log.info("Created jar " + createAndLoadJar("", classNamePrefix, counter));
+LOG.info("Created jar " + createAndLoadJar("", classNamePrefix, counter));
 final ClassFinder.ClassFilter notExcClassFilter = new ClassFinder.ClassFilter() {
 @Override
@@ -224,7 +228,7 @@ public class TestClassFinder {
 final long counter = testCounter.incrementAndGet();
 final String classNamePrefix = name.getMethodName();
 String pkgNameSuffix = name.getMethodName();
-Log.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter));
+LOG.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter));
 ClassFinder allClassesFinder = new ClassFinder();
 String pkgName = makePackageName(pkgNameSuffix, counter);
 Set<Class<?>> allClasses = allClassesFinder.findClasses(pkgName, false);
@@ -247,7 +251,7 @@ public class TestClassFinder {
 final long counter = testCounter.incrementAndGet();
 final String classNamePrefix = name.getMethodName();
 String pkgNameSuffix = name.getMethodName();
-Log.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter));
+LOG.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter));
 final String classNameToFilterOut = classNamePrefix + counter;
 final ClassFinder.FileNameFilter notThisFilter = new ClassFinder.FileNameFilter() {
 @Override
@@ -272,7 +276,7 @@ public class TestClassFinder {
 final long counter = testCounter.incrementAndGet();
 final String classNamePrefix = name.getMethodName();
 String pkgNameSuffix = name.getMethodName();
-Log.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter));
+LOG.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter));
 final Class<?> clazz = makeClass(pkgNameSuffix, classNamePrefix, counter);
 final ClassFinder.ClassFilter notThisFilter = new ClassFinder.ClassFilter() {
 @Override

ClientSideRegionScanner.java

@@ -22,6 +22,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -33,7 +35,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.mortbay.log.Log;
 /**
 * A client scanner for a region opened for read-only on the client side. Assumes region data
@@ -42,6 +43,8 @@ import org.mortbay.log.Log;
 @InterfaceAudience.Private
 public class ClientSideRegionScanner extends AbstractClientScanner {
+private static final Log LOG = LogFactory.getLog(ClientSideRegionScanner.class);
 private HRegion region;
 RegionScanner scanner;
 List<Cell> values;
@@ -96,7 +99,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
 this.scanner.close();
 this.scanner = null;
 } catch (IOException ex) {
-Log.warn("Exception while closing scanner", ex);
+LOG.warn("Exception while closing scanner", ex);
 }
 }
 if (this.region != null) {
@@ -105,7 +108,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
 this.region.close(true);
 this.region = null;
 } catch (IOException ex) {
-Log.warn("Exception while closing region", ex);
+LOG.warn("Exception while closing region", ex);
 }
 }
 }

TestHBaseAdminNoCluster.java

@@ -24,9 +24,9 @@ import static org.mockito.Mockito.when;
 import java.io.IOException;
 import java.util.ArrayList;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
@@ -46,6 +46,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -53,13 +55,15 @@ import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-import org.mortbay.log.Log;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 @Category({SmallTests.class, ClientTests.class})
 public class TestHBaseAdminNoCluster {
+private static final Log LOG = LogFactory.getLog(TestHBaseAdminNoCluster.class);
 /**
 * Verify that PleaseHoldException gets retried.
 * HBASE-8764
@@ -99,7 +103,7 @@ public class TestHBaseAdminNoCluster {
 admin.createTable(htd, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
 fail();
 } catch (RetriesExhaustedException e) {
-Log.info("Expected fail", e);
+LOG.info("Expected fail", e);
 }
 // Assert we were called 'count' times.
 Mockito.verify(masterAdmin, Mockito.atLeast(count)).createTable((RpcController)Mockito.any(),
@@ -317,7 +321,7 @@ public class TestHBaseAdminNoCluster {
 caller.call(admin); // invoke the HBaseAdmin method
 fail();
 } catch (RetriesExhaustedException e) {
-Log.info("Expected fail", e);
+LOG.info("Expected fail", e);
 }
 // Assert we were called 'count' times.
 caller.verify(masterAdmin, count);

TestDataBlockEncoders.java

@@ -31,12 +31,14 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Random;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.KeyValueUtil;
@@ -58,7 +60,7 @@ import org.junit.rules.TestRule;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
-import org.mortbay.log.Log;
 /**
 * Test all of the data block encoding algorithms for correctness. Most of the
@@ -67,6 +69,9 @@ import org.mortbay.log.Log;
 @Category({IOTests.class, LargeTests.class})
 @RunWith(Parameterized.class)
 public class TestDataBlockEncoders {
+private static final Log LOG = LogFactory.getLog(TestDataBlockEncoders.class);
 @Rule public final TestRule timeout = CategoryBasedTimeout.builder().
 withTimeout(this.getClass()).withLookingForStuckThread(true).build();
@@ -189,7 +194,7 @@ public class TestDataBlockEncoders {
 List<DataBlockEncoder.EncodedSeeker> encodedSeekers =
 new ArrayList<DataBlockEncoder.EncodedSeeker>();
 for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
-Log.info("Encoding: " + encoding);
+LOG.info("Encoding: " + encoding);
 // Off heap block data support not added for PREFIX_TREE DBE yet.
 // TODO remove this once support is added. HBASE-12298
 if (this.useOffheapData && encoding == DataBlockEncoding.PREFIX_TREE) continue;
@@ -197,7 +202,7 @@ public class TestDataBlockEncoders {
 if (encoder == null) {
 continue;
 }
-Log.info("Encoder: " + encoder);
+LOG.info("Encoder: " + encoder);
 ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv,
 getEncodingContext(Compression.Algorithm.NONE, encoding), this.useOffheapData);
 HFileContext meta = new HFileContextBuilder()
@@ -211,7 +216,7 @@ public class TestDataBlockEncoders {
 seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
 encodedSeekers.add(seeker);
 }
-Log.info("Testing it!");
+LOG.info("Testing it!");
 // test it!
 // try a few random seeks
 for (boolean seekBefore : new boolean[] { false, true }) {
@@ -229,7 +234,7 @@ public class TestDataBlockEncoders {
 }
 // check edge cases
-Log.info("Checking edge cases");
+LOG.info("Checking edge cases");
 checkSeekingConsistency(encodedSeekers, false, sampleKv.get(0));
 for (boolean seekBefore : new boolean[] { false, true }) {
 checkSeekingConsistency(encodedSeekers, seekBefore, sampleKv.get(sampleKv.size() - 1));
@@ -237,7 +242,7 @@ public class TestDataBlockEncoders {
 Cell lastMidKv =CellUtil.createLastOnRowCol(midKv);
 checkSeekingConsistency(encodedSeekers, seekBefore, lastMidKv);
 }
-Log.info("Done");
+LOG.info("Done");
 }
 static ByteBuffer encodeKeyValues(DataBlockEncoding encoding, List<KeyValue> kvs,

TestHRegionOnCluster.java

@@ -17,12 +17,16 @@
 */
 package org.apache.hadoop.hbase.regionserver;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -42,7 +46,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.mortbay.log.Log;
 /**
 * Tests that need to spin up a cluster testing an {@link HRegion}. Use
@@ -51,6 +55,8 @@ import org.mortbay.log.Log;
 */
 @Category({RegionServerTests.class, MediumTests.class})
 public class TestHRegionOnCluster {
+private static final Log LOG = LogFactory.getLog(TestHRegionOnCluster.class);
 private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 @Test (timeout=300000)
@@ -75,7 +81,7 @@ public class TestHRegionOnCluster {
 assertTrue(hbaseAdmin.isTableAvailable(TABLENAME));
 // Put data: r1->v1
-Log.info("Loading r1 to v1 into " + TABLENAME);
+LOG.info("Loading r1 to v1 into " + TABLENAME);
 Table table = TEST_UTIL.getConnection().getTable(TABLENAME);
 putDataAndVerify(table, "r1", FAMILY, "v1", 1);
@@ -94,7 +100,7 @@ public class TestHRegionOnCluster {
 assertFalse(originServer.equals(targetServer));
 TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
-Log.info("Moving " + regionInfo.getEncodedName() + " to " + targetServer.getServerName());
+LOG.info("Moving " + regionInfo.getEncodedName() + " to " + targetServer.getServerName());
 hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
 Bytes.toBytes(targetServer.getServerName().getServerName()));
 do {
@@ -102,12 +108,12 @@ public class TestHRegionOnCluster {
 } while (cluster.getServerWith(regionInfo.getRegionName()) == originServerNum);
 // Put data: r2->v2
-Log.info("Loading r2 to v2 into " + TABLENAME);
+LOG.info("Loading r2 to v2 into " + TABLENAME);
 putDataAndVerify(table, "r2", FAMILY, "v2", 2);
 TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
 // Move region to origin server
-Log.info("Moving " + regionInfo.getEncodedName() + " to " + originServer.getServerName());
+LOG.info("Moving " + regionInfo.getEncodedName() + " to " + originServer.getServerName());
 hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
 Bytes.toBytes(originServer.getServerName().getServerName()));
 do {
@@ -115,11 +121,11 @@ public class TestHRegionOnCluster {
 } while (cluster.getServerWith(regionInfo.getRegionName()) == targetServerNum);
 // Put data: r3->v3
-Log.info("Loading r3 to v3 into " + TABLENAME);
+LOG.info("Loading r3 to v3 into " + TABLENAME);
 putDataAndVerify(table, "r3", FAMILY, "v3", 3);
 // Kill target server
-Log.info("Killing target server " + targetServer.getServerName());
+LOG.info("Killing target server " + targetServer.getServerName());
 targetServer.kill();
 cluster.getRegionServerThreads().get(targetServerNum).join();
 // Wait until finish processing of shutdown
@@ -127,12 +133,12 @@ public class TestHRegionOnCluster {
 Thread.sleep(5);
 }
 // Kill origin server
-Log.info("Killing origin server " + targetServer.getServerName());
+LOG.info("Killing origin server " + targetServer.getServerName());
 originServer.kill();
 cluster.getRegionServerThreads().get(originServerNum).join();
 // Put data: r4->v4
-Log.info("Loading r4 to v4 into " + TABLENAME);
+LOG.info("Loading r4 to v4 into " + TABLENAME);
 putDataAndVerify(table, "r4", FAMILY, "v4", 4);
 } finally {

TestRegionServerNoMaster.java

@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.regionserver;
 import java.io.IOException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -48,7 +50,6 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.mortbay.log.Log;
 import com.google.protobuf.ServiceException;
@@ -58,6 +59,7 @@ import com.google.protobuf.ServiceException;
 @Category({RegionServerTests.class, MediumTests.class})
 public class TestRegionServerNoMaster {
+private static final Log LOG = LogFactory.getLog(TestRegionServerNoMaster.class);
 private static final int NB_SERVERS = 1;
 private static Table table;
 private static final byte[] row = "ee".getBytes();
@@ -95,7 +97,7 @@ public class TestRegionServerNoMaster {
 ServerName masterAddr = master.getServerName();
 master.stopMaster();
-Log.info("Waiting until master thread exits");
+LOG.info("Waiting until master thread exits");
 while (masterThread != null && masterThread.isAlive()) {
 Threads.sleep(100);
 }

TestSplitWalDataLoss.java

@@ -26,6 +26,8 @@ import java.io.IOException;
 import java.util.Collection;
 import org.apache.commons.lang.mutable.MutableBoolean;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -53,7 +55,6 @@ import org.junit.experimental.categories.Category;
 import org.mockito.Matchers;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-import org.mortbay.log.Log;
 /**
 * Testcase for https://issues.apache.org/jira/browse/HBASE-13811
@@ -61,6 +62,8 @@ import org.mortbay.log.Log;
 @Category({ MediumTests.class })
 public class TestSplitWalDataLoss {
+private static final Log LOG = LogFactory.getLog(TestSplitWalDataLoss.class);
 private final HBaseTestingUtility testUtil = new HBaseTestingUtility();
 private NamespaceDescriptor namespace = NamespaceDescriptor.create(getClass().getSimpleName())
@@ -122,7 +125,7 @@ public class TestSplitWalDataLoss {
 .addColumn(family, qualifier, Bytes.toBytes("val0")));
 }
 long oldestSeqIdOfStore = region.getOldestSeqIdOfStore(family);
-Log.info("CHANGE OLDEST " + oldestSeqIdOfStore);
+LOG.info("CHANGE OLDEST " + oldestSeqIdOfStore);
 assertTrue(oldestSeqIdOfStore > HConstants.NO_SEQNUM);
 rs.cacheFlusher.requestFlush(spiedRegion, false);
 synchronized (flushed) {