HBASE-913 Classes using log4j directly

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@703031 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2008-10-09 00:23:22 +00:00
parent 99c892f030
commit 609ff27c83
8 changed files with 22 additions and 31 deletions
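
The source changes below all apply one mechanical pattern: drop the direct dependency on org.apache.log4j.Logger and log through the commons-logging facade instead, which defers the choice of logging implementation to runtime. A minimal before/after sketch of the pattern (the class name Example is hypothetical, not from the commit):

```java
// Before: bound directly to log4j.
//   import org.apache.log4j.Logger;
//   private static final Logger LOG = Logger.getLogger(Example.class);

// After: the commons-logging facade; the backing implementation
// (log4j, java.util.logging, ...) is discovered at runtime.
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class Example {
  private static final Log LOG = LogFactory.getLog(Example.class);

  public static void main(String[] args) {
    LOG.info("logging through the commons-logging facade");
  }
}
```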

CHANGES.txt

@@ -18,6 +18,7 @@ Release 0.19.0 - Unreleased
 HBASE-853 [shell] Cannot describe meta tables (Izaak Rubin via Stack)
 HBASE-844 Can't pass script to hbase shell
 HBASE-837 Add unit tests for ThriftServer.HBaseHandler (Izaak Rubin via Stack)
+HBASE-913 Classes using log4j directly
 IMPROVEMENTS
 HBASE-901 Add a limit to key length, check key and value length on client side

IndexTableReduce.java

@@ -23,7 +23,6 @@ import java.io.IOException;
 import java.util.Iterator;
 import java.util.Map;
-import org.apache.hadoop.hbase.HConstants; //TODO: remove
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.RowResult;
@@ -33,9 +32,10 @@ import org.apache.hadoop.mapred.MapReduceBase;
 import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.Reducer;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.log4j.Logger;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 /**
  * Construct a Lucene document per row, which is consumed by IndexOutputFormat
@@ -43,8 +43,7 @@ import org.apache.lucene.document.Field;
  */
 public class IndexTableReduce extends MapReduceBase implements
     Reducer<ImmutableBytesWritable, RowResult, ImmutableBytesWritable, LuceneDocumentWrapper> {
-  private static final Logger LOG = Logger.getLogger(IndexTableReduce.class);
+  private static final Log LOG = LogFactory.getLog(IndexTableReduce.class);
   private IndexConfiguration indexConf;
   @Override

AbstractMergeTestBase.java

@@ -27,12 +27,13 @@ import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.log4j.Logger;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 /** Abstract base class for merge tests */
 public abstract class AbstractMergeTestBase extends HBaseClusterTestCase {
-  static final Logger LOG =
-    Logger.getLogger(AbstractMergeTestBase.class.getName());
+  static final Log LOG =
+    LogFactory.getLog(AbstractMergeTestBase.class.getName());
   protected static final byte [] COLUMN_NAME = Bytes.toBytes("contents:");
   protected final Random rand = new Random();
   protected HTableDescriptor desc;

MapFilePerformanceEvaluation.java

@@ -30,7 +30,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.MapFile;
-import org.apache.log4j.Logger;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 /**
  * <p>
@@ -42,8 +43,8 @@ public class MapFilePerformanceEvaluation {
   private static final int ROW_LENGTH = 1000;
   private static final int ROW_COUNT = 1000000;
-  static final Logger LOG =
-    Logger.getLogger(MapFilePerformanceEvaluation.class.getName());
+  static final Log LOG =
+    LogFactory.getLog(MapFilePerformanceEvaluation.class.getName());
   static ImmutableBytesWritable format(final int i, ImmutableBytesWritable w) {
     String v = Integer.toString(i);

MiniHBaseCluster.java

@@ -22,7 +22,8 @@ package org.apache.hadoop.hbase;
 import java.io.IOException;
 import java.util.List;
-import org.apache.log4j.Logger;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -33,8 +34,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
  * each server.
  */
 public class MiniHBaseCluster implements HConstants {
-  static final Logger LOG =
-    Logger.getLogger(MiniHBaseCluster.class.getName());
+  static final Log LOG = LogFactory.getLog(MiniHBaseCluster.class.getName());
   private HBaseConfiguration conf;
   private LocalHBaseCluster hbaseCluster;

TestTableIndex.java

@@ -27,8 +27,6 @@ import java.util.Random;
 import junit.framework.TestSuite;
 import junit.textui.TestRunner;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -47,8 +45,8 @@ import org.apache.hadoop.mapred.FileOutputFormat;
 import org.apache.hadoop.mapred.JobClient;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MiniMRCluster;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MultiSearcher;
@@ -82,8 +80,6 @@ public class TestTableIndex extends MultiRegionTable {
     desc = new HTableDescriptor(TABLE_NAME);
     desc.addFamily(new HColumnDescriptor(INPUT_COLUMN));
     desc.addFamily(new HColumnDescriptor(OUTPUT_COLUMN));
-    // Enable DEBUG-level MR logging.
-    Logger.getLogger("org.apache.hadoop.mapred").setLevel(Level.DEBUG);
   }
   @Override
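
The two lines removed just above are the one spot where this migration is not a drop-in rename: commons-logging is a facade with no API for setting levels, only for checking them, so programmatic tweaks like setLevel(Level.DEBUG) (and the root-logger calls removed from TestSplit further down) simply go away. Level configuration moves to the backing implementation, e.g. a log4j.properties line such as log4j.logger.org.apache.hadoop.mapred=DEBUG. What remains available on the facade is the level check, sketched here (the class name LevelCheck is hypothetical):

```java
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class LevelCheck {
  private static final Log LOG = LogFactory.getLog(LevelCheck.class);

  public static void main(String[] args) {
    // commons-logging can ask about the effective level but cannot change it;
    // the check also guards expensive message construction.
    if (LOG.isDebugEnabled()) {
      LOG.debug("debug detail: " + System.currentTimeMillis());
    }
  }
}
```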

TestHRegion.java

@@ -29,7 +29,8 @@ import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.log4j.Logger;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -44,7 +45,7 @@ import org.apache.hadoop.hbase.util.Bytes;
  * HRegions or in the HBaseMaster, so only basic testing is possible.
  */
 public class TestHRegion extends HBaseTestCase {
-  static final Logger LOG = Logger.getLogger(TestHRegion.class);
+  static final Log LOG = LogFactory.getLog(TestHRegion.class);
 /**
  * Since all the "tests" depend on the results of the previous test, they are

TestSplit.java

@@ -24,14 +24,11 @@ import java.util.TreeMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HBaseClusterTestCase;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
@@ -42,7 +39,6 @@ import org.apache.hadoop.hbase.util.Bytes;
  * split and manufactures odd-ball split scenarios.
  */
 public class TestSplit extends HBaseClusterTestCase {
-  @SuppressWarnings("hiding")
   static final Log LOG = LogFactory.getLog(TestSplit.class.getName());
   /** constructor */
@@ -64,10 +60,6 @@ public class TestSplit extends HBaseClusterTestCase {
     // This size should make it so we always split using the addContent
     // below. After adding all data, the first region is 1.3M
     conf.setLong("hbase.hregion.max.filesize", 1024 * 128);
-    Logger.getRootLogger().setLevel(Level.WARN);
-    Logger.getLogger(this.getClass().getPackage().getName()).
-      setLevel(Level.DEBUG);
   }
   /**