HADOOP-1884 Remove useless debugging log messages from hbase.mapred
git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@575009 13f79535-47bb-0310-9956-ffa450edef68
parent c0c89222f1
commit aeceb916fc
@@ -57,6 +57,7 @@ Trunk (unreleased changes)
     HADOOP-1835 Updated Documentation for HBase setup/installation
                 (Izaak Rubin via Stack)
     HADOOP-1868 Make default configuration more responsive
+    HADOOP-1884 Remove useless debugging log messages from hbase.mapred
 
 
 Below are the list of changes before 2007-08-18
@@ -77,18 +77,14 @@ implements InputFormat<HStoreKey, MapWritable>, JobConfigurable {
      * @throws IOException
      */
     public TableRecordReader(Text startRow, Text endRow) throws IOException {
-      LOG.debug("start construct");
       m_row = new TreeMap<Text, byte[]>();
       m_scanner = m_table.obtainScanner(m_cols, startRow);
       m_endRow = endRow;
-      LOG.debug("end construct");
     }
 
     /** {@inheritDoc} */
     public void close() throws IOException {
-      LOG.debug("start close");
       m_scanner.close();
-      LOG.debug("end close");
     }
 
     /**
@@ -135,7 +131,6 @@ implements InputFormat<HStoreKey, MapWritable>, JobConfigurable {
      */
     @SuppressWarnings("unchecked")
     public boolean next(HStoreKey key, MapWritable value) throws IOException {
-      LOG.debug("start next");
       m_row.clear();
       HStoreKey tKey = key;
       boolean hasMore = m_scanner.next(tKey, m_row);
@@ -152,7 +147,6 @@ implements InputFormat<HStoreKey, MapWritable>, JobConfigurable {
           }
         }
       }
-      LOG.debug("end next");
       return hasMore;
     }
 
@@ -175,8 +169,6 @@ implements InputFormat<HStoreKey, MapWritable>, JobConfigurable {
      */
     @SuppressWarnings("unused")
     public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
-      LOG.debug("start getSplits");
-
       Text[] startKeys = m_table.getStartKeys();
       if(startKeys == null || startKeys.length == 0) {
         throw new IOException("Expecting at least one region");
@@ -185,15 +177,15 @@ implements InputFormat<HStoreKey, MapWritable>, JobConfigurable {
     for(int i = 0; i < startKeys.length; i++) {
       splits[i] = new TableSplit(m_tableName, startKeys[i],
           ((i + 1) < startKeys.length) ? startKeys[i + 1] : new Text());
-      LOG.debug("split: " + i + "->" + splits[i]);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("split: " + i + "->" + splits[i]);
+      }
     }
-    LOG.debug("end splits");
     return splits;
   }
 
   /** {@inheritDoc} */
   public void configure(JobConf job) {
-    LOG.debug("start configure");
     Path[] tableNames = job.getInputPaths();
     m_tableName = new Text(tableNames[0].getName());
     String colArg = job.get(COLUMN_LIST);
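Note: the one debug message kept in getSplits() is now wrapped in a LOG.isDebugEnabled() guard. Java evaluates the argument ("split: " + i + "->" + splits[i]) eagerly, so without the guard every call pays for string concatenation and TableSplit.toString() even when the DEBUG level is off. A minimal standalone sketch of the pattern (the class and method names here are illustrative, not part of this commit):

    import org.apache.log4j.Logger;

    public class GuardedDebugExample {
      private static final Logger LOG =
        Logger.getLogger(GuardedDebugExample.class.getName());

      void report(int i, Object split) {
        // Unguarded: the argument string is built (and split.toString()
        // runs) on every call, even when DEBUG output is disabled.
        LOG.debug("split: " + i + "->" + split);

        // Guarded: message construction is skipped unless DEBUG is enabled.
        if (LOG.isDebugEnabled()) {
          LOG.debug("split: " + i + "->" + split);
        }
      }
    }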
@@ -207,7 +199,6 @@ implements InputFormat<HStoreKey, MapWritable>, JobConfigurable {
     } catch (Exception e) {
       LOG.error(e);
     }
-    LOG.debug("end configure");
   }
 
   /** {@inheritDoc} */
@@ -32,7 +32,6 @@ import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HStoreKey;
-import org.apache.log4j.Logger;
 
 /**
  * Scan an HBase table to sort by a specified sort column.
@@ -41,9 +40,6 @@ import org.apache.log4j.Logger;
  */
 @SuppressWarnings("unchecked")
 public abstract class TableMap extends MapReduceBase implements Mapper {
-
-  private static final Logger LOG = Logger.getLogger(TableMap.class.getName());
-
   private TableOutputCollector m_collector;
 
   /** constructor*/
@@ -86,12 +82,10 @@ public abstract class TableMap extends MapReduceBase implements Mapper {
   public void map(WritableComparable key, Writable value,
       OutputCollector output, Reporter reporter) throws IOException {
 
-    LOG.debug("start map");
     if(m_collector.collector == null) {
       m_collector.collector = output;
     }
     map((HStoreKey)key, (MapWritable)value, m_collector, reporter);
-    LOG.debug("end map");
   }
 
   /**
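Note: the map() above is an untyped adapter: it caches the OutputCollector and forwards to an abstract, typed overload whose signature can be read off the cast-and-call line. A hypothetical minimal subclass for illustration (not part of this commit; the collect() signature is an assumption based on how m_collector is used):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HStoreKey;
    import org.apache.hadoop.io.MapWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.Reporter;

    // Hypothetical identity mapper: passes each row through unchanged.
    public class IdentityTableMap extends TableMap {
      @Override
      public void map(HStoreKey key, MapWritable value,
          TableOutputCollector output, Reporter reporter) throws IOException {
        // Emit the row key and its column map as-is; TableOutputCollector
        // is assumed to expose collect(Text, MapWritable).
        output.collect(key.getRow(), value);
      }
    }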
@@ -75,22 +75,13 @@ public class TableOutputFormat
 
     /** {@inheritDoc} */
     public void write(Text key, MapWritable value) throws IOException {
-      LOG.debug("start write");
-
-      // start transaction
-
-      long xid = m_table.startUpdate(key);
+      long xid = m_table.startUpdate(key);          // start transaction
 
       for (Map.Entry<Writable, Writable> e: value.entrySet()) {
         m_table.put(xid, (Text)e.getKey(),
             ((ImmutableBytesWritable)e.getValue()).get());
       }
-
-      // end transaction
-
-      m_table.commit(xid);
-
-      LOG.debug("end write");
+      m_table.commit(xid);                          // end transaction
     }
   }
 
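Note: besides dropping the debug lines, the rewrite collapses the comment-and-blank-line blocks into trailing comments, which makes the row-update protocol easier to follow: startUpdate() opens a batch for one row and returns a lock id, put() queues cell writes under that id, and commit() applies them. A hedged usage sketch with error handling (abort() is assumed from the contemporaneous HTable API; it does not appear in this diff):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HTable;
    import org.apache.hadoop.io.Text;

    public class GuardedUpdateExample {
      // Writes one cell to a row, discarding the batch on failure.
      static void writeCell(HTable table, Text row, Text column, byte[] bytes)
          throws IOException {
        long xid = table.startUpdate(row);  // open the update, get a lock id
        try {
          table.put(xid, column, bytes);    // queue a cell write
          table.commit(xid);                // apply all queued writes
        } catch (IOException e) {
          table.abort(xid);                 // assumed API: discard the batch
          throw e;
        }
      }
    }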
@@ -105,7 +96,6 @@ public class TableOutputFormat
 
     // expecting exactly one path
 
-    LOG.debug("start get writer");
     Text tableName = new Text(job.get(OUTPUT_TABLE));
     HTable table = null;
     try {
@@ -114,7 +104,6 @@ public class TableOutputFormat
       LOG.error(e);
       throw e;
     }
-    LOG.debug("end get writer");
     return new TableRecordWriter(table);
   }
 
@@ -29,16 +29,12 @@ import org.apache.hadoop.mapred.MapReduceBase;
 import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.Reducer;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.log4j.Logger;
 
 /**
  * Write a table, sorting by the input key
 */
 @SuppressWarnings("unchecked")
 public abstract class TableReduce extends MapReduceBase implements Reducer {
-  private static final Logger LOG =
-    Logger.getLogger(TableReduce.class.getName());
-
   TableOutputCollector m_collector;
 
   /** Constructor */
@@ -71,12 +67,11 @@ public abstract class TableReduce extends MapReduceBase implements Reducer {
   @SuppressWarnings("unchecked")
   public void reduce(WritableComparable key, Iterator values,
       OutputCollector output, Reporter reporter) throws IOException {
-    LOG.debug("start reduce");
     if(m_collector.collector == null) {
      m_collector.collector = output;
     }
     reduce((Text)key, values, m_collector, reporter);
-    LOG.debug("end reduce");
   }
 
   /**
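Note: as with TableMap, reduce() above is a thin adapter around an abstract, typed reduce(Text, Iterator, TableOutputCollector, Reporter) overload, and since the base class no longer declares a Logger, a subclass that still wants logging must provide its own. A hypothetical subclass sketch (the class name and the collect() signature are assumptions, not part of this commit):

    import java.io.IOException;
    import java.util.Iterator;
    import org.apache.hadoop.io.MapWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.Reporter;
    import org.apache.log4j.Logger;

    // Hypothetical identity reducer: emits every value under its key.
    public class IdentityTableReduce extends TableReduce {
      // Subclasses declare their own logger now that TableReduce has none.
      private static final Logger LOG =
        Logger.getLogger(IdentityTableReduce.class.getName());

      @Override
      public void reduce(Text key, Iterator values,
          TableOutputCollector output, Reporter reporter) throws IOException {
        while(values.hasNext()) {
          output.collect(key, (MapWritable)values.next());
        }
      }
    }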