HBASE-1526 mapreduce fixup; put back test, deprecate mapred package

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@785051 13f79535-47bb-0310-9956-ffa450edef68
Author: Andrew Kyle Purtell
Date: 2009-06-16 01:51:05 +00:00
parent 20544ecc25
commit a8167fcffa
19 changed files with 26 additions and 7 deletions
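The files below make up the old JobConf-based API in the mapred package, marked @Deprecated here, apparently in favor of the new mapreduce package this issue introduces. As a reader's sketch (not part of the commit): a minimal driver in the deprecated style, mirroring the restored TestTableMapReduce at the end of this diff. The class MapredApiSketch, the table name "srctable", and the column "contents:" are illustrative placeholders.

    import java.io.IOException;
    import java.util.Map;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.BatchUpdate;
    import org.apache.hadoop.hbase.io.Cell;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.io.RowResult;
    import org.apache.hadoop.hbase.mapred.IdentityTableReduce;
    import org.apache.hadoop.hbase.mapred.TableMap;
    import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reporter;

    public class MapredApiSketch {

      /** Re-emits each scanned row as a BatchUpdate keyed by its row key. */
      public static class CopyMap extends MapReduceBase
          implements TableMap<ImmutableBytesWritable, BatchUpdate> {
        public void map(ImmutableBytesWritable key, RowResult value,
            OutputCollector<ImmutableBytesWritable, BatchUpdate> output,
            Reporter reporter) throws IOException {
          BatchUpdate bu = new BatchUpdate(key.get());
          for (Map.Entry<byte[], Cell> e : value.entrySet()) {
            bu.put(e.getKey(), e.getValue().getValue());
          }
          output.collect(key, bu);
        }
      }

      public static void main(String[] args) throws IOException {
        JobConf jobConf = new JobConf(new HBaseConfiguration(), MapredApiSketch.class);
        jobConf.setJobName("copy column contents");
        // Scan every row of 'srctable', handing the "contents:" column to CopyMap.
        TableMapReduceUtil.initTableMapJob("srctable", "contents:", CopyMap.class,
            ImmutableBytesWritable.class, BatchUpdate.class, jobConf);
        // Write the resulting BatchUpdates back to the table via the identity reducer.
        TableMapReduceUtil.initTableReduceJob("srctable", IdentityTableReduce.class, jobConf);
        JobClient.runJob(jobConf);
      }
    }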

BuildTableIndex.java

@@ -50,6 +50,7 @@ import org.apache.hadoop.mapred.JobConf;
  * column content, etc.</li>
  * </ul>
  */
+@Deprecated
 public class BuildTableIndex {
   private static final String USAGE = "Usage: BuildTableIndex " +
     "-m <numMapTasks> -r <numReduceTasks>\n -indexConf <iconfFile> " +

Driver.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.util.ProgramDriver;
  * Driver for hbase mapreduce jobs. Select which to run by passing
  * name of job to this main.
  */
+@Deprecated
 public class Driver {
   /**
    * @param args

GroupingTableMap.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.mapred.Reporter;
 /**
  * Extract grouping columns from input record
  */
+@Deprecated
 public class GroupingTableMap
 extends MapReduceBase
 implements TableMap<ImmutableBytesWritable,RowResult> {

HRegionPartitioner.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.mapred.Partitioner;
  * @param <K2>
  * @param <V2>
  */
+@Deprecated
 public class HRegionPartitioner<K2,V2>
 implements Partitioner<ImmutableBytesWritable, V2> {
   private final Log LOG = LogFactory.getLog(TableInputFormat.class);

IdentityTableMap.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.mapred.Reporter;
 /**
  * Pass the given key and record as-is to reduce
  */
+@Deprecated
 public class IdentityTableMap
 extends MapReduceBase
 implements TableMap<ImmutableBytesWritable, RowResult> {

IdentityTableReduce.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.mapred.Reporter;
 /**
  * Write to table each key, record pair
  */
+@Deprecated
 public class IdentityTableReduce
 extends MapReduceBase
 implements TableReduce<ImmutableBytesWritable, BatchUpdate> {

IndexConfiguration.java

@@ -46,6 +46,7 @@ import org.w3c.dom.Text;
 /**
  * Configuration parameters for building a Lucene index
  */
+@Deprecated
 public class IndexConfiguration extends Configuration {
   private static final Log LOG = LogFactory.getLog(IndexConfiguration.class);

IndexOutputFormat.java

@@ -41,6 +41,7 @@ import org.apache.lucene.search.Similarity;
  * Create a local index, unwrap Lucene documents created by reduce, add them to
  * the index, and copy the index to the destination.
  */
+@Deprecated
 public class IndexOutputFormat extends
     FileOutputFormat<ImmutableBytesWritable, LuceneDocumentWrapper> {
   static final Log LOG = LogFactory.getLog(IndexOutputFormat.class);

IndexTableReduce.java

@@ -41,6 +41,7 @@ import org.apache.commons.logging.LogFactory;
  * Construct a Lucene document per row, which is consumed by IndexOutputFormat
  * to build a Lucene index
  */
+@Deprecated
 public class IndexTableReduce extends MapReduceBase implements
     Reducer<ImmutableBytesWritable, RowResult, ImmutableBytesWritable, LuceneDocumentWrapper> {
   private static final Log LOG = LogFactory.getLog(IndexTableReduce.class);

LuceneDocumentWrapper.java

@@ -28,6 +28,7 @@ import org.apache.lucene.document.Document;
  * A utility class used to pass a lucene document from reduce to OutputFormat.
  * It doesn't really serialize/deserialize a lucene document.
  */
+@Deprecated
 public class LuceneDocumentWrapper implements Writable {
   protected Document doc;

RowCounter.java

@@ -42,6 +42,7 @@ import org.apache.hadoop.util.ToolRunner;
  * Map outputs table rows IF the input row has columns that have content.
  * Uses an {@link IdentityReducer}
  */
+@Deprecated
 public class RowCounter extends Configured implements Tool {
   // Name of this 'program'
   static final String NAME = "rowcounter";

TableInputFormat.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.util.StringUtils;
 /**
  * Convert HBase tabular data into a format that is consumable by Map/Reduce.
  */
+@Deprecated
 public class TableInputFormat extends TableInputFormatBase implements
     JobConfigurable {
   private final Log LOG = LogFactory.getLog(TableInputFormat.class);

TableInputFormatBase.java

@@ -74,6 +74,8 @@ import org.apache.hadoop.util.StringUtils;
  * }
  * </pre>
  */
+@Deprecated
 public abstract class TableInputFormatBase
 implements InputFormat<ImmutableBytesWritable, RowResult> {
   final Log LOG = LogFactory.getLog(TableInputFormatBase.class);

TableMap.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.mapred.Mapper;
  * @param <K> WritableComparable key class
  * @param <V> Writable value class
  */
+@Deprecated
 public interface TableMap<K extends WritableComparable<K>, V extends Writable>
 extends Mapper<ImmutableBytesWritable, RowResult, K, V> {

TableMapReduceUtil.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.mapred.JobConf;
 /**
  * Utility for {@link TableMap} and {@link TableReduce}
  */
+@Deprecated
 @SuppressWarnings("unchecked")
 public class TableMapReduceUtil {

TableOutputFormat.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.util.Progressable;
 /**
  * Convert Map/Reduce output and write it to an HBase table
  */
+@Deprecated
 public class TableOutputFormat extends
 FileOutputFormat<ImmutableBytesWritable, BatchUpdate> {

TableReduce.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.mapred.Reducer;
  * @param <K> key class
  * @param <V> value class
  */
+@Deprecated
 @SuppressWarnings("unchecked")
 public interface TableReduce<K extends WritableComparable, V extends Writable>
 extends Reducer<K, V, ImmutableBytesWritable, BatchUpdate> {

TableSplit.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.mapred.InputSplit;
 /**
  * A table split corresponds to a key range [low, high)
  */
+@Deprecated
 public class TableSplit implements InputSplit, Comparable<TableSplit> {
   private byte [] m_tableName;
   private byte [] m_startRow;

TestTableMapReduce.java

@@ -51,9 +51,9 @@ import org.apache.hadoop.mapred.Reporter;
  * on our tables is simple - take every row in the table, reverse the value of
  * a particular cell, and write it back to the table.
  */
-public class DisabledTestTableMapReduce extends MultiRegionTable {
+public class TestTableMapReduce extends MultiRegionTable {
   private static final Log LOG =
-    LogFactory.getLog(DisabledTestTableMapReduce.class.getName());
+    LogFactory.getLog(TestTableMapReduce.class.getName());
   static final String MULTI_REGION_TABLE_NAME = "mrtest";
   static final String INPUT_COLUMN = "contents:";
@@ -65,7 +65,7 @@ public class DisabledTestTableMapReduce extends MultiRegionTable {
   };
 
   /** constructor */
-  public DisabledTestTableMapReduce() {
+  public TestTableMapReduce() {
     super(INPUT_COLUMN);
     desc = new HTableDescriptor(MULTI_REGION_TABLE_NAME);
     desc.addFamily(new HColumnDescriptor(INPUT_COLUMN));
@@ -93,10 +93,10 @@ public class DisabledTestTableMapReduce extends MultiRegionTable {
       if (value.size() != 1) {
        throw new IOException("There should only be one input column");
       }
-      byte [][] keys = value.keySet().toArray(new byte [value.size()][]);
+      byte [][] keys = value.keySet().toArray(new byte[value.size()][]);
       if(!Bytes.equals(keys[0], Bytes.toBytes(INPUT_COLUMN))) {
-        throw new IOException("Wrong input column. Expected: " + INPUT_COLUMN
-          + " but got: " + keys[0]);
+        throw new IOException("Wrong input column. Expected: '" + INPUT_COLUMN
+          + "' but got: '" + Bytes.toString(keys[0]) + "'");
       }
 
       // Get the original value and reverse it
@@ -130,7 +130,7 @@ public class DisabledTestTableMapReduce extends MultiRegionTable {
     JobConf jobConf = null;
     try {
       LOG.info("Before map/reduce startup");
-      jobConf = new JobConf(conf, DisabledTestTableMapReduce.class);
+      jobConf = new JobConf(conf, TestTableMapReduce.class);
       jobConf.setJobName("process column contents");
       jobConf.setNumReduceTasks(1);
       TableMapReduceUtil.initTableMapJob(Bytes.toString(table.getTableName()),
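
The test's javadoc above says the job takes every row, reverses the value of a particular cell, and writes it back. A minimal sketch of just that reversal step, as a reader's note: the OUTPUT_COLUMN value "text:" and the helper class are assumptions, since only INPUT_COLUMN = "contents:" appears in the hunks above, and the full mapper body is not shown in this diff.

    import java.io.IOException;

    import org.apache.hadoop.hbase.io.BatchUpdate;
    import org.apache.hadoop.hbase.io.Cell;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.io.RowResult;
    import org.apache.hadoop.hbase.util.Bytes;

    class ReverseSketch {
      static final String INPUT_COLUMN = "contents:";
      static final String OUTPUT_COLUMN = "text:"; // assumed output column

      /** Builds a BatchUpdate whose output cell is the reversed input cell. */
      static BatchUpdate reverse(ImmutableBytesWritable key, RowResult value)
          throws IOException {
        Cell cell = value.get(Bytes.toBytes(INPUT_COLUMN));
        if (cell == null) {
          throw new IOException("No " + INPUT_COLUMN + " cell in row");
        }
        byte[] original = cell.getValue();
        byte[] reversed = new byte[original.length];
        for (int i = 0; i < original.length; i++) {
          reversed[i] = original[original.length - 1 - i];
        }
        BatchUpdate update = new BatchUpdate(key.get());
        update.put(OUTPUT_COLUMN, reversed);
        return update;
      }
    }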