HADOOP-1760 Use new MapWritable and SortedMapWritable classes from org.apache.hadoop.io

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@571350 13f79535-47bb-0310-9956-ffa450edef68
Jim Kellerman 2007-08-31 00:37:46 +00:00
parent aba565a228
commit a1689adf0e
17 changed files with 51 additions and 365 deletions
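
In short: this commit swaps HBase's hand-rolled org.apache.hadoop.hbase.io.MapWritable (the 303-line file deleted below) for the generic org.apache.hadoop.io.MapWritable. A minimal sketch of what the change means for callers follows; the Example class name is illustrative, but the constructor and put() calls mirror the ones visible in the diffs:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;

public class Example {
  public static void main(String[] args) {
    // Before this commit (org.apache.hadoop.hbase.io.MapWritable), callers
    // had to name the key, value, and backing-map classes up front:
    //
    //   MapWritable values = new MapWritable(HStoreKey.class,
    //       ImmutableBytesWritable.class,
    //       new TreeMap<WritableComparable, Writable>());
    //
    // The org.apache.hadoop.io replacement is self-describing, so a no-arg
    // constructor suffices, and keys need only implement Writable rather
    // than WritableComparable:
    MapWritable values = new MapWritable();
    values.put(new HStoreKey(new Text("row1"), HConstants.COL_REGIONINFO),
        new ImmutableBytesWritable("value1".getBytes()));
    System.out.println("entries: " + values.size());
  }
}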

CHANGES.txt

@@ -34,6 +34,8 @@ Trunk (unreleased changes)
HADOOP-1746 Clean up findbugs warnings
HADOOP-1757 Bloomfilters: single argument constructor, use enum for bloom
filter types
HADOOP-1760 Use new MapWritable and SortedMapWritable classes from
org.apache.hadoop.io
HADOOP-1802 Startup scripts should wait until hdfs has cleared 'safe mode'

HBaseAdmin.java

@@ -26,12 +26,11 @@ import java.util.SortedMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.io.MapWritable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -187,7 +186,7 @@ public class HBaseAdmin implements HConstants {
break;
}
boolean found = false;
for (Map.Entry<WritableComparable, Writable> e: values.entrySet()) {
for (Map.Entry<Writable, Writable> e: values.entrySet()) {
HStoreKey key = (HStoreKey) e.getKey();
if (key.getColumn().equals(COL_REGIONINFO)) {
info = (HRegionInfo) Writables.getWritable(
@@ -275,7 +274,7 @@ public class HBaseAdmin implements HConstants {
break;
}
valuesfound += 1;
for (Map.Entry<WritableComparable, Writable> e: values.entrySet()) {
for (Map.Entry<Writable, Writable> e: values.entrySet()) {
HStoreKey key = (HStoreKey) e.getKey();
if (key.getColumn().equals(COL_REGIONINFO)) {
info = (HRegionInfo) Writables.getWritable(
@@ -375,7 +374,7 @@ public class HBaseAdmin implements HConstants {
break;
}
valuesfound += 1;
for (Map.Entry<WritableComparable, Writable> e: values.entrySet()) {
for (Map.Entry<Writable, Writable> e: values.entrySet()) {
HStoreKey key = (HStoreKey) e.getKey();
if (key.getColumn().equals(COL_REGIONINFO)) {
info = (HRegionInfo) Writables.getWritable(

HConnectionManager.java

@@ -33,12 +33,11 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.MapWritable;
import org.apache.hadoop.hbase.util.Writables;
/**
@@ -254,7 +253,7 @@ public class HConnectionManager implements HConstants {
if (values == null || values.size() == 0) {
break;
}
for (Map.Entry<WritableComparable, Writable> e: values.entrySet()) {
for (Map.Entry<Writable, Writable> e: values.entrySet()) {
HStoreKey key = (HStoreKey) e.getKey();
if (key.getColumn().equals(COL_REGIONINFO)) {
info = (HRegionInfo) Writables.getWritable(
@@ -686,7 +685,7 @@ public class HConnectionManager implements HConstants {
}
SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
for (Map.Entry<WritableComparable, Writable> e: values.entrySet()) {
for (Map.Entry<Writable, Writable> e: values.entrySet()) {
HStoreKey key = (HStoreKey) e.getKey();
results.put(key.getColumn(),
((ImmutableBytesWritable) e.getValue()).get());

HMaster.java

@@ -47,17 +47,16 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.MapWritable;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Writables;
/**
@@ -209,7 +208,7 @@ HMasterRegionInterface, Runnable {
break;
}
for (Map.Entry<WritableComparable, Writable> e: values.entrySet()) {
for (Map.Entry<Writable, Writable> e: values.entrySet()) {
HStoreKey key = (HStoreKey) e.getKey();
results.put(key.getColumn(),
((ImmutableBytesWritable) e.getValue()).get());
@@ -1730,7 +1729,7 @@ HMasterRegionInterface, Runnable {
SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
Text row = null;
for (Map.Entry<WritableComparable, Writable> e: values.entrySet()) {
for (Map.Entry<Writable, Writable> e: values.entrySet()) {
HStoreKey key = (HStoreKey) e.getKey();
Text thisRow = key.getRow();
if (row == null) {
@@ -2406,7 +2405,7 @@ HMasterRegionInterface, Runnable {
// be inserted if it exists so look for exact match on table name.
if (data != null && data.size() > 0) {
for (WritableComparable k: data.keySet()) {
for (Writable k: data.keySet()) {
if (HRegionInfo.getTableNameFromRegionName(
((HStoreKey) k).getRow()).equals(tableName)) {
@@ -2553,8 +2552,7 @@ HMasterRegionInterface, Runnable {
break;
}
boolean haveRegionInfo = false;
for (Map.Entry<WritableComparable, Writable> e:
values.entrySet()) {
for (Map.Entry<Writable, Writable> e: values.entrySet()) {
byte[] value = ((ImmutableBytesWritable) e.getValue()).get();
if (value == null || value.length == 0) {

HRegionInterface.java

@@ -23,7 +23,8 @@ import java.io.IOException;
import org.apache.hadoop.hbase.filter.RowFilterInterface;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.MapWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.VersionedProtocol;

HRegionServer.java

@@ -40,9 +40,8 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
@@ -53,7 +52,6 @@ import org.apache.hadoop.hbase.filter.RowFilterInterface;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.BatchOperation;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.MapWritable;
import org.apache.hadoop.hbase.util.Writables;
/*******************************************************************************
@@ -1034,10 +1032,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
throws IOException {
requestCount.incrementAndGet();
HRegion region = getRegion(regionName);
MapWritable result = new MapWritable(HStoreKey.class,
ImmutableBytesWritable.class,
new TreeMap<WritableComparable, Writable>());
MapWritable result = new MapWritable();
TreeMap<Text, byte[]> map = region.getFull(row);
for (Map.Entry<Text, byte []> es: map.entrySet()) {
result.put(new HStoreKey(row, es.getKey()),
@@ -1059,9 +1054,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
// Collect values to be returned here
MapWritable values = new MapWritable(HStoreKey.class,
ImmutableBytesWritable.class,
new TreeMap<WritableComparable, Writable>());
MapWritable values = new MapWritable();
// Keep getting rows until we find one that has at least one non-deleted column value

HTable.java

@@ -35,10 +35,10 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.filter.RowFilterInterface;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.MapWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.ipc.RemoteException;
/**
@@ -361,7 +361,7 @@ public class HTable implements HConstants {
}
SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
if (value != null && value.size() != 0) {
for (Map.Entry<WritableComparable, Writable> e: value.entrySet()) {
for (Map.Entry<Writable, Writable> e: value.entrySet()) {
HStoreKey key = (HStoreKey) e.getKey();
results.put(key.getColumn(),
((ImmutableBytesWritable) e.getValue()).get());
@@ -764,7 +764,7 @@ public class HTable implements HConstants {
} while (values != null && values.size() == 0 && nextScanner());
if (values != null && values.size() != 0) {
for (Map.Entry<WritableComparable, Writable> e: values.entrySet()) {
for (Map.Entry<Writable, Writable> e: values.entrySet()) {
HStoreKey k = (HStoreKey) e.getKey();
key.setRow(k.getRow());
key.setVersion(k.getTimestamp());

MapWritable.java (deleted)

@@ -1,303 +0,0 @@
/**
* Copyright 2007 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.util.TreeMap;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.HConstants;
/**
*
*/
public class MapWritable implements Writable, Map<WritableComparable, Writable> {
private String keyClass = null;
private String valueClass = null;
private String mapClass = null;
private Map<WritableComparable, Writable> instance = null;
/**
* Default constructor used by writable
*/
public MapWritable() {}
/**
* @param keyClass the class of the keys
* @param valueClass the class of the values
* @param instance the Map to be wrapped in this Writable
*/
@SuppressWarnings("unchecked")
public MapWritable(Class keyClass, Class valueClass,
Map<WritableComparable, Writable> instance) {
this.keyClass = keyClass.getName();
this.valueClass = valueClass.getName();
this.instance = instance;
this.mapClass = instance.getClass().getName();
}
private void checkInitialized() {
if (keyClass == null ||
valueClass == null ||
mapClass == null ||
instance == null) {
throw new IllegalStateException("object has not been properly initialized");
}
}
/** {@inheritDoc} */
public void clear() {
checkInitialized();
instance.clear();
}
/** {@inheritDoc} */
public boolean containsKey(Object key) {
checkInitialized();
return instance.containsKey(key);
}
/** {@inheritDoc} */
public boolean containsValue(Object value) {
checkInitialized();
return instance.containsValue(value);
}
/** {@inheritDoc} */
public Set<Map.Entry<WritableComparable, Writable>> entrySet() {
checkInitialized();
return instance.entrySet();
}
/** {@inheritDoc} */
public Writable get(Object key) {
checkInitialized();
return instance.get(key);
}
/**
* Returns the value to which this map maps the specified key
* @param key
* @return value associated with specified key
*/
public Writable get(WritableComparable key) {
checkInitialized();
return instance.get(key);
}
/** {@inheritDoc} */
public boolean isEmpty() {
checkInitialized();
return instance.isEmpty();
}
/** {@inheritDoc} */
public Set<WritableComparable> keySet() {
checkInitialized();
return instance.keySet();
}
/** {@inheritDoc} */
public Writable put(WritableComparable key, Writable value) {
checkInitialized();
return instance.put(key, value);
}
/** {@inheritDoc} */
public void putAll(Map<? extends WritableComparable,? extends Writable> t) {
checkInitialized();
instance.putAll(t);
}
/** {@inheritDoc} */
public Writable remove(Object key) {
checkInitialized();
return instance.remove(key);
}
/**
* Removes the mapping for this key from this map if it is present
* @param key
* @return value corresponding to key
*/
public Writable remove(WritableComparable key) {
checkInitialized();
return instance.remove(key);
}
/** {@inheritDoc} */
public int size() {
checkInitialized();
return instance.size();
}
/** {@inheritDoc} */
public Collection<Writable> values() {
checkInitialized();
return instance.values();
}
// Writable
/** {@inheritDoc} */
public void write(DataOutput out) throws IOException {
checkInitialized();
out.writeUTF(mapClass);
out.writeUTF(keyClass);
out.writeUTF(valueClass);
out.writeInt(instance.size());
for (Map.Entry<WritableComparable, Writable> e: instance.entrySet()) {
e.getKey().write(out);
e.getValue().write(out);
}
}
/** {@inheritDoc} */
@SuppressWarnings("unchecked")
public void readFields(DataInput in) throws IOException {
mapClass = in.readUTF();
keyClass = in.readUTF();
valueClass = in.readUTF();
instance = (Map<WritableComparable, Writable>) objectFactory(mapClass);
int entries = in.readInt();
for (int i = 0; i < entries; i++) {
WritableComparable key = (WritableComparable) objectFactory(keyClass);
key.readFields(in);
Writable value = (Writable) objectFactory(valueClass);
value.readFields(in);
instance.put(key, value);
}
}
private Object objectFactory(String className) throws IOException {
Object o = null;
String exceptionMessage = null;
try {
o = Class.forName(className).newInstance();
} catch (ClassNotFoundException e) {
exceptionMessage = e.getMessage();
} catch (InstantiationException e) {
exceptionMessage = e.getMessage();
} catch (IllegalAccessException e) {
exceptionMessage = e.getMessage();
} finally {
if (exceptionMessage != null) {
throw new IOException("error instantiating " + className + " because " +
exceptionMessage);
}
}
return o;
}
/**
* A simple main program to test this class.
*
* @param args not used
* @throws IOException
*/
@SuppressWarnings("unchecked")
public static void main(@SuppressWarnings("unused") String[] args)
throws IOException {
HStoreKey[] keys = {
new HStoreKey(new Text("row1"), HConstants.COL_REGIONINFO),
new HStoreKey(new Text("row2"), HConstants.COL_SERVER),
new HStoreKey(new Text("row3"), HConstants.COL_STARTCODE)
};
ImmutableBytesWritable[] values = {
new ImmutableBytesWritable("value1".getBytes()),
new ImmutableBytesWritable("value2".getBytes()),
new ImmutableBytesWritable("value3".getBytes())
};
@SuppressWarnings("unchecked")
MapWritable inMap = new MapWritable(HStoreKey.class,
ImmutableBytesWritable.class,
(Map) new TreeMap<HStoreKey, ImmutableBytesWritable>());
for (int i = 0; i < keys.length; i++) {
inMap.put(keys[i], values[i]);
}
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
DataOutput out = new DataOutputStream(bytes);
try {
inMap.write(out);
} catch (IOException e) {
e.printStackTrace();
throw e;
}
MapWritable outMap = new MapWritable();
DataInput in =
new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
try {
outMap.readFields(in);
} catch (IOException e) {
e.printStackTrace();
throw e;
}
if (outMap.size() != inMap.size()) {
System.err.println("outMap.size()=" + outMap.size() + " != " +
"inMap.size()=" + inMap.size());
}
for (Map.Entry<WritableComparable, Writable> e: inMap.entrySet()) {
if (!outMap.containsKey(e.getKey())) {
System.err.println("outMap does not contain key " + e.getKey().toString());
continue;
}
if (((WritableComparable) outMap.get(e.getKey())).compareTo(
e.getValue()) != 0) {
System.err.println("output value for " + e.getKey().toString() + " != input value");
}
}
System.out.println("it worked!");
}
}
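
The class deleted above wrote its map, key, and value class names as UTF strings and required every entry to share those classes; the org.apache.hadoop.io replacement does its type bookkeeping internally, which is why the whole file can go. Below is a hedged sketch of the same write/read round trip the deleted main() exercised, using only the new class (the RoundTrip name is illustrative):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;

public class RoundTrip {
  public static void main(String[] args) throws IOException {
    MapWritable inMap = new MapWritable();
    inMap.put(new Text("row1"), new Text("value1"));

    // Serialize: no keyClass/valueClass/mapClass strings are written by the
    // caller; the class handles its own type information.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    inMap.write(new DataOutputStream(bytes));

    // Deserialize into a plain no-arg instance; readFields() needs no prior
    // configuration of the expected classes.
    MapWritable outMap = new MapWritable();
    outMap.readFields(
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println("round trip ok: " + (outMap.size() == inMap.size()
        && new Text("value1").equals(outMap.get(new Text("row1")))));
  }
}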

GroupingTableMap.java

@@ -25,11 +25,10 @@ import java.util.Map;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.MapWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
@@ -88,7 +87,7 @@ public class GroupingTableMap extends TableMap {
* Pass the new key and value to reduce.
* If any of the grouping columns are not found in the value, the record is skipped.
*
* @see org.apache.hadoop.hbase.mapred.TableMap#map(org.apache.hadoop.hbase.HStoreKey, org.apache.hadoop.hbase.io.MapWritable, org.apache.hadoop.hbase.mapred.TableOutputCollector, org.apache.hadoop.mapred.Reporter)
* @see org.apache.hadoop.hbase.mapred.TableMap#map(org.apache.hadoop.hbase.HStoreKey, org.apache.hadoop.io.MapWritable, org.apache.hadoop.hbase.mapred.TableOutputCollector, org.apache.hadoop.mapred.Reporter)
*/
@Override
public void map(@SuppressWarnings("unused") HStoreKey key,
@@ -116,7 +115,7 @@ public class GroupingTableMap extends TableMap {
ArrayList<byte[]> foundList = new ArrayList<byte[]>();
int numCols = m_columns.length;
if(numCols > 0) {
for (Map.Entry<WritableComparable, Writable> e: r.entrySet()) {
for (Map.Entry<Writable, Writable> e: r.entrySet()) {
Text column = (Text) e.getKey();
for (int i = 0; i < numCols; i++) {
if (column.equals(m_columns[i])) {

IdentityTableMap.java

@@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.mapred;
import java.io.IOException;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.MapWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.Reporter;
@@ -40,7 +40,7 @@ public class IdentityTableMap extends TableMap {
/**
* Pass the key, value to reduce
*
* @see org.apache.hadoop.hbase.mapred.TableMap#map(org.apache.hadoop.hbase.HStoreKey, org.apache.hadoop.hbase.io.MapWritable, org.apache.hadoop.hbase.mapred.TableOutputCollector, org.apache.hadoop.mapred.Reporter)
* @see org.apache.hadoop.hbase.mapred.TableMap#map(org.apache.hadoop.hbase.HStoreKey, org.apache.hadoop.io.MapWritable, org.apache.hadoop.hbase.mapred.TableOutputCollector, org.apache.hadoop.mapred.Reporter)
*/
@Override
public void map(HStoreKey key, MapWritable value,

IdentityTableReduce.java

@@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.mapred;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.hbase.io.MapWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.Reporter;

TableInputFormat.java

@@ -26,6 +26,7 @@ import java.util.TreeMap;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.InputFormat;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.HTable;
import org.apache.hadoop.hbase.HScannerInterface;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.MapWritable;
import org.apache.log4j.Logger;
@@ -107,9 +107,7 @@ implements InputFormat&lt;HStoreKey, MapWritable&gt;, JobConfigurable {
*/
@SuppressWarnings("unchecked")
public MapWritable createValue() {
return new MapWritable((Class) Text.class,
(Class) ImmutableBytesWritable.class,
(Map) new TreeMap<Text, ImmutableBytesWritable>());
return new MapWritable();
}
/** {@inheritDoc} */

TableMap.java

@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.mapred;
import java.io.IOException;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
@@ -28,10 +30,8 @@ import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.hbase.io.MapWritable;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;
/**

TableOutputCollector.java

@@ -21,11 +21,10 @@ package org.apache.hadoop.hbase.mapred;
import java.io.IOException;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.hbase.io.MapWritable;
/**
* Refine the types that can be collected from a Table Map/Reduce job.
*/

TableOutputFormat.java

@@ -23,9 +23,9 @@ import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.FileAlreadyExistsException;
import org.apache.hadoop.mapred.InvalidJobConfException;
import org.apache.hadoop.mapred.JobConf;
@@ -36,7 +36,6 @@ import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.hbase.HTable;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.MapWritable;
import org.apache.log4j.Logger;
@@ -82,7 +81,7 @@ public class TableOutputFormat
long xid = m_table.startUpdate(key);
for (Map.Entry<WritableComparable, Writable> e: value.entrySet()) {
for (Map.Entry<Writable, Writable> e: value.entrySet()) {
m_table.put(xid, (Text)e.getKey(),
((ImmutableBytesWritable)e.getValue()).get());
}

TestScanner2.java

@@ -32,17 +32,16 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.hbase.filter.RegExpRowFilter;
import org.apache.hadoop.hbase.filter.RowFilterInterface;
import org.apache.hadoop.hbase.filter.RowFilterSet;
import org.apache.hadoop.hbase.filter.StopRowFilter;
import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.MapWritable;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;
/**
* Additional scanner tests.
@@ -219,7 +218,7 @@ public class TestScanner2 extends HBaseClusterTestCase {
break;
}
for (Map.Entry<WritableComparable, Writable> e: values.entrySet()) {
for (Map.Entry<Writable, Writable> e: values.entrySet()) {
HStoreKey k = (HStoreKey) e.getKey();
results.put(k.getColumn(),
((ImmutableBytesWritable) e.getValue()).get());

TestTableMapReduce.java

@@ -29,15 +29,16 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.MapWritable;
import org.apache.hadoop.hbase.mapred.TableMap;
import org.apache.hadoop.hbase.mapred.TableOutputCollector;
import org.apache.hadoop.hbase.mapred.TableReduce;
import org.apache.hadoop.hbase.mapred.IdentityTableReduce;
/**
@@ -120,7 +121,7 @@ public class TestTableMapReduce extends HBaseTestCase {
/**
* Pass the key, and reversed value to reduce
*
* @see org.apache.hadoop.hbase.mapred.TableMap#map(org.apache.hadoop.hbase.HStoreKey, org.apache.hadoop.hbase.io.MapWritable, org.apache.hadoop.hbase.mapred.TableOutputCollector, org.apache.hadoop.mapred.Reporter)
* @see org.apache.hadoop.hbase.mapred.TableMap#map(org.apache.hadoop.hbase.HStoreKey, org.apache.hadoop.io.MapWritable, org.apache.hadoop.hbase.mapred.TableOutputCollector, org.apache.hadoop.mapred.Reporter)
*/
@SuppressWarnings("unchecked")
@Override
@@ -151,9 +152,7 @@ public class TestTableMapReduce extends HBaseTestCase {
// Now set the value to be collected
MapWritable outval = new MapWritable((Class) Text.class,
(Class) ImmutableBytesWritable.class,
(Map) new TreeMap<Text, ImmutableBytesWritable>());
MapWritable outval = new MapWritable();
outval.put(TEXT_OUTPUT_COLUMN,
new ImmutableBytesWritable(newValue.toString().getBytes()));
@@ -163,6 +162,7 @@ public class TestTableMapReduce extends HBaseTestCase {
/**
* Test hbase mapreduce jobs against single region and multi-region tables.
* @throws IOException
*/
public void testTableMapReduce() throws IOException {
localTestSingleRegionTable();
@@ -214,7 +214,7 @@
TableMap.initJob(SINGLE_REGION_TABLE_NAME, INPUT_COLUMN,
ProcessContentsMapper.class, jobConf);
IdentityTableReduce.initJob(SINGLE_REGION_TABLE_NAME,
TableReduce.initJob(SINGLE_REGION_TABLE_NAME,
IdentityTableReduce.class, jobConf);
JobClient.runJob(jobConf);
@@ -264,7 +264,7 @@
TableMap.initJob(MULTI_REGION_TABLE_NAME, INPUT_COLUMN,
ProcessContentsMapper.class, jobConf);
IdentityTableReduce.initJob(MULTI_REGION_TABLE_NAME,
TableReduce.initJob(MULTI_REGION_TABLE_NAME,
IdentityTableReduce.class, jobConf);
JobClient.runJob(jobConf);
@@ -306,6 +306,7 @@
}
}
@SuppressWarnings("null")
private void verify(Configuration conf, String tableName) throws IOException {
HTable table = new HTable(conf, new Text(tableName));
@@ -334,6 +335,8 @@
}
// verify second value is the reverse of the first
assertNotNull(firstValue);
assertNotNull(secondValue);
assertEquals(firstValue.length, secondValue.length);
for (int i=0; i<firstValue.length; i++) {
assertEquals(firstValue[i], secondValue[firstValue.length-i-1]);