HBASE-524 Problems with getFull
-Added new test case to exercise the problems
-Fixed getFull implementation in HStore and Memcache

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@638525 13f79535-47bb-0310-9956-ffa450edef68

parent 0b65a3cc46
commit ea1ac1f75c
@@ -45,6 +45,7 @@ Hbase Change Log
    HBASE-516 HStoreFile.finalKey does not update the final key if it is not
              the top region of a split region
    HBASE-525 HTable.getRow(Text) does not work (Clint Morgan via Bryan Duxbury)
+   HBASE-524 Problems with getFull
 
 IMPROVEMENTS
    HBASE-415 Rewrite leases to use DelayedBlockingQueue instead of polling
@@ -1057,7 +1057,7 @@ public class HStore implements HConstants {
    */
   void getFull(HStoreKey key, final Set<Text> columns, TreeMap<Text, Cell> results)
   throws IOException {
-    Map<Text, List<Long>> deletes = new HashMap<Text, List<Long>>();
+    Map<Text, Long> deletes = new HashMap<Text, Long>();
 
     // if the key is null, we're not even looking for anything. return.
     if (key == null) {
@@ -1067,7 +1067,7 @@ public class HStore implements HConstants {
     this.lock.readLock().lock();
 
     // get from the memcache first.
-    memcache.getFull(key, columns, results);
+    memcache.getFull(key, columns, deletes, results);
 
     try {
       MapFile.Reader[] maparray = getReaders();
@@ -1078,33 +1078,7 @@ public class HStore implements HConstants {
 
-        // synchronize on the map so that no one else iterates it at the same
-        // time
-        synchronized(map) {
-          // seek back to the beginning
-          map.reset();
-
-          // seek to the closest key that should match the row we're looking for
-          ImmutableBytesWritable readval = new ImmutableBytesWritable();
-          HStoreKey readkey = (HStoreKey)map.getClosest(key, readval);
-          if (readkey == null) {
-            continue;
-          }
-          do {
-            Text readcol = readkey.getColumn();
-            if (results.get(readcol) == null
-                && key.matchesWithoutColumn(readkey)) {
-              if(isDeleted(readkey, readval.get(), true, deletes)) {
-                break;
-              }
-              if (columns == null || columns.contains(readkey.getColumn())) {
-                results.put(new Text(readcol), new Cell(readval.get(), readkey.getTimestamp()));
-              }
-              readval = new ImmutableBytesWritable();
-            } else if(key.getRow().compareTo(readkey.getRow()) < 0) {
-              break;
-            }
-
-          } while(map.next(readkey, readval));
-        }
+        getFullFromMapFile(map, key, columns, deletes, results);
       }
 
     } finally {
@@ -1112,6 +1086,61 @@ public class HStore implements HConstants {
     }
   }
 
+  private void getFullFromMapFile(MapFile.Reader map, HStoreKey key,
+      Set<Text> columns, Map<Text, Long> deletes, TreeMap<Text, Cell> results)
+  throws IOException {
+    synchronized(map) {
+      // seek back to the beginning
+      map.reset();
+
+      // seek to the closest key that should match the row we're looking for
+      ImmutableBytesWritable readval = new ImmutableBytesWritable();
+      HStoreKey readkey = (HStoreKey)map.getClosest(key, readval);
+      if (readkey == null) {
+        return;
+      }
+      do {
+        Text readcol = readkey.getColumn();
+
+        // if we're looking for this column (or all of them), and there isn't
+        // already a value for this column in the results map, and the key we
+        // just read matches, then we'll consider it
+        if ((columns == null || columns.contains(readcol))
+            && !results.containsKey(readcol)
+            && key.matchesWithoutColumn(readkey)) {
+          // if the value of the cell we're looking at right now is a delete,
+          // we need to treat it differently
+          if(HLogEdit.isDeleted(readval.get())) {
+            // if it's not already recorded as a delete or recorded with a more
+            // recent delete timestamp, record it for later
+            if (!deletes.containsKey(readcol)
+                || deletes.get(readcol).longValue() < readkey.getTimestamp()) {
+              deletes.put(new Text(readcol), readkey.getTimestamp());
+            }
+          } else if (!(deletes.containsKey(readcol)
+              && deletes.get(readcol).longValue() >= readkey.getTimestamp()) ) {
+            // So the cell itself isn't a delete, but there may be a delete
+            // pending from earlier in our search. Only record this result if
+            // there aren't any pending deletes.
+            if (!(deletes.containsKey(readcol)
+                && deletes.get(readcol).longValue() >= readkey.getTimestamp())) {
+              results.put(new Text(readcol),
+                  new Cell(readval.get(), readkey.getTimestamp()));
+              // need to reinstantiate the readval so we can reuse it,
+              // otherwise next iteration will destroy our result
+              readval = new ImmutableBytesWritable();
+            }
+          }
+        } else if(key.getRow().compareTo(readkey.getRow()) < 0) {
+          // if we've crossed into the next row, then we can just stop
+          // iterating
+          break;
+        }
+
+      } while(map.next(readkey, readval));
+    }
+  }
+
   MapFile.Reader [] getReaders() {
     return this.readers.values().
       toArray(new MapFile.Reader[this.readers.size()]);
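
The masking rule that getFullFromMapFile applies is easier to see in isolation. The sketch below is plain Java, not HBase code; the class name and the accept() helper are invented for illustration. Cells are presented newest-store-first, mirroring the memcache, snapshot, and map-file scan order of HStore.getFull: each delete marker records the newest delete timestamp seen for its column, and any later-scanned value at or below that timestamp is suppressed.

import java.util.HashMap;
import java.util.Map;

// Standalone illustration of the per-column delete masking used above.
public class DeleteMaskSketch {
  // newest delete timestamp seen so far, keyed by column name
  private final Map<String, Long> deletes = new HashMap<String, Long>();

  // Called for each cell, in newest-to-oldest store order.
  // Returns true if a non-delete cell should be copied into the results.
  boolean accept(String column, long timestamp, boolean isDeleteMarker) {
    Long seen = deletes.get(column);
    if (isDeleteMarker) {
      // remember only the most recent delete per column
      if (seen == null || seen.longValue() < timestamp) {
        deletes.put(column, timestamp);
      }
      return false;              // a delete marker is never a result
    }
    // masked when a delete at the same or a newer timestamp was recorded
    return seen == null || seen.longValue() < timestamp;
  }

  public static void main(String[] args) {
    DeleteMaskSketch sketch = new DeleteMaskSketch();
    System.out.println(sketch.accept("info:a", 30, true));   // false: delete recorded
    System.out.println(sketch.accept("info:a", 20, false));  // false: masked by the delete
    System.out.println(sketch.accept("info:b", 20, false));  // true: different column
  }
}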
@@ -145,14 +145,15 @@ class Memcache {
    * @param key
    * @param results
    */
-  void getFull(HStoreKey key, Set<Text> columns, SortedMap<Text, Cell> results) {
+  void getFull(HStoreKey key, Set<Text> columns, Map<Text, Long> deletes,
+      SortedMap<Text, Cell> results) {
     this.lock.readLock().lock();
     try {
       synchronized (memcache) {
-        internalGetFull(memcache, key, columns, results);
+        internalGetFull(memcache, key, columns, deletes, results);
       }
       synchronized (snapshot) {
-        internalGetFull(snapshot, key, columns, results);
+        internalGetFull(snapshot, key, columns, deletes, results);
       }
 
     } finally {
@@ -161,7 +162,8 @@ class Memcache {
   }
 
   private void internalGetFull(SortedMap<HStoreKey, byte[]> map, HStoreKey key,
-      Set<Text> columns, SortedMap<Text, Cell> results) {
+      Set<Text> columns, Map<Text, Long> deletes,
+      SortedMap<Text, Cell> results) {
 
     if (map.isEmpty() || key == null) {
       return;
@@ -174,12 +176,17 @@ class Memcache {
       if (results.get(itCol) == null && key.matchesWithoutColumn(itKey)) {
         byte [] val = tailMap.get(itKey);
 
-        if (!HLogEdit.isDeleted(val)) {
-          if (columns == null || columns.contains(itKey.getColumn())) {
-            results.put(itCol, new Cell(val, itKey.getTimestamp()));
-          }
+        if (columns == null || columns.contains(itKey.getColumn())) {
+          if (HLogEdit.isDeleted(val)) {
+            if (!deletes.containsKey(itCol)
+                || deletes.get(itCol).longValue() < itKey.getTimestamp()) {
+              deletes.put(new Text(itCol), itKey.getTimestamp());
+            }
+          } else if (!(deletes.containsKey(itCol)
+              && deletes.get(itCol).longValue() >= itKey.getTimestamp())) {
+            results.put(new Text(itCol), new Cell(val, itKey.getTimestamp()));
+          }
         }
 
       } else if (key.getRow().compareTo(itKey.getRow()) < 0) {
         break;
       }
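
The point of threading a single deletes map from HStore.getFull through Memcache.getFull and into both internalGetFull calls is that a delete recorded while scanning the live memcache must still mask an older value that only exists in the snapshot, or in a map file scanned even later. Below is a standalone sketch of that flow in plain Java; the Cell class, the scan() helper, and the column names are invented stand-ins, not HBase API.

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class SharedDeletesSketch {
  // a cell: column, timestamp, value; a null value stands in for a delete marker
  static class Cell {
    final String column; final long timestamp; final String value;
    Cell(String column, long timestamp, String value) {
      this.column = column; this.timestamp = timestamp; this.value = value;
    }
  }

  // one pass over one store, mirroring internalGetFull: record delete markers,
  // keep live cells that are not masked and not already answered by a newer store
  static void scan(Cell[] store, Map<String, Long> deletes, Map<String, String> results) {
    for (Cell c : store) {
      if (results.containsKey(c.column)) {
        continue;                                   // a newer store already answered
      }
      Long seen = deletes.get(c.column);
      if (c.value == null) {                        // delete marker
        if (seen == null || seen.longValue() < c.timestamp) {
          deletes.put(c.column, c.timestamp);       // keep only the newest delete
        }
      } else if (seen == null || seen.longValue() < c.timestamp) {
        results.put(c.column, c.value);             // not masked by any recorded delete
      }
    }
  }

  public static void main(String[] args) {
    Cell[] memcache = { new Cell("info:a", 30L, null) };          // delete at t=30
    Cell[] snapshot = { new Cell("info:a", 20L, "stale"),         // masked by the delete
                        new Cell("info:b", 20L, "kept") };
    Map<String, Long> deletes = new HashMap<String, Long>();
    Map<String, String> results = new LinkedHashMap<String, String>();
    scan(memcache, deletes, results);   // live memcache first
    scan(snapshot, deletes, results);   // then the snapshot, sharing the same deletes map
    System.out.println(results);        // prints {info:b=kept}
  }
}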
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.util.Map;
+import java.util.HashSet;
 import java.util.TreeMap;
 import java.util.Set;
 
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.hbase.filter.StopRowFilter;
@@ -40,7 +41,7 @@ import org.apache.hadoop.hbase.io.BatchUpdate;
  * {@link TestGet} is a medley of tests of get all done up as a single test.
  * This class
  */
-public class TestGet2 extends HBaseTestCase {
+public class TestGet2 extends HBaseTestCase implements HConstants {
   private MiniDFSCluster miniHdfs;
 
   @Override
@@ -301,6 +302,111 @@ public class TestGet2 extends HBaseTestCase {
       assertNull(result.get(COLUMNS[2]));
     }
   }
 
+  public void testGetFullMultiMapfile() throws IOException {
+    HRegion region = null;
+    HRegionIncommon region_incommon = null;
+    BatchUpdate batchUpdate = null;
+    Map<Text, Cell> results = null;
+
+    try {
+      HTableDescriptor htd = createTableDescriptor(getName());
+      region = createNewHRegion(htd, null, null);
+      region_incommon = new HRegionIncommon(region);
+
+      //
+      // Test ordering issue
+      //
+      Text row = new Text("row1");
+
+      // write some data
+      batchUpdate = new BatchUpdate(row);
+      batchUpdate.put(COLUMNS[0], "olderValue".getBytes());
+      region.batchUpdate(batchUpdate);
+
+      // flush
+      region.flushcache();
+
+      // assert that getFull gives us the older value
+      results = region.getFull(row, (Set<Text>)null, LATEST_TIMESTAMP);
+      assertEquals("olderValue", new String(results.get(COLUMNS[0]).getValue()));
+
+      // write a new value for the cell
+      batchUpdate = new BatchUpdate(row);
+      batchUpdate.put(COLUMNS[0], "newerValue".getBytes());
+      region.batchUpdate(batchUpdate);
+
+      // flush
+      region.flushcache();
+
+      // assert that getFull gives us the later value
+      results = region.getFull(row, (Set<Text>)null, LATEST_TIMESTAMP);
+      assertEquals("newerValue", new String(results.get(COLUMNS[0]).getValue()));
+
+      //
+      // Test the delete masking issue
+      //
+      Text row2 = new Text("row2");
+      Text cell1 = new Text(COLUMNS[0].toString() + "a");
+      Text cell2 = new Text(COLUMNS[0].toString() + "b");
+      Text cell3 = new Text(COLUMNS[0].toString() + "c");
+
+      // write some data at two columns
+      batchUpdate = new BatchUpdate(row2);
+      batchUpdate.put(cell1, "column0 value".getBytes());
+      batchUpdate.put(cell2, "column1 value".getBytes());
+      region.batchUpdate(batchUpdate);
+
+      // flush
+      region.flushcache();
+
+      // assert i get both columns
+      results = region.getFull(row2, (Set<Text>)null, LATEST_TIMESTAMP);
+      assertEquals("Should have two columns in the results map", 2, results.size());
+      assertEquals("column0 value", new String(results.get(cell1).getValue()));
+      assertEquals("column1 value", new String(results.get(cell2).getValue()));
+
+      // write a delete for the first column
+      batchUpdate = new BatchUpdate(row2);
+      batchUpdate.delete(cell1);
+      batchUpdate.put(cell2, "column1 new value".getBytes());
+      region.batchUpdate(batchUpdate);
+
+      // flush
+      region.flushcache();
+
+      // assert i get the second column only
+      results = region.getFull(row2, (Set<Text>)null, LATEST_TIMESTAMP);
+      assertEquals("Should have one column in the results map", 1, results.size());
+      assertNull("column0 value", results.get(cell1));
+      assertEquals("column1 new value", new String(results.get(cell2).getValue()));
+
+      //
+      // Include a delete and value from the memcache in the mix
+      //
+      batchUpdate = new BatchUpdate(row2);
+      batchUpdate.delete(cell2);
+      batchUpdate.put(cell3, "column2 value!".getBytes());
+      region.batchUpdate(batchUpdate);
+
+      // assert i get the third column only
+      results = region.getFull(row2, (Set<Text>)null, LATEST_TIMESTAMP);
+      assertEquals("Should have one column in the results map", 1, results.size());
+      assertNull("column0 value", results.get(cell1));
+      assertNull("column1 value", results.get(cell2));
+      assertEquals("column2 value!", new String(results.get(cell3).getValue()));
+
+    } finally {
+      if (region != null) {
+        try {
+          region.close();
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+        region.getLog().closeAndDelete();
+      }
+    }
+  }
+
   private void assertColumnsPresent(final HRegion r, final Text row)
   throws IOException {
     Map<Text, Cell> result = r.getFull(row, null, HConstants.LATEST_TIMESTAMP);
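
The "ordering issue" part of the test above relies on getFull walking the flushed map files newest-first and skipping any column that already has an answer, so a later flush is never hidden behind an earlier one. A standalone sketch of that rule follows; it is plain Java, and the list of maps and the column name are invented stand-ins for flushed map files, not HBase API.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class NewestWinsSketch {
  public static void main(String[] args) {
    // each map stands in for one flushed map file; index 0 is the oldest flush
    List<Map<String, String>> mapFiles = new ArrayList<Map<String, String>>();
    Map<String, String> older = new HashMap<String, String>();
    older.put("colfamily1:a", "olderValue");
    Map<String, String> newer = new HashMap<String, String>();
    newer.put("colfamily1:a", "newerValue");
    mapFiles.add(older);
    mapFiles.add(newer);

    Map<String, String> results = new HashMap<String, String>();
    // walk the files newest-first, mirroring the i-- loop over getReaders()
    for (int i = mapFiles.size() - 1; i >= 0; i--) {
      for (Map.Entry<String, String> e : mapFiles.get(i).entrySet()) {
        if (!results.containsKey(e.getKey())) {   // skip columns already answered
          results.put(e.getKey(), e.getValue());
        }
      }
    }
    System.out.println(results.get("colfamily1:a")); // prints "newerValue"
  }
}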
@@ -136,7 +136,8 @@ public class TestHMemcache extends TestCase {
     for (int i = 0; i < ROW_COUNT; i++) {
       HStoreKey hsk = new HStoreKey(getRowName(i));
       TreeMap<Text, Cell> all = new TreeMap<Text, Cell>();
-      this.hmemcache.getFull(hsk, null, all);
+      TreeMap<Text, Long> deletes = new TreeMap<Text, Long>();
+      this.hmemcache.getFull(hsk, null, deletes, all);
       isExpectedRow(i, all);
     }
   }