HBASE-613 Timestamp-anchored scanning fails to find all records
Three problems:
- HRegionServer.next did not return null if there were no results
- HTable$ClientScanner.next had wrong loop termination
- TestMergeTool did not correctly set fs, hbase.rootdir

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@671731 13f79535-47bb-0310-9956-ffa450edef68
parent b33ad41da3
commit ae97400be0
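Taken together, the first two fixes change what an empty reply from a region server means to the client. The fragment below is only an illustration in plain Java (a List stands in for RowResult; none of this is HBase code): it evaluates the old and the new while-condition from HTable$ClientScanner.next against a null reply and against an empty reply.

import java.util.Collections;
import java.util.List;

public class LoopConditionDemo {
  public static void main(String[] args) {
    List<String> nullReply = null;                      // server says: nothing left in this region
    List<String> emptyReply = Collections.emptyList();  // server returns a result with no cells

    // Old condition from ClientScanner.next: a null reply ends the scan
    // immediately, without consulting nextScanner() for the next region.
    System.out.println(keepScanningOld(nullReply));   // false
    System.out.println(keepScanningOld(emptyReply));  // true

    // New condition: both a null reply and an empty reply make the client
    // try to advance to the next region before giving up.
    System.out.println(keepScanningNew(nullReply));   // true
    System.out.println(keepScanningNew(emptyReply));  // true
  }

  static boolean keepScanningOld(List<String> values) {
    return values != null && values.size() == 0;
  }

  static boolean keepScanningNew(List<String> values) {
    return values == null || values.size() == 0;
  }
}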
HTable.java

@@ -1343,8 +1343,8 @@ public class HTable {
       byte [] localStartKey = oldRegion == null? startRow: oldRegion.getEndKey();

       if (CLIENT_LOG.isDebugEnabled()) {
-        CLIENT_LOG.debug("Advancing internal scanner to startKey at " +
-          Bytes.toString(localStartKey));
+        CLIENT_LOG.debug("Advancing internal scanner to startKey at '" +
+          Bytes.toString(localStartKey) + "'");
       }

       try {
@@ -1387,7 +1387,7 @@ public class HTable {
       RowResult values = null;
       do {
         values = getConnection().getRegionServerWithRetries(callable);
-      } while (values != null && values.size() == 0 && nextScanner());
+      } while ((values == null || values.size() == 0) && nextScanner());

       if (values != null && values.size() != 0) {
         return values;
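For orientation, here is a compact sketch of the pattern this loop implements: one logical scan backed by a chain of per-region scanners, where a null or empty batch triggers advancement to the next region. The names (RegionScanner, ChainedScannerSketch, advance) are hypothetical stand-ins, not the real HTable$ClientScanner internals.

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class ChainedScannerSketch {
  /** Stand-in for a per-region scanner; null means the region is exhausted. */
  interface RegionScanner {
    List<String> next();
  }

  private final Iterator<RegionScanner> regions;
  private RegionScanner current;

  ChainedScannerSketch(List<RegionScanner> perRegion) {
    this.regions = perRegion.iterator();
    this.current = regions.hasNext() ? regions.next() : null;
  }

  /** Mirrors the shape of the fixed ClientScanner.next loop. */
  List<String> next() {
    List<String> values;
    do {
      values = (current == null) ? null : current.next();
    } while ((values == null || values.isEmpty()) && advance());
    return (values == null || values.isEmpty()) ? null : values;
  }

  /** Plays the role of nextScanner(): move on to the next region, if any. */
  private boolean advance() {
    current = regions.hasNext() ? regions.next() : null;
    return current != null;
  }

  public static void main(String[] args) {
    RegionScanner exhausted = () -> null;                   // first region has nothing left to return
    RegionScanner populated = () -> Arrays.asList("row-a"); // second region still has a matching row
    ChainedScannerSketch scan = new ChainedScannerSketch(Arrays.asList(exhausted, populated));
    System.out.println(scan.next()); // [row-a] -- the exhausted region is skipped, not fatal
  }
}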
HRegionServer.java

@@ -1128,7 +1128,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
          // No data for this row, go get another.
          results.clear();
        }
-      return new RowResult(key.getRow(), values);
+      return values.size() == 0 ? null : new RowResult(key.getRow(), values);
     } catch (IOException e) {
       checkFileSystem();
       throw e;
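The server-side half of the fix is why a null reply can now be trusted to mean "nothing more here". The sketch below is illustrative only: a TreeMap of column names to cell bytes stands in for the real result map, and the timestamp filter is a simplification of what a timestamp-anchored scanner does. When no cell of a row is old enough for the requested timestamp, the per-row result comes back empty, and the fixed code reports that as null rather than as an empty RowResult.

import java.util.Map;
import java.util.TreeMap;

public class ServerNextSketch {
  /** Hypothetical per-row scan: keep only cells at or before the requested timestamp. */
  static TreeMap<String, byte[]> cellsAtOrBefore(Map<String, Long> cellTimestamps, long ts) {
    TreeMap<String, byte[]> values = new TreeMap<>();
    for (Map.Entry<String, Long> e : cellTimestamps.entrySet()) {
      if (e.getValue() <= ts) {
        values.put(e.getKey(), new byte[0]); // cell payload omitted in this sketch
      }
    }
    return values;
  }

  /** Analogue of the fixed return statement: an empty row is reported as null. */
  static TreeMap<String, byte[]> next(Map<String, Long> cellTimestamps, long ts) {
    TreeMap<String, byte[]> values = cellsAtOrBefore(cellTimestamps, ts);
    return values.size() == 0 ? null : values;
  }

  public static void main(String[] args) {
    Map<String, Long> row = Map.of("info:a", 200L, "info:b", 300L);
    System.out.println(next(row, 100L)); // null -> the client advances to the next region
    System.out.println(next(row, 250L)); // one cell survives -> real data is returned as-is
  }
}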
TestMergeTool.java

@@ -25,7 +25,6 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -48,7 +47,6 @@ public class TestMergeTool extends HBaseTestCase {
  private HTableDescriptor desc;
  private byte [][][] rows;
  private MiniDFSCluster dfsCluster = null;
  private FileSystem fs;

  /** {@inheritDoc} */
  @Override
@@ -101,13 +99,19 @@ public class TestMergeTool extends HBaseTestCase {
    // Start up dfs
    this.dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
    this.fs = this.dfsCluster.getFileSystem();

    conf.set("fs.default.name", fs.getUri().toString());
    Path parentdir = fs.getHomeDirectory();
    conf.set(HConstants.HBASE_DIR, parentdir.toString());
    fs.mkdirs(parentdir);
    FSUtils.setVersion(fs, parentdir);

    // Note: we must call super.setUp after starting the mini cluster or
    // we will end up with a local file system
    super.setUp();

    try {
      // Create root and meta regions
      createRootAndMetaRegions();
      /*
       * Create the regions we will merge
       */
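The gist of the test fix is ordering: point the configuration at the mini DFS (fs.default.name and hbase.rootdir) before super.setUp() runs, otherwise the test ends up rooted on the local file system. Below is a minimal sketch of that pattern, with a hypothetical MyHBaseTest class in place of TestMergeTool and a simple tearDown added for completeness.

import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.FSUtils;

public class MyHBaseTest extends HBaseTestCase {
  private MiniDFSCluster dfsCluster = null;
  private FileSystem fs;

  @Override
  public void setUp() throws Exception {
    // 1. Start the mini DFS first so we know which file system to point at.
    this.dfsCluster = new MiniDFSCluster(conf, 2, true, (String[]) null);
    this.fs = this.dfsCluster.getFileSystem();

    // 2. Make that file system the default, root HBase inside it, and
    //    write the version marker the rest of the code expects.
    conf.set("fs.default.name", fs.getUri().toString());
    Path parentdir = fs.getHomeDirectory();
    conf.set(HConstants.HBASE_DIR, parentdir.toString());
    fs.mkdirs(parentdir);
    FSUtils.setVersion(fs, parentdir);

    // 3. Only now let the base class initialize; doing this earlier would
    //    leave the test on the local file system.
    super.setUp();
  }

  @Override
  public void tearDown() throws Exception {
    super.tearDown();
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}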
@@ -123,11 +127,6 @@ public class TestMergeTool extends HBaseTestCase {
        b.put(COLUMN_NAME, new ImmutableBytesWritable(row).get());
        regions[i].batchUpdate(b);
      }
    }
    // Create root and meta regions
    createRootAndMetaRegions();
    // Insert the regions we created into the meta
    for(int i = 0; i < regions.length; i++) {
      HRegion.addRegionToMETA(meta, regions[i]);
    }
    // Close root and meta regions