HADOOP-2029 TestLogRolling fails too often in patch and nightlies
This is the second commit against this issue.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@583963 13f79535-47bb-0310-9956-ffa450edef68
parent 00f32814d1
commit 8b387823be
@@ -319,11 +319,13 @@ public class HLog implements HConstants {
           // Now remove old log files (if any)
+          LOG.debug("Found " + sequenceNumbers.size() + " logs to remove " +
+            "using oldest outstanding seqnum of " + oldestOutstandingSeqNum);
           if (sequenceNumbers.size() > 0) {
             for (Long seq : sequenceNumbers) {
               deleteLogFile(this.outputfiles.remove(seq), seq);
             }
           }
         }
       }
       this.numEntries = 0;
     } finally {
       this.cacheFlushLock.unlock();
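For orientation, here is a hypothetical, self-contained Java sketch of the cleanup this hunk touches in HLog.rollWriter(): closed log files are keyed by the highest sequence number they contain, and any file older than the oldest still-outstanding edit can be deleted. The method name, the Map<Long, String> standing in for HLog's outputfiles map of sequence number to Path, and the println standing in for deleteLogFile plus the new LOG.debug are assumptions, not the real HLog API.

import java.util.SortedMap;
import java.util.TreeMap;

// Hypothetical mirror of the old-log cleanup; not verbatim HLog.java.
final class OldLogCleanupSketch {
  static int deleteOldLogs(SortedMap<Long, String> outputFiles, long oldestOutstandingSeqNum) {
    // Sequence numbers strictly below the oldest outstanding edit are safe to remove.
    SortedMap<Long, String> eligible = outputFiles.headMap(oldestOutstandingSeqNum);
    int removed = 0;
    // Iterate over a copy so we can remove from the live map while looping.
    for (Long seq : new TreeMap<>(eligible).keySet()) {
      String file = outputFiles.remove(seq);
      System.out.println("deleting old log " + file + " with seqnum " + seq); // stand-in for deleteLogFile + LOG.debug
      removed++;
    }
    return removed;
  }
}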
@@ -1105,9 +1105,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   /** {@inheritDoc} */
   public void batchUpdate(Text regionName, long timestamp, BatchUpdate b)
   throws IOException {
-
     checkOpen();
-    requestCount.incrementAndGet();
+    this.requestCount.incrementAndGet();
     // If timestamp == LATEST_TIMESTAMP and we have deletes, then they need
     // special treatment. For these we need to first find the latest cell so
     // when we write the delete, we write it with the latest cells' timestamp
@@ -1239,14 +1238,13 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   //
 
   protected long startUpdate(Text regionName, Text row) throws IOException {
-
-    HRegion region = getRegion(regionName);
+    HRegion region = getRegion(regionName, false);
     return region.startUpdate(row);
   }
 
   protected void put(final Text regionName, final long lockid,
-    final Text column, final byte [] val) throws IOException {
-
+    final Text column, final byte [] val)
+  throws IOException {
     HRegion region = getRegion(regionName, true);
     region.put(lockid, column, val);
   }
@@ -1298,7 +1296,6 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
    */
   protected HRegion getRegion(final Text regionName)
   throws NotServingRegionException {
-
     return getRegion(regionName, false);
   }
 
@@ -1311,8 +1308,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
    * @throws NotServingRegionException
    */
   protected HRegion getRegion(final Text regionName,
-    final boolean checkRetiringRegions) throws NotServingRegionException {
-
+    final boolean checkRetiringRegions)
+  throws NotServingRegionException {
     HRegion region = null;
     this.lock.readLock().lock();
     try {
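The getRegion hunks above introduce an overload split: the one-argument lookup delegates to a two-argument form with checkRetiringRegions=false, while the write path (put) passes true. A hypothetical, self-contained Java sketch of that shape follows; what the flag actually consults inside HRegionServer is not shown in these hunks, so the second map below is only a stand-in, and none of these names are the real fields.

import java.util.HashMap;
import java.util.Map;

// Hypothetical mirror of the overload-delegation pattern; not verbatim HRegionServer.java.
final class RegionLookupSketch {
  static final class NotServingRegionException extends Exception {}

  private final Map<String, Object> onlineRegions = new HashMap<>();
  private final Map<String, Object> retiringRegions = new HashMap<>(); // stand-in, not the real field

  Object getRegion(String regionName) throws NotServingRegionException {
    // Read-side callers take the default and ignore regions that are closing.
    return getRegion(regionName, false);
  }

  Object getRegion(String regionName, boolean checkRetiringRegions)
  throws NotServingRegionException {
    Object region = onlineRegions.get(regionName);
    if (region == null && checkRetiringRegions) {
      region = retiringRegions.get(regionName); // assumption about what the flag consults
    }
    if (region == null) {
      throw new NotServingRegionException();
    }
    return region;
  }
}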
@@ -1342,7 +1339,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
    * @throws IOException
    */
   private void checkOpen() throws IOException {
-    if (stopRequested.get() || abortRequested) {
+    if (this.stopRequested.get() || this.abortRequested) {
       throw new IOException("Server not running");
     }
     if (!fsOk) {
@@ -53,8 +53,8 @@ public class MultiRegionTable extends HBaseTestCase {
    */
   public static void makeMultiRegionTable(Configuration conf,
     MiniHBaseCluster cluster, FileSystem localFs, String tableName,
-    String columnName) throws IOException {
-
+    String columnName)
+  throws IOException {
     final int retries = 10;
     final long waitTime =
       conf.getLong("hbase.master.meta.thread.rescanfrequency", 10L * 1000L);
@@ -62,7 +62,6 @@ public class MultiRegionTable extends HBaseTestCase {
     // This size should make it so we always split using the addContent
     // below. After adding all data, the first region is 1.3M. Should
     // set max filesize to be <= 1M.
-
     assertTrue(conf.getLong("hbase.hregion.max.filesize",
       HConstants.DEFAULT_MAX_FILE_SIZE) <= 1024 * 1024);
 
@@ -72,29 +71,25 @@ public class MultiRegionTable extends HBaseTestCase {
     Path d = fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
 
     // Get connection on the meta table and get count of rows.
-
     HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
-    int count = count(meta, HConstants.COLUMN_FAMILY_STR);
+    int count = count(meta, tableName);
     HTable t = new HTable(conf, new Text(tableName));
     addContent(new HTableIncommon(t), columnName);
 
     // All is running in the one JVM so I should be able to get the single
     // region instance and bring on a split.
-
     HRegionInfo hri =
       t.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
     HRegion r = cluster.regionThreads.get(0).getRegionServer().
       onlineRegions.get(hri.getRegionName());
 
     // Flush will provoke a split next time the split-checker thread runs.
-
     r.flushcache(false);
 
     // Now, wait until split makes it into the meta table.
-
     int oldCount = count;
     for (int i = 0; i < retries; i++) {
-      count = count(meta, HConstants.COLUMN_FAMILY_STR);
+      count = count(meta, tableName);
       if (count > oldCount) {
         break;
       }
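The hunk above is cut off inside the wait loop, so for readability here is a hypothetical, self-contained Java sketch of the poll-until-the-count-grows idiom the test uses: retry up to `retries` times, pausing `waitTime` between attempts, and stop as soon as the region count in the meta table increases. The Thread.sleep between attempts and all names in the sketch are assumptions; they are not the actual MultiRegionTable code.

// Hypothetical mirror of the wait-for-split loop; not verbatim MultiRegionTable.java.
final class WaitForSplitSketch {
  interface Counter { int count() throws Exception; }

  static boolean waitForIncrease(Counter c, int oldCount, int retries, long waitTime)
  throws Exception {
    for (int i = 0; i < retries; i++) {
      if (c.count() > oldCount) {
        return true;          // the split showed up in the meta table
      }
      Thread.sleep(waitTime); // assumed pause, mirroring hbase.master.meta.thread.rescanfrequency
    }
    return false;
  }
}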
@@ -111,7 +106,6 @@ public class MultiRegionTable extends HBaseTestCase {
     // Get info on the parent from the meta table. Pass in 'hri'. Its the
     // region we have been dealing with up to this. Its the parent of the
     // region split.
-
     Map<Text, byte []> data = getSplitParentInfo(meta, hri);
     HRegionInfo parent =
       Writables.getHRegionInfoOrNull(data.get(HConstants.COL_REGIONINFO));
@@ -185,25 +179,29 @@ public class MultiRegionTable extends HBaseTestCase {
   }
 
   /*
-   * Count of rows in table for given column.
+   * Count of regions in passed meta table.
    * @param t
    * @param column
    * @return
    * @throws IOException
    */
-  private static int count(final HTable t, final String column)
+  private static int count(final HTable t, final String tableName)
   throws IOException {
 
     int size = 0;
-    Text [] cols = new Text[] {new Text(column)};
+    Text [] cols = new Text[] {HConstants.COLUMN_FAMILY};
     HScannerInterface s = t.obtainScanner(cols, HConstants.EMPTY_START_ROW,
       System.currentTimeMillis(), null);
     try {
       HStoreKey curKey = new HStoreKey();
       TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
       while(s.next(curKey, curVals)) {
+        HRegionInfo hri = Writables.
+          getHRegionInfoOrNull(curVals.get(HConstants.COL_REGIONINFO));
+        if (hri.getTableDesc().getName().toString().equals(tableName)) {
           size++;
+        }
       }
       return size;
     } finally {
       s.close();
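The change above narrows count() from counting every scanned row to counting only the regions of the named table. A hypothetical, self-contained Java sketch of that filtering step follows; RegionInfo and the method name stand in for HBase's HRegionInfo and the scanner loop and are not the real API.

import java.util.List;

// Hypothetical mirror of the revised count() filtering; not verbatim MultiRegionTable.java.
final class MetaRegionCountSketch {
  static final class RegionInfo {
    final String tableName;
    RegionInfo(String tableName) { this.tableName = tableName; }
  }

  static int countRegionsOfTable(List<RegionInfo> metaRows, String tableName) {
    int size = 0;
    for (RegionInfo hri : metaRows) {
      // Rows whose region info cannot be decoded, or that belong to another table, are skipped.
      if (hri != null && hri.tableName.equals(tableName)) {
        size++;
      }
    }
    return size;
  }
}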
@@ -214,8 +212,8 @@ public class MultiRegionTable extends HBaseTestCase {
    * @return Return row info for passed in region or null if not found in scan.
    */
   private static Map<Text, byte []> getSplitParentInfo(final HTable t,
-    final HRegionInfo parent) throws IOException {
-
+    final HRegionInfo parent)
+  throws IOException {
     HScannerInterface s = t.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY,
       HConstants.EMPTY_START_ROW, System.currentTimeMillis(), null);
     try {
@@ -227,6 +225,7 @@ public class MultiRegionTable extends HBaseTestCase {
         if (hri == null) {
           continue;
         }
+        // Make sure I get the parent.
         if (hri.getRegionName().toString().
             equals(parent.getRegionName().toString())) {
           return curVals;