HADOOP-2029 TestLogRolling fails too often in patch and nightlies

This is the second commit against this issue.


git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@583963 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2007-10-11 21:49:40 +00:00
parent 00f32814d1
commit 8b387823be
3 changed files with 32 additions and 34 deletions

View File

@ -319,8 +319,10 @@ public class HLog implements HConstants {
// Now remove old log files (if any) // Now remove old log files (if any)
LOG.debug("Found " + sequenceNumbers.size() + " logs to remove " + LOG.debug("Found " + sequenceNumbers.size() + " logs to remove " +
"using oldest outstanding seqnum of " + oldestOutstandingSeqNum); "using oldest outstanding seqnum of " + oldestOutstandingSeqNum);
for (Long seq : sequenceNumbers) { if (sequenceNumbers.size() > 0) {
deleteLogFile(this.outputfiles.remove(seq), seq); for (Long seq : sequenceNumbers) {
deleteLogFile(this.outputfiles.remove(seq), seq);
}
} }
} }
} }

View File

@ -1104,10 +1104,9 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
/** {@inheritDoc} */ /** {@inheritDoc} */
public void batchUpdate(Text regionName, long timestamp, BatchUpdate b) public void batchUpdate(Text regionName, long timestamp, BatchUpdate b)
throws IOException { throws IOException {
checkOpen(); checkOpen();
requestCount.incrementAndGet(); this.requestCount.incrementAndGet();
// If timestamp == LATEST_TIMESTAMP and we have deletes, then they need // If timestamp == LATEST_TIMESTAMP and we have deletes, then they need
// special treatment. For these we need to first find the latest cell so // special treatment. For these we need to first find the latest cell so
// when we write the delete, we write it with the latest cells' timestamp // when we write the delete, we write it with the latest cells' timestamp
@ -1116,7 +1115,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
List<Text> deletes = null; List<Text> deletes = null;
try { try {
long lockid = startUpdate(regionName, b.getRow()); long lockid = startUpdate(regionName, b.getRow());
for(BatchOperation op: b) { for (BatchOperation op: b) {
switch(op.getOp()) { switch(op.getOp()) {
case PUT: case PUT:
put(regionName, lockid, op.getColumn(), op.getValue()); put(regionName, lockid, op.getColumn(), op.getValue());
@ -1239,20 +1238,19 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
// //
protected long startUpdate(Text regionName, Text row) throws IOException { protected long startUpdate(Text regionName, Text row) throws IOException {
HRegion region = getRegion(regionName, false);
HRegion region = getRegion(regionName);
return region.startUpdate(row); return region.startUpdate(row);
} }
protected void put(final Text regionName, final long lockid, protected void put(final Text regionName, final long lockid,
final Text column, final byte [] val) throws IOException { final Text column, final byte [] val)
throws IOException {
HRegion region = getRegion(regionName, true); HRegion region = getRegion(regionName, true);
region.put(lockid, column, val); region.put(lockid, column, val);
} }
protected void delete(Text regionName, long lockid, Text column) protected void delete(Text regionName, long lockid, Text column)
throws IOException { throws IOException {
HRegion region = getRegion(regionName); HRegion region = getRegion(regionName);
region.delete(lockid, column); region.delete(lockid, column);
} }
@ -1297,8 +1295,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
* @throws NotServingRegionException * @throws NotServingRegionException
*/ */
protected HRegion getRegion(final Text regionName) protected HRegion getRegion(final Text regionName)
throws NotServingRegionException { throws NotServingRegionException {
return getRegion(regionName, false); return getRegion(regionName, false);
} }
@ -1311,8 +1308,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
* @throws NotServingRegionException * @throws NotServingRegionException
*/ */
protected HRegion getRegion(final Text regionName, protected HRegion getRegion(final Text regionName,
final boolean checkRetiringRegions) throws NotServingRegionException { final boolean checkRetiringRegions)
throws NotServingRegionException {
HRegion region = null; HRegion region = null;
this.lock.readLock().lock(); this.lock.readLock().lock();
try { try {
@ -1342,7 +1339,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
* @throws IOException * @throws IOException
*/ */
private void checkOpen() throws IOException { private void checkOpen() throws IOException {
if (stopRequested.get() || abortRequested) { if (this.stopRequested.get() || this.abortRequested) {
throw new IOException("Server not running"); throw new IOException("Server not running");
} }
if (!fsOk) { if (!fsOk) {

View File

@ -53,8 +53,8 @@ public class MultiRegionTable extends HBaseTestCase {
*/ */
public static void makeMultiRegionTable(Configuration conf, public static void makeMultiRegionTable(Configuration conf,
MiniHBaseCluster cluster, FileSystem localFs, String tableName, MiniHBaseCluster cluster, FileSystem localFs, String tableName,
String columnName) throws IOException { String columnName)
throws IOException {
final int retries = 10; final int retries = 10;
final long waitTime = final long waitTime =
conf.getLong("hbase.master.meta.thread.rescanfrequency", 10L * 1000L); conf.getLong("hbase.master.meta.thread.rescanfrequency", 10L * 1000L);
@ -62,7 +62,6 @@ public class MultiRegionTable extends HBaseTestCase {
// This size should make it so we always split using the addContent // This size should make it so we always split using the addContent
// below. After adding all data, the first region is 1.3M. Should // below. After adding all data, the first region is 1.3M. Should
// set max filesize to be <= 1M. // set max filesize to be <= 1M.
assertTrue(conf.getLong("hbase.hregion.max.filesize", assertTrue(conf.getLong("hbase.hregion.max.filesize",
HConstants.DEFAULT_MAX_FILE_SIZE) <= 1024 * 1024); HConstants.DEFAULT_MAX_FILE_SIZE) <= 1024 * 1024);
@ -72,29 +71,25 @@ public class MultiRegionTable extends HBaseTestCase {
Path d = fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR))); Path d = fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
// Get connection on the meta table and get count of rows. // Get connection on the meta table and get count of rows.
HTable meta = new HTable(conf, HConstants.META_TABLE_NAME); HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
int count = count(meta, HConstants.COLUMN_FAMILY_STR); int count = count(meta, tableName);
HTable t = new HTable(conf, new Text(tableName)); HTable t = new HTable(conf, new Text(tableName));
addContent(new HTableIncommon(t), columnName); addContent(new HTableIncommon(t), columnName);
// All is running in the one JVM so I should be able to get the single // All is running in the one JVM so I should be able to get the single
// region instance and bring on a split. // region instance and bring on a split.
HRegionInfo hri = HRegionInfo hri =
t.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo(); t.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
HRegion r = cluster.regionThreads.get(0).getRegionServer(). HRegion r = cluster.regionThreads.get(0).getRegionServer().
onlineRegions.get(hri.getRegionName()); onlineRegions.get(hri.getRegionName());
// Flush will provoke a split next time the split-checker thread runs. // Flush will provoke a split next time the split-checker thread runs.
r.flushcache(false); r.flushcache(false);
// Now, wait until split makes it into the meta table. // Now, wait until split makes it into the meta table.
int oldCount = count; int oldCount = count;
for (int i = 0; i < retries; i++) { for (int i = 0; i < retries; i++) {
count = count(meta, HConstants.COLUMN_FAMILY_STR); count = count(meta, tableName);
if (count > oldCount) { if (count > oldCount) {
break; break;
} }
@ -111,7 +106,6 @@ public class MultiRegionTable extends HBaseTestCase {
// Get info on the parent from the meta table. Pass in 'hri'. Its the // Get info on the parent from the meta table. Pass in 'hri'. Its the
// region we have been dealing with up to this. Its the parent of the // region we have been dealing with up to this. Its the parent of the
// region split. // region split.
Map<Text, byte []> data = getSplitParentInfo(meta, hri); Map<Text, byte []> data = getSplitParentInfo(meta, hri);
HRegionInfo parent = HRegionInfo parent =
Writables.getHRegionInfoOrNull(data.get(HConstants.COL_REGIONINFO)); Writables.getHRegionInfoOrNull(data.get(HConstants.COL_REGIONINFO));
@ -185,24 +179,28 @@ public class MultiRegionTable extends HBaseTestCase {
} }
/* /*
* Count of rows in table for given column. * Count of regions in passed meta table.
* @param t * @param t
* @param column * @param column
* @return * @return
* @throws IOException * @throws IOException
*/ */
private static int count(final HTable t, final String column) private static int count(final HTable t, final String tableName)
throws IOException { throws IOException {
int size = 0; int size = 0;
Text [] cols = new Text[] {new Text(column)}; Text [] cols = new Text[] {HConstants.COLUMN_FAMILY};
HScannerInterface s = t.obtainScanner(cols, HConstants.EMPTY_START_ROW, HScannerInterface s = t.obtainScanner(cols, HConstants.EMPTY_START_ROW,
System.currentTimeMillis(), null); System.currentTimeMillis(), null);
try { try {
HStoreKey curKey = new HStoreKey(); HStoreKey curKey = new HStoreKey();
TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>(); TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
while(s.next(curKey, curVals)) { while(s.next(curKey, curVals)) {
size++; HRegionInfo hri = Writables.
getHRegionInfoOrNull(curVals.get(HConstants.COL_REGIONINFO));
if (hri.getTableDesc().getName().toString().equals(tableName)) {
size++;
}
} }
return size; return size;
} finally { } finally {
@ -214,8 +212,8 @@ public class MultiRegionTable extends HBaseTestCase {
* @return Return row info for passed in region or null if not found in scan. * @return Return row info for passed in region or null if not found in scan.
*/ */
private static Map<Text, byte []> getSplitParentInfo(final HTable t, private static Map<Text, byte []> getSplitParentInfo(final HTable t,
final HRegionInfo parent) throws IOException { final HRegionInfo parent)
throws IOException {
HScannerInterface s = t.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY, HScannerInterface s = t.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY,
HConstants.EMPTY_START_ROW, System.currentTimeMillis(), null); HConstants.EMPTY_START_ROW, System.currentTimeMillis(), null);
try { try {
@ -223,10 +221,11 @@ public class MultiRegionTable extends HBaseTestCase {
TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>(); TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
while(s.next(curKey, curVals)) { while(s.next(curKey, curVals)) {
HRegionInfo hri = Writables. HRegionInfo hri = Writables.
getHRegionInfoOrNull(curVals.get(HConstants.COL_REGIONINFO)); getHRegionInfoOrNull(curVals.get(HConstants.COL_REGIONINFO));
if (hri == null) { if (hri == null) {
continue; continue;
} }
// Make sure I get the parent.
if (hri.getRegionName().toString(). if (hri.getRegionName().toString().
equals(parent.getRegionName().toString())) { equals(parent.getRegionName().toString())) {
return curVals; return curVals;