HADOOP-1616 [hbase] Sporadic TestTable failures

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@557098 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2007-07-18 00:38:58 +00:00
parent cbb844d5f0
commit 6a64ae1542
13 changed files with 86 additions and 89 deletions

View File: CHANGES.txt

@@ -64,4 +64,5 @@ Trunk (unreleased changes)
40. HADOOP-1607 [shell] Clear screen command (Edward Yoon via Stack)
41. HADOOP-1614 [hbase] HClient does not protect itself from simultaneous updates
42. HADOOP-1468 Add HBase batch update to reduce RPC overhead
43. HADOOP-1616 [hbase] Sporadic TestTable failures

View File: HLog.java

@@ -37,9 +37,9 @@ import java.util.*;
*
* <p>A single HLog is used by several HRegions simultaneously.
*
* <p>Each HRegion is identified by a unique long int. HRegions do not need to
* declare themselves before using the HLog; they simply include their
* HRegion-id in the {@link #append(Text, Text, Text, TreeMap, long)} or
* <p>Each HRegion is identified by a unique long <code>int</code>. HRegions do
* not need to declare themselves before using the HLog; they simply include
* their HRegion-id in the {@link #append(Text, Text, Text, TreeMap, long)} or
* {@link #completeCacheFlush(Text, Text, long)} calls.
*
* <p>An HLog consists of multiple on-disk files, which have a chronological
@@ -95,16 +95,14 @@ public class HLog implements HConstants {
* @throws IOException
*/
static void splitLog(Path rootDir, Path srcDir, FileSystem fs,
Configuration conf) throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("splitting log files");
}
Configuration conf) throws IOException {
Path logfiles[] = fs.listPaths(srcDir);
if(LOG.isDebugEnabled()) {
LOG.debug("splitting " + logfiles.length + " log files in " +
srcDir.toString());
}
TreeMap<Text, SequenceFile.Writer> logWriters =
new TreeMap<Text, SequenceFile.Writer>();
try {
for(int i = 0; i < logfiles.length; i++) {
SequenceFile.Reader in =
@@ -115,40 +113,35 @@ public class HLog implements HConstants {
while(in.next(key, val)) {
Text regionName = key.getRegionName();
SequenceFile.Writer w = logWriters.get(regionName);
if(w == null) {
if (w == null) {
Path logfile = new Path(HStoreFile.getHRegionDir(rootDir,
regionName), HREGION_OLDLOGFILE_NAME);
w = SequenceFile.createWriter(fs, conf, logfile, HLogKey.class,
HLogEdit.class);
logWriters.put(regionName, w);
}
w.append(key, val);
}
} finally {
in.close();
}
}
} finally {
for(SequenceFile.Writer w: logWriters.values()) {
for (SequenceFile.Writer w: logWriters.values()) {
w.close();
}
}
if(fs.exists(srcDir)) {
if(! fs.delete(srcDir)) {
LOG.error("Cannot delete: " + srcDir);
if(! FileUtil.fullyDelete(new File(srcDir.toString()))) {
throw new IOException("Cannot delete: " + srcDir);
}
}
}
if(LOG.isDebugEnabled()) {
LOG.debug("log file splitting completed");
LOG.debug("log file splitting completed for " + srcDir.toString());
}
}
@@ -213,25 +206,25 @@ public class HLog implements HConstants {
}
}
if(LOG.isDebugEnabled()) {
LOG.debug("closing current log writer and getting a new one");
}
// Close the current writer (if any), and grab a new one.
if(writer != null) {
writer.close();
if(filenum > 0) {
outputfiles.put(logSeqNum - 1, computeFilename(filenum - 1));
Path p = computeFilename(filenum - 1);
if(LOG.isDebugEnabled()) {
LOG.debug("Closing current log writer " + p.toString() +
" to get a new one");
}
if (filenum > 0) {
outputfiles.put(logSeqNum - 1, p);
}
}
Path newPath = computeFilename(filenum++);
this.writer = SequenceFile.createWriter(fs, conf, newPath, HLogKey.class, HLogEdit.class);
this.writer = SequenceFile.createWriter(fs, conf, newPath,
HLogKey.class, HLogEdit.class);
if(LOG.isDebugEnabled()) {
LOG.debug("new log writer created");
LOG.debug("new log writer created at " + newPath);
}
// Can we delete any of the old log files?
@@ -239,8 +232,8 @@ public class HLog implements HConstants {
// over all the regions.
long oldestOutstandingSeqNum = Long.MAX_VALUE;
for(Iterator<Long> it = regionToLastFlush.values().iterator(); it.hasNext(); ) {
long curSeqNum = it.next().longValue();
for(Long l: regionToLastFlush.values()) {
long curSeqNum = l.longValue();
if(curSeqNum < oldestOutstandingSeqNum) {
oldestOutstandingSeqNum = curSeqNum;
@@ -249,14 +242,8 @@ public class HLog implements HConstants {
// Next, remove all files with a final ID that's older
// than the oldest pending region-operation.
if(LOG.isDebugEnabled()) {
LOG.debug("removing old log files");
}
for(Iterator<Long> it = outputfiles.keySet().iterator(); it.hasNext(); ) {
for(Iterator<Long> it = outputfiles.keySet().iterator(); it.hasNext();) {
long maxSeqNum = it.next().longValue();
if(maxSeqNum < oldestOutstandingSeqNum) {
Path p = outputfiles.get(maxSeqNum);
it.remove();
@@ -269,16 +256,13 @@ public class HLog implements HConstants {
}
// Actually delete them, if any!
for(Iterator<Path> it = toDeleteList.iterator(); it.hasNext(); ) {
Path p = it.next();
if(LOG.isDebugEnabled()) {
LOG.debug("removing old log file " + p.toString());
}
fs.delete(p);
}
if(LOG.isDebugEnabled()) {
LOG.debug("old log files deleted");
}
this.numEntries = 0;
}
}
@@ -307,13 +291,10 @@ public class HLog implements HConstants {
*/
synchronized void close() throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("closing log writer");
LOG.debug("closing log writer in " + this.dir.toString());
}
this.writer.close();
this.closed = true;
if(LOG.isDebugEnabled()) {
LOG.debug("log writer closed");
}
}
/**

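The hunks above show splitLog's full signature and behavior: it rewrites the log files a dead region server left behind into one HREGION_OLDLOGFILE_NAME file per region under the HBase root, then deletes the source directory. A hedged sketch of a caller, for orientation only; the driver class, paths, and values below are hypothetical and not part of this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical driver; it would have to live in org.apache.hadoop.hbase,
// since splitLog is package-private.
public class SplitLogSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path rootDir = new Path("/hbase");               // assumed HBase root dir
    Path srcDir = new Path(rootDir, "log_server1");  // assumed dead server's log dir
    FileSystem fs = rootDir.getFileSystem(conf);
    // Splits every edit in srcDir's log files out to a per-region
    // oldlogfile under rootDir; srcDir is removed once splitting succeeds.
    HLog.splitLog(rootDir, srcDir, fs, conf);
  }
}
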
View File: HMaster.java

@@ -1841,8 +1841,9 @@ public class HMaster implements HConstants, HMasterInterface,
connection.commit(metaRegionName, clientId, lockid,
System.currentTimeMillis());
// 4. Close the new region to flush it to disk
// 4. Close the new region to flush it to disk. Close its log file too.
r.close();
r.getLog().closeAndDelete();
// 5. Get it assigned to a server
unassignedRegions.put(regionName, info);

View File: HRegion.java

@@ -436,7 +436,6 @@ public class HRegion implements HConstants {
}
try {
return allHStoreFiles;
} finally {
synchronized (writestate) {
writestate.closed = true;
@@ -1536,6 +1535,9 @@ public class HRegion implements HConstants {
/**
* Convenience method creating new HRegions.
* Note, this method creates an {@link HLog} for the created region. It
* needs to be closed explicitly. Use {@link HRegion#getLog()} to get
* access.
* @param regionId ID to use
* @param tableDesc Descriptor
* @param rootDir Root directory of HBase instance
@@ -1550,11 +1552,13 @@ public class HRegion implements HConstants {
return createHRegion(new HRegionInfo(regionId, tableDesc, null, null),
rootDir, conf, null);
}
/**
* Convenience method creating new HRegions. Used by createTable and by the
* bootstrap code in the HMaster constructor
*
* bootstrap code in the HMaster constructor.
* Note, this method creates an {@link HLog} for the created region. It
* needs to be closed explicitly. Use {@link HRegion#getLog()} to get
* access.
* @param info Info for region to create.
* @param rootDir Root directory for HBase instance
* @param conf
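
The note added to this javadoc is the crux of the fix: a region created through createHRegion owns an HLog that the caller must close explicitly, which is exactly what the test changes below start doing. A minimal sketch of the pattern, assuming the org.apache.hadoop.hbase package; the helper, the region id 99L, and the desc/rootDir/conf values are hypothetical stand-ins:

// Hypothetical helper; desc, rootDir and conf would come from the caller.
static void useRegion(HTableDescriptor desc, Path rootDir, Configuration conf)
    throws IOException {
  HRegion r = HRegion.createHRegion(
      new HRegionInfo(99L, desc, null, null), rootDir, conf, null);
  try {
    // ... read from / write to the region ...
  } finally {
    r.close();                    // flush the region and mark it closed
    r.getLog().closeAndDelete();  // then close and remove the log it created
  }
}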

View File: AbstractMergeTestBase.java

@@ -95,8 +95,8 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
// Now create the root and meta regions and insert the data regions
// created above into the meta
HRegion root = createNewHRegion(fs, dir, conf, HGlobals.rootTableDesc, 0L, null, null);
HRegion meta = createNewHRegion(fs, dir, conf, HGlobals.metaTableDesc, 1L, null, null);
HRegion root = createNewHRegion(dir, conf, HGlobals.rootTableDesc, 0L, null, null);
HRegion meta = createNewHRegion(dir, conf, HGlobals.metaTableDesc, 1L, null, null);
HRegion.addRegionToMETA(root, meta);
@@ -129,7 +129,8 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
private HRegion createAregion(Text startKey, Text endKey, int firstRow, int nrows)
throws IOException {
HRegion region = createNewHRegion(fs, dir, conf, desc, rand.nextLong(), startKey, endKey);
HRegion region = createNewHRegion(dir, conf, desc, rand.nextLong(), startKey, endKey);
System.out.println("created region " + region.getRegionName());

View File: HBaseTestCase.java

@@ -52,17 +52,15 @@ public abstract class HBaseTestCase extends TestCase {
return new Path(StaticTestEnvironment.TEST_DIRECTORY_KEY, testName);
}
protected HRegion createNewHRegion(FileSystem fs, Path dir,
Configuration conf, HTableDescriptor desc, long regionId, Text startKey,
Text endKey) throws IOException {
protected HRegion createNewHRegion(Path dir, Configuration c,
HTableDescriptor desc, long regionId, Text startKey, Text endKey)
throws IOException {
HRegionInfo info = new HRegionInfo(regionId, desc, startKey, endKey);
Path regionDir = HStoreFile.getHRegionDir(dir, info.regionName);
FileSystem fs = dir.getFileSystem(c);
fs.mkdirs(regionDir);
return new HRegion(dir,
new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME), conf),
fs, conf, info, null);
}
}
}

View File: MiniHBaseCluster.java

@@ -266,7 +266,13 @@ public class MiniHBaseCluster implements HConstants {
}
}
LOG.info("HBase Cluster shutdown complete");
// Close the file system. Will complain if files open so helps w/ leaks.
try {
this.cluster.getFileSystem().close();
} catch (IOException e) {
LOG.error("Closing down dfs", e);
}
if(cluster != null) {
LOG.info("Shutting down Mini DFS cluster");
cluster.shutdown();

View File: TestHLog.java

@@ -88,7 +88,7 @@ public class TestHLog extends HBaseTestCase implements HConstants {
}
} finally {
if (log != null) {
log.close();
log.closeAndDelete();
}
if (reader != null) {
reader.close();

View File: TestHRegion.java

@@ -610,7 +610,7 @@ public class TestHRegion extends HBaseTestCase implements RegionUnavailableListener
System.out.println("Merge regions elapsed time: "
+ ((System.currentTimeMillis() - startTime) / 1000.0));
fs.delete(oldRegionPath);
fs.delete(oldRegion1);
fs.delete(oldRegion2);
@@ -812,7 +812,11 @@ public class TestHRegion extends HBaseTestCase implements RegionUnavailableListener
private void cleanup() {
// Shut down the mini cluster
try {
log.closeAndDelete();
} catch (IOException e) {
e.printStackTrace();
}
cluster.shutdown();
cluster = null;

View File: TestMergeTable.java

@@ -19,21 +19,16 @@
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
public class TestMergeTable extends AbstractMergeTestBase {
public void testMergeTable() {
public void testMergeTable() throws IOException {
MiniHBaseCluster hCluster = new MiniHBaseCluster(conf, 1, dfsCluster);
try {
MiniHBaseCluster hCluster = new MiniHBaseCluster(conf, 1, dfsCluster);
try {
HMerge.merge(conf, fs, desc.getName());
} finally {
hCluster.shutdown();
}
} catch(Throwable t) {
t.printStackTrace();
fail();
HMerge.merge(conf, fs, desc.getName());
} finally {
hCluster.shutdown();
}
}
}
}

View File: TestScanner.java

@@ -260,7 +260,6 @@ public class TestScanner extends HBaseTestCase {
region.close();
log.closeAndDelete();
} catch(IOException e) {
e.printStackTrace();
throw e;

View File: TestScanner2.java

@@ -67,12 +67,19 @@ public class TestScanner2 extends HBaseClusterTestCase {
newRegions.add(HRegion.createHRegion(
new HRegionInfo(3L, desc, new Text("midway"), null),
homedir, this.conf, null));
for (HRegion r: newRegions) {
HRegion.addRegionToMETA(client, HConstants.META_TABLE_NAME, r,
this.cluster.getHMasterAddress(), -1L);
try {
for (HRegion r : newRegions) {
HRegion.addRegionToMETA(client, HConstants.META_TABLE_NAME, r,
this.cluster.getHMasterAddress(), -1L);
}
regions = scan(client, HConstants.META_TABLE_NAME);
assertEquals("Should be two regions only", 2, regions.size());
} finally {
for (HRegion r : newRegions) {
r.close();
r.getLog().closeAndDelete();
}
}
regions = scan(client, HConstants.META_TABLE_NAME);
assertEquals("Should be two regions only", 2, regions.size());
}
private List<HRegionInfo> scan(final HClient client, final Text table)

View File: TestTableMapReduce.java

@@ -89,11 +89,11 @@ public class TestTableMapReduce extends HBaseTestCase {
// create the root and meta regions and insert the data region into the meta
HRegion root = createNewHRegion(fs, dir, conf, HGlobals.rootTableDesc, 0L, null, null);
HRegion meta = createNewHRegion(fs, dir, conf, HGlobals.metaTableDesc, 1L, null, null);
HRegion root = createNewHRegion(dir, conf, HGlobals.rootTableDesc, 0L, null, null);
HRegion meta = createNewHRegion(dir, conf, HGlobals.metaTableDesc, 1L, null, null);
HRegion.addRegionToMETA(root, meta);
HRegion region = createNewHRegion(fs, dir, conf, desc, rand.nextLong(), null, null);
HRegion region = createNewHRegion(dir, conf, desc, rand.nextLong(), null, null);
HRegion.addRegionToMETA(meta, region);
// insert some data into the test table