HADOOP-1616 [hbase] Sporadic TestTable failures

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@557098 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2007-07-18 00:38:58 +00:00
parent cbb844d5f0
commit 6a64ae1542
13 changed files with 86 additions and 89 deletions

CHANGES.txt

@@ -64,4 +64,5 @@ Trunk (unreleased changes)
 40. HADOOP-1607 [shell] Clear screen command (Edward Yoon via Stack)
 41. HADOOP-1614 [hbase] HClient does not protect itself from simultaneous updates
 42. HADOOP-1468 Add HBase batch update to reduce RPC overhead
+43. HADOOP-1616 Sporadic TestTable failures

HLog.java

@@ -37,9 +37,9 @@ import java.util.*;
 *
 * <p>A single HLog is used by several HRegions simultaneously.
 *
-* <p>Each HRegion is identified by a unique long int. HRegions do not need to
-* declare themselves before using the HLog; they simply include their
-* HRegion-id in the {@link #append(Text, Text, Text, TreeMap, long)} or
+* <p>Each HRegion is identified by a unique long <code>int</code>. HRegions do
+* not need to declare themselves before using the HLog; they simply include
+* their HRegion-id in the {@link #append(Text, Text, Text, TreeMap, long)} or
 * {@link #completeCacheFlush(Text, Text, long)} calls.
 *
 * <p>An HLog consists of multiple on-disk files, which have a chronological
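
For reference, a hedged sketch of the no-registration contract the reworded comment describes; the parameter roles of append and completeCacheFlush are assumptions inferred from the @link signatures above, and the edit value type (BytesWritable) is assumed rather than taken from this commit:

    // Sketch only: one region writing through a shared HLog. Every call
    // carries the region's name; there is no registration step.
    HLog log = new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME), conf);
    Text regionName = new Text("mytable,,1");        // hypothetical region id
    Text tableName = new Text("mytable");
    TreeMap<Text, BytesWritable> edits = new TreeMap<Text, BytesWritable>();
    edits.put(new Text("info:a"), new BytesWritable("value".getBytes()));
    log.append(regionName, tableName, new Text("row1"), edits,
      System.currentTimeMillis());
    log.completeCacheFlush(regionName, tableName, 42L); // 42L: assumed seq id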
@@ -95,16 +95,14 @@ public class HLog implements HConstants {
   * @throws IOException
   */
  static void splitLog(Path rootDir, Path srcDir, FileSystem fs,
      Configuration conf) throws IOException {
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("splitting log files");
-    }
    Path logfiles[] = fs.listPaths(srcDir);
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("splitting " + logfiles.length + " log files in " +
+        srcDir.toString());
+    }
    TreeMap<Text, SequenceFile.Writer> logWriters =
      new TreeMap<Text, SequenceFile.Writer>();
    try {
      for(int i = 0; i < logfiles.length; i++) {
        SequenceFile.Reader in =
@@ -115,40 +113,35 @@ public class HLog implements HConstants {
          while(in.next(key, val)) {
            Text regionName = key.getRegionName();
            SequenceFile.Writer w = logWriters.get(regionName);
-            if(w == null) {
+            if (w == null) {
              Path logfile = new Path(HStoreFile.getHRegionDir(rootDir,
                regionName), HREGION_OLDLOGFILE_NAME);
              w = SequenceFile.createWriter(fs, conf, logfile, HLogKey.class,
                HLogEdit.class);
              logWriters.put(regionName, w);
            }
            w.append(key, val);
          }
        } finally {
          in.close();
        }
      }
    } finally {
-      for(SequenceFile.Writer w: logWriters.values()) {
+      for (SequenceFile.Writer w: logWriters.values()) {
        w.close();
      }
    }
    if(fs.exists(srcDir)) {
      if(! fs.delete(srcDir)) {
        LOG.error("Cannot delete: " + srcDir);
        if(! FileUtil.fullyDelete(new File(srcDir.toString()))) {
          throw new IOException("Cannot delete: " + srcDir);
        }
      }
    }
    if(LOG.isDebugEnabled()) {
-      LOG.debug("log file splitting completed");
+      LOG.debug("log file splitting completed for " + srcDir.toString());
    }
  }
@@ -213,25 +206,25 @@ public class HLog implements HConstants {
        }
      }
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("closing current log writer and getting a new one");
-      }
      // Close the current writer (if any), and grab a new one.
      if(writer != null) {
        writer.close();
-        if(filenum > 0) {
-          outputfiles.put(logSeqNum - 1, computeFilename(filenum - 1));
+        Path p = computeFilename(filenum - 1);
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("Closing current log writer " + p.toString() +
+            " to get a new one");
+        }
+        if (filenum > 0) {
+          outputfiles.put(logSeqNum - 1, p);
        }
      }
      Path newPath = computeFilename(filenum++);
-      this.writer = SequenceFile.createWriter(fs, conf, newPath, HLogKey.class, HLogEdit.class);
+      this.writer = SequenceFile.createWriter(fs, conf, newPath,
+        HLogKey.class, HLogEdit.class);
      if(LOG.isDebugEnabled()) {
-        LOG.debug("new log writer created");
+        LOG.debug("new log writer created at " + newPath);
      }

      // Can we delete any of the old log files?
@@ -239,8 +232,8 @@ public class HLog implements HConstants {
      // over all the regions.
      long oldestOutstandingSeqNum = Long.MAX_VALUE;
-      for(Iterator<Long> it = regionToLastFlush.values().iterator(); it.hasNext(); ) {
-        long curSeqNum = it.next().longValue();
+      for(Long l: regionToLastFlush.values()) {
+        long curSeqNum = l.longValue();
        if(curSeqNum < oldestOutstandingSeqNum) {
          oldestOutstandingSeqNum = curSeqNum;
@@ -249,14 +242,8 @@ public class HLog implements HConstants {
      // Next, remove all files with a final ID that's older
      // than the oldest pending region-operation.
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("removing old log files");
-      }
-      for(Iterator<Long> it = outputfiles.keySet().iterator(); it.hasNext(); ) {
+      for(Iterator<Long> it = outputfiles.keySet().iterator(); it.hasNext();) {
        long maxSeqNum = it.next().longValue();
        if(maxSeqNum < oldestOutstandingSeqNum) {
          Path p = outputfiles.get(maxSeqNum);
          it.remove();
@@ -269,16 +256,13 @@ public class HLog implements HConstants {
      }

      // Actually delete them, if any!
      for(Iterator<Path> it = toDeleteList.iterator(); it.hasNext(); ) {
        Path p = it.next();
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("removing old log file " + p.toString());
-        }
        fs.delete(p);
      }
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("old log files deleted");
-      }
      this.numEntries = 0;
    }
  }
@@ -307,13 +291,10 @@ public class HLog implements HConstants {
   */
  synchronized void close() throws IOException {
    if(LOG.isDebugEnabled()) {
-      LOG.debug("closing log writer");
+      LOG.debug("closing log writer in " + this.dir.toString());
    }
    this.writer.close();
    this.closed = true;
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("log writer closed");
-    }
  }

  /**

HMaster.java

@@ -1841,8 +1841,9 @@ public class HMaster implements HConstants, HMasterInterface,
      connection.commit(metaRegionName, clientId, lockid,
        System.currentTimeMillis());

-      // 4. Close the new region to flush it to disk
+      // 4. Close the new region to flush it to disk. Close its log file too.
      r.close();
+      r.getLog().closeAndDelete();

      // 5. Get it assigned to a server
      unassignedRegions.put(regionName, info);

HRegion.java

@@ -436,7 +436,6 @@ public class HRegion implements HConstants {
      }
      try {
        return allHStoreFiles;
      } finally {
        synchronized (writestate) {
          writestate.closed = true;
@@ -1536,6 +1535,9 @@ public class HRegion implements HConstants {
  /**
   * Convenience method creating new HRegions.
+   * Note, this method creates an {@link HLog} for the created region. It
+   * needs to be closed explicitly. Use {@link HRegion#getLog()} to get
+   * access.
   * @param regionId ID to use
   * @param tableDesc Descriptor
   * @param rootDir Root directory of HBase instance
@@ -1550,11 +1552,13 @@ public class HRegion implements HConstants {
    return createHRegion(new HRegionInfo(regionId, tableDesc, null, null),
      rootDir, conf, null);
  }

  /**
   * Convenience method creating new HRegions. Used by createTable and by the
-   * bootstrap code in the HMaster constructor
-   *
+   * bootstrap code in the HMaster constructor.
+   * Note, this method creates an {@link HLog} for the created region. It
+   * needs to be closed explicitly. Use {@link HRegion#getLog()} to get
+   * access.
   * @param info Info for region to create.
   * @param rootDir Root directory for HBase instance
   * @param conf
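
The javadoc added above changes the caller's obligations, and the rest of this commit follows through on it: HMaster and the tests now pair r.close() with r.getLog().closeAndDelete(). A minimal sketch of that pattern, with desc, rootDir and conf standing in for caller-supplied values and 99L as a hypothetical region id:

    // close() does not shut down the HLog that createHRegion made for the
    // region; callers must fetch it via getLog() and close it themselves.
    HRegion r = HRegion.createHRegion(
      new HRegionInfo(99L, desc, null, null), rootDir, conf, null);
    try {
      // ... use the region ...
    } finally {
      r.close();
      r.getLog().closeAndDelete();
    }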

AbstractMergeTestBase.java

@@ -95,8 +95,8 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
    // Now create the root and meta regions and insert the data regions
    // created above into the meta

-    HRegion root = createNewHRegion(fs, dir, conf, HGlobals.rootTableDesc, 0L, null, null);
-    HRegion meta = createNewHRegion(fs, dir, conf, HGlobals.metaTableDesc, 1L, null, null);
+    HRegion root = createNewHRegion(dir, conf, HGlobals.rootTableDesc, 0L, null, null);
+    HRegion meta = createNewHRegion(dir, conf, HGlobals.metaTableDesc, 1L, null, null);

    HRegion.addRegionToMETA(root, meta);
@@ -129,7 +129,8 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
  private HRegion createAregion(Text startKey, Text endKey, int firstRow, int nrows)
      throws IOException {

-    HRegion region = createNewHRegion(fs, dir, conf, desc, rand.nextLong(), startKey, endKey);
+    HRegion region = createNewHRegion(dir, conf, desc, rand.nextLong(), startKey, endKey);

    System.out.println("created region " + region.getRegionName());

HBaseTestCase.java

@@ -52,17 +52,15 @@ public abstract class HBaseTestCase extends TestCase {
    return new Path(StaticTestEnvironment.TEST_DIRECTORY_KEY, testName);
  }

-  protected HRegion createNewHRegion(FileSystem fs, Path dir,
-      Configuration conf, HTableDescriptor desc, long regionId, Text startKey,
-      Text endKey) throws IOException {
+  protected HRegion createNewHRegion(Path dir, Configuration c,
+      HTableDescriptor desc, long regionId, Text startKey, Text endKey)
+      throws IOException {
    HRegionInfo info = new HRegionInfo(regionId, desc, startKey, endKey);
    Path regionDir = HStoreFile.getHRegionDir(dir, info.regionName);
+    FileSystem fs = dir.getFileSystem(c);
    fs.mkdirs(regionDir);
    return new HRegion(dir,
      new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME), conf),
      fs, conf, info, null);
  }
}

MiniHBaseCluster.java

@@ -266,7 +266,13 @@ public class MiniHBaseCluster implements HConstants {
      }
    }
    LOG.info("HBase Cluster shutdown complete");

+    // Close the file system. Will complain if files open so helps w/ leaks.
+    try {
+      this.cluster.getFileSystem().close();
+    } catch (IOException e) {
+      LOG.error("Closing down dfs", e);
+    }
    if(cluster != null) {
      LOG.info("Shutting down Mini DFS cluster");
      cluster.shutdown();

TestHLog.java

@@ -88,7 +88,7 @@ public class TestHLog extends HBaseTestCase implements HConstants {
      }
    } finally {
      if (log != null) {
-        log.close();
+        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();

TestHRegion.java

@@ -610,7 +610,7 @@ public class TestHRegion extends HBaseTestCase implements RegionUnavailableListener {
    System.out.println("Merge regions elapsed time: "
      + ((System.currentTimeMillis() - startTime) / 1000.0));

    fs.delete(oldRegionPath);
    fs.delete(oldRegion1);
    fs.delete(oldRegion2);
@@ -812,7 +812,11 @@ public class TestHRegion extends HBaseTestCase implements RegionUnavailableListener {
  private void cleanup() {

    // Shut down the mini cluster
+    try {
+      log.closeAndDelete();
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
    cluster.shutdown();
    cluster = null;

TestMergeTable.java

@@ -19,21 +19,16 @@
 */
package org.apache.hadoop.hbase;

+import java.io.IOException;
+
public class TestMergeTable extends AbstractMergeTestBase {
-  public void testMergeTable() {
+  public void testMergeTable() throws IOException {
+    MiniHBaseCluster hCluster = new MiniHBaseCluster(conf, 1, dfsCluster);
    try {
-      MiniHBaseCluster hCluster = new MiniHBaseCluster(conf, 1, dfsCluster);
-      try {
-        HMerge.merge(conf, fs, desc.getName());
-      } finally {
-        hCluster.shutdown();
-      }
-    } catch(Throwable t) {
-      t.printStackTrace();
-      fail();
+      HMerge.merge(conf, fs, desc.getName());
+    } finally {
+      hCluster.shutdown();
    }
  }
}

TestScanner.java

@@ -260,7 +260,6 @@ public class TestScanner extends HBaseTestCase {
      region.close();
      log.closeAndDelete();
    } catch(IOException e) {
      e.printStackTrace();
      throw e;

TestScanner2.java

@@ -67,12 +67,19 @@ public class TestScanner2 extends HBaseClusterTestCase {
    newRegions.add(HRegion.createHRegion(
      new HRegionInfo(3L, desc, new Text("midway"), null),
      homedir, this.conf, null));
-    for (HRegion r: newRegions) {
-      HRegion.addRegionToMETA(client, HConstants.META_TABLE_NAME, r,
-        this.cluster.getHMasterAddress(), -1L);
-    }
-    regions = scan(client, HConstants.META_TABLE_NAME);
-    assertEquals("Should be two regions only", 2, regions.size());
+    try {
+      for (HRegion r : newRegions) {
+        HRegion.addRegionToMETA(client, HConstants.META_TABLE_NAME, r,
+          this.cluster.getHMasterAddress(), -1L);
+      }
+      regions = scan(client, HConstants.META_TABLE_NAME);
+      assertEquals("Should be two regions only", 2, regions.size());
+    } finally {
+      for (HRegion r : newRegions) {
+        r.close();
+        r.getLog().closeAndDelete();
+      }
+    }
  }
private List<HRegionInfo> scan(final HClient client, final Text table) private List<HRegionInfo> scan(final HClient client, final Text table)

TestTableMapReduce.java

@@ -89,11 +89,11 @@ public class TestTableMapReduce extends HBaseTestCase {
    // create the root and meta regions and insert the data region into the meta
-    HRegion root = createNewHRegion(fs, dir, conf, HGlobals.rootTableDesc, 0L, null, null);
-    HRegion meta = createNewHRegion(fs, dir, conf, HGlobals.metaTableDesc, 1L, null, null);
+    HRegion root = createNewHRegion(dir, conf, HGlobals.rootTableDesc, 0L, null, null);
+    HRegion meta = createNewHRegion(dir, conf, HGlobals.metaTableDesc, 1L, null, null);
    HRegion.addRegionToMETA(root, meta);

-    HRegion region = createNewHRegion(fs, dir, conf, desc, rand.nextLong(), null, null);
+    HRegion region = createNewHRegion(dir, conf, desc, rand.nextLong(), null, null);
    HRegion.addRegionToMETA(meta, region);

    // insert some data into the test table