From 21ea0b5ecf8bd410acba23d7158fa16f9a08d865 Mon Sep 17 00:00:00 2001
From: Michael Stack
Date: Thu, 7 Mar 2013 07:01:54 +0000
Subject: [PATCH] HBASE-7982 TestReplicationQueueFailover* runs for a minute, spews 3/4million lines complaining 'Filesystem closed', has an NPE, and still passes?

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1453712 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hbase/regionserver/HRegionServer.java     | 22 +++++---
 .../hbase/regionserver/wal/HLogSplitter.java  | 25 +++++----
 .../org/apache/hadoop/hbase/util/FSUtils.java | 55 +++++++++++++++++++
 .../apache/hadoop/hbase/util/TestFSUtils.java | 29 +++++++++-
 4 files changed, 111 insertions(+), 20 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 319467410ac..4a8e5bf47c2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1159,23 +1159,27 @@ public class HRegionServer implements ClientProtocol,
   }
 
   private void closeWAL(final boolean delete) {
-    try {
-      if (this.hlogForMeta != null) {
-        //All hlogs (meta and non-meta) are in the same directory. Don't call
-        //closeAndDelete here since that would delete all hlogs not just the
-        //meta ones. We will just 'close' the hlog for meta here, and leave
-        //the directory cleanup to the follow-on closeAndDelete call.
+    if (this.hlogForMeta != null) {
+      // All hlogs (meta and non-meta) are in the same directory. Don't call
+      // closeAndDelete here since that would delete all hlogs not just the
+      // meta ones. We will just 'close' the hlog for meta here, and leave
+      // the directory cleanup to the follow-on closeAndDelete call.
+      try {
         this.hlogForMeta.close();
+      } catch (Throwable e) {
+        LOG.error("Metalog close and delete failed", RemoteExceptionHandler.checkThrowable(e));
       }
-      if (this.hlog != null) {
+    }
+    if (this.hlog != null) {
+      try {
         if (delete) {
           hlog.closeAndDelete();
         } else {
           hlog.close();
         }
+      } catch (Throwable e) {
+        LOG.error("Close and delete failed", RemoteExceptionHandler.checkThrowable(e));
       }
-    } catch (Throwable e) {
-      LOG.error("Close and delete failed", RemoteExceptionHandler.checkThrowable(e));
     }
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
index b1148eff21b..5ec4a65e9a1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
@@ -193,9 +193,9 @@ public class HLogSplitter {
 
     status = TaskMonitor.get().createStatus(
           "Splitting logs in " + srcDir);
-    
+
     long startTime = EnvironmentEdgeManager.currentTimeMillis();
-    
+
     status.setStatus("Determining files to split...");
     List<Path> splits = null;
     if (!fs.exists(srcDir)) {
@@ -219,7 +219,7 @@ public class HLogSplitter {
     LOG.info(msg);
     return splits;
   }
-  
+
   private void logAndReport(String msg) {
     status.setStatus(msg);
     LOG.info(msg);
@@ -321,7 +321,7 @@ public class HLogSplitter {
       }
     }
     status.setStatus("Log splits complete. Checking for orphaned logs.");
-    
+
     if (fs.listStatus(srcDir).length >
         processedLogs.size() + corruptedLogs.size()) {
       throw new OrphanHLogAfterSplitException(
@@ -511,7 +511,12 @@ public class HLogSplitter {
     List<Path> corruptedLogs = new ArrayList<Path>();
     FileSystem fs;
     fs = rootdir.getFileSystem(conf);
-    Path logPath = new Path(logfile);
+    Path logPath = null;
+    if (FSUtils.isStartingWithPath(rootdir, logfile)) {
+      logPath = new Path(logfile);
+    } else {
+      logPath = new Path(rootdir, logfile);
+    }
     if (ZKSplitLog.isCorrupted(rootdir, logPath.getName(), fs)) {
       corruptedLogs.add(logPath);
     } else {
@@ -842,7 +847,7 @@ public class HLogSplitter {
         buffer = new RegionEntryBuffer(key.getTablename(), key.getEncodedRegionName());
         buffers.put(key.getEncodedRegionName(), buffer);
       }
-      incrHeap= buffer.appendEntry(entry); 
+      incrHeap= buffer.appendEntry(entry);
     }
 
     // If we crossed the chunk threshold, wait for more space to be available
@@ -1092,7 +1097,7 @@ public class HLogSplitter {
 
   /**
    * A class used in distributed log splitting
-   * 
+   *
    */
   class DistributedLogSplittingHelper {
     // Report progress, only used in distributed log splitting
@@ -1143,7 +1148,7 @@ public class HLogSplitter {
         new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR));
 
     private boolean closeAndCleanCompleted = false;
-    
+
     private boolean logWritersClosed = false;
 
     private final int numThreads;
@@ -1171,7 +1176,7 @@ public class HLogSplitter {
     }
 
     /**
-     * 
+     *
      * @return null if failed to report progress
      * @throws IOException
      */
@@ -1303,7 +1308,7 @@ public class HLogSplitter {
       }
       return paths;
     }
-    
+
     private List<Path> closeLogWriters(List<IOException> thrown)
         throws IOException {
       if (!logWritersClosed) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 861fb5c0528..680def3e48f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -86,6 +86,61 @@ public abstract class FSUtils {
     super();
   }
 
+  /**
+   * Compares the path component only. Does not consider the scheme; i.e. if the schemes differ
+   * but path starts with rootPath, this returns true.
+   * @param rootPath root path to compare against
+   * @param path path to check
+   * @return True if path starts with rootPath
+   */
+  public static boolean isStartingWithPath(final Path rootPath, final String path) {
+    String uriRootPath = rootPath.toUri().getPath();
+    String tailUriPath = (new Path(path)).toUri().getPath();
+    return tailUriPath.startsWith(uriRootPath);
+  }
+
+  /**
+   * Compares the path component of the Path URI; e.g. given hdfs://a/b/c and /a/b/c, it compares
+   * the '/a/b/c' part. Does not consider the scheme; i.e. if the schemes differ but the paths
+   * match, the two equate.
+   * @param pathToSearch Path we will be trying to match.
+   * @param pathTail the tail to match against pathToSearch
+   * @return True if pathTail is the tail of the path of pathToSearch
+   */
+  public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
+    return isMatchingTail(pathToSearch, new Path(pathTail));
+  }
+
+  /**
+   * Compares the path component of the Path URI; e.g. given hdfs://a/b/c and /a/b/c, it compares
+   * the '/a/b/c' part and returns true. Does not consider the scheme; i.e. if the schemes differ
+   * but the paths match, the two equate. Paths of unequal depth never match.
+   * @param pathToSearch Path we will be trying to match.
+   * @param pathTail the tail to match against pathToSearch
+   * @return True if pathTail is the tail of the path of pathToSearch
+   */
+  public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
+    if (pathToSearch.depth() != pathTail.depth()) return false;
+    Path tailPath = pathTail;
+    String tailName;
+    Path toSearch = pathToSearch;
+    String toSearchName;
+    boolean result = false;
+    do {
+      tailName = tailPath.getName();
+      if (tailName == null || tailName.length() <= 0) {
+        result = true;
+        break;
+      }
+      toSearchName = toSearch.getName();
+      if (toSearchName == null || toSearchName.length() <= 0) break;
+      // Move up a parent on each path for next go around. Path doesn't let us go off the end.
+      tailPath = tailPath.getParent();
+      toSearch = toSearch.getParent();
+    } while(tailName.equals(toSearchName));
+    return result;
+  }
+
   public static FSUtils getInstance(FileSystem fs, Configuration conf) {
     String scheme = fs.getUri().getScheme();
     if (scheme == null) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
index 45829837646..e2d3c3e3da7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
@@ -34,12 +34,12 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -49,6 +49,33 @@ import org.junit.experimental.categories.Category;
  */
@Category(MediumTests.class)
 public class TestFSUtils {
+  /**
+   * Test path compare and prefix checking.
+   * @throws IOException
+   */
+  @Test
+  public void testMatchingTail() throws IOException {
+    HBaseTestingUtility htu = new HBaseTestingUtility();
+    final FileSystem fs = htu.getTestFileSystem();
+    Path rootdir = htu.getDataTestDir();
+    assertTrue(rootdir.depth() > 1);
+    Path partPath = new Path("a", "b");
+    Path fullPath = new Path(rootdir, partPath);
+    Path fullyQualifiedPath = fs.makeQualified(fullPath);
+    assertFalse(FSUtils.isMatchingTail(fullPath, partPath));
+    assertFalse(FSUtils.isMatchingTail(fullPath, partPath.toString()));
+    assertTrue(FSUtils.isStartingWithPath(rootdir, fullPath.toString()));
+    assertTrue(FSUtils.isStartingWithPath(fullyQualifiedPath, fullPath.toString()));
+    assertFalse(FSUtils.isStartingWithPath(rootdir, partPath.toString()));
+    assertFalse(FSUtils.isMatchingTail(fullyQualifiedPath, partPath));
+    assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fullPath));
+    assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fullPath.toString()));
+    assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fs.makeQualified(fullPath)));
+    assertTrue(FSUtils.isStartingWithPath(rootdir, fullyQualifiedPath.toString()));
+    assertFalse(FSUtils.isMatchingTail(fullPath, new Path("x")));
+    assertFalse(FSUtils.isMatchingTail(new Path("x"), fullPath));
+  }
+
   @Test
   public void testVersion() throws DeserializationException, IOException {
     HBaseTestingUtility htu = new HBaseTestingUtility();
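
The closeWAL() change above applies a simple error-isolation pattern: each WAL is closed inside its own try/catch, so a Throwable thrown while closing the meta log (e.g. the 'Filesystem closed' complaints from the JIRA) can no longer skip the close-and-delete of the main log. A minimal standalone sketch of that pattern, with simplified, made-up types and names rather than code from the patch:

import java.io.Closeable;
import java.io.IOException;

public class CloseIsolationSketch {
  // Close one resource, containing any failure so the caller can keep going.
  static void closeQuietly(Closeable resource, String what) {
    if (resource == null) return;
    try {
      resource.close();
    } catch (Throwable t) {
      // Log and continue; the next resource still gets its close() call.
      System.err.println(what + " close failed: " + t);
    }
  }

  public static void main(String[] args) {
    Closeable metaLog = () -> { throw new IOException("Filesystem closed"); };
    Closeable mainLog = () -> System.out.println("main log closed");
    closeQuietly(metaLog, "meta log"); // fails, but the failure is contained
    closeQuietly(mainLog, "main log"); // still runs
  }
}

Under the old single try/catch, a failure in hlogForMeta.close() unwound past the hlog.closeAndDelete() call; with the new shape each close is attempted regardless of the other.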
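The HLogSplitter.splitLogFile() hunk uses the new FSUtils.isStartingWithPath() to accept a log name that arrives either absolute or relative to the HBase root directory, instead of assuming it is always absolute. A minimal sketch of that normalization, using made-up rootdir and log-file values (the resolveLogPath() helper below just mirrors the new branch; it is not itself part of the patch):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSUtils;

public class LogPathSketch {
  // Qualify the log name against rootdir only when it does not already
  // start with rootdir's path component.
  static Path resolveLogPath(Path rootdir, String logfile) {
    return FSUtils.isStartingWithPath(rootdir, logfile)
        ? new Path(logfile)           // already under rootdir
        : new Path(rootdir, logfile); // relative name; qualify it
  }

  public static void main(String[] args) {
    Path rootdir = new Path("hdfs://nn:8020/hbase");
    System.out.println(resolveLogPath(rootdir, "/hbase/.logs/rs1,60020,1/log.0"));
    System.out.println(resolveLogPath(rootdir, ".logs/rs1,60020,1/log.0"));
  }
}

Because isStartingWithPath() compares only the path component of the URIs, a fully-qualified hdfs:// log name still matches a rootdir that carries the same path under a different (or no) scheme.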