HBASE-7982 TestReplicationQueueFailover* runs for a minute, spews 3/4million lines complaining 'Filesystem closed', has an NPE, and still passes?

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1453712 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2013-03-07 07:01:54 +00:00
parent a92c7f7612
commit 21ea0b5ecf
4 changed files with 111 additions and 20 deletions

View File

@@ -1159,23 +1159,27 @@ public class HRegionServer implements ClientProtocol,
}
private void closeWAL(final boolean delete) {
  if (this.hlogForMeta != null) {
    // All hlogs (meta and non-meta) are in the same directory. Don't call
    // closeAndDelete here since that would delete all hlogs not just the
    // meta ones. We will just 'close' the hlog for meta here, and leave
    // the directory cleanup to the follow-on closeAndDelete call.
    try {
      this.hlogForMeta.close();
    } catch (Throwable e) {
      LOG.error("Metalog close and delete failed", RemoteExceptionHandler.checkThrowable(e));
    }
  }
  if (this.hlog != null) {
    try {
      if (delete) {
        hlog.closeAndDelete();
      } else {
        hlog.close();
      }
    } catch (Throwable e) {
      LOG.error("Close and delete failed", RemoteExceptionHandler.checkThrowable(e));
    }
  }
}

View File

@@ -193,9 +193,9 @@ public class HLogSplitter {
status = TaskMonitor.get().createStatus(
"Splitting logs in " + srcDir);
long startTime = EnvironmentEdgeManager.currentTimeMillis();
status.setStatus("Determining files to split...");
List<Path> splits = null;
if (!fs.exists(srcDir)) {
@@ -219,7 +219,7 @@ public class HLogSplitter {
LOG.info(msg);
return splits;
}
private void logAndReport(String msg) {
status.setStatus(msg);
LOG.info(msg);
@@ -321,7 +321,7 @@ public class HLogSplitter {
}
}
status.setStatus("Log splits complete. Checking for orphaned logs.");
if (fs.listStatus(srcDir).length > processedLogs.size()
+ corruptedLogs.size()) {
throw new OrphanHLogAfterSplitException(
@@ -511,7 +511,12 @@ public class HLogSplitter {
List<Path> corruptedLogs = new ArrayList<Path>();
FileSystem fs;
fs = rootdir.getFileSystem(conf);
Path logPath = null;
if (FSUtils.isStartingWithPath(rootdir, logfile)) {
logPath = new Path(logfile);
} else {
logPath = new Path(rootdir, logfile);
}
if (ZKSplitLog.isCorrupted(rootdir, logPath.getName(), fs)) {
corruptedLogs.add(logPath);
} else {
@@ -842,7 +847,7 @@ public class HLogSplitter {
buffer = new RegionEntryBuffer(key.getTablename(), key.getEncodedRegionName());
buffers.put(key.getEncodedRegionName(), buffer);
}
incrHeap = buffer.appendEntry(entry);
}
// If we crossed the chunk threshold, wait for more space to be available
@@ -1092,7 +1097,7 @@ public class HLogSplitter {
/**
* A class used in distributed log splitting
*
*/
class DistributedLogSplittingHelper {
// Report progress, only used in distributed log splitting
@@ -1143,7 +1148,7 @@ public class HLogSplitter {
new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR));
private boolean closeAndCleanCompleted = false;
private boolean logWritersClosed = false;
private final int numThreads;
@@ -1171,7 +1176,7 @@ public class HLogSplitter {
}
/**
*
* @return null if failed to report progress
* @throws IOException
*/
@@ -1303,7 +1308,7 @@ public class HLogSplitter {
}
return paths;
}
private List<IOException> closeLogWriters(List<IOException> thrown)
throws IOException {
if (!logWritersClosed) {

View File

@@ -86,6 +86,61 @@ public abstract class FSUtils {
super();
}
/**
* Compares the path component only; does not consider the scheme. Even if the schemes differ,
* if <code>path</code> starts with <code>rootPath</code>, the function returns true.
* @param rootPath root path to compare against
* @param path path to check for the prefix
* @return True if <code>path</code> starts with <code>rootPath</code>
*/
public static boolean isStartingWithPath(final Path rootPath, final String path) {
String uriRootPath = rootPath.toUri().getPath();
String tailUriPath = (new Path(path)).toUri().getPath();
return tailUriPath.startsWith(uriRootPath);
}
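As a rough illustration (not part of the patch; the paths below are hypothetical and assume org.apache.hadoop.fs.Path), only the path component is compared, so a fully qualified child path still matches an unqualified root:
// Illustration only; hypothetical paths.
Path rootdir = new Path("/hbase");
FSUtils.isStartingWithPath(rootdir, "hdfs://namenode:8020/hbase/.logs/wal.1"); // true: scheme ignored
FSUtils.isStartingWithPath(rootdir, "/tmp/other/wal.1");                       // false: different prefix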
/**
* Compares the path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c, it will compare
* the '/a/b/c' part. Does not consider the scheme, so two paths whose path components match will
* compare equal even if their schemes differ.
* @param pathToSearch Path we will be trying to match.
* @param pathTail the tail, as a String, to look for
* @return True if <code>pathTail</code> is tail on the path of <code>pathToSearch</code>
*/
public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
return isMatchingTail(pathToSearch, new Path(pathTail));
}
/**
* Compares the path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c, it will compare
* the '/a/b/c' part. Does not consider the scheme, so paths that differ only in scheme compare
* equal. Both paths must have the same number of components; e.g. 'hdfs://a/b/c' and 'b/c' would
* return false because the depths differ.
* @param pathToSearch Path we will be trying to match.
* @param pathTail the tail to look for
* @return True if <code>pathTail</code> is tail on the path of <code>pathToSearch</code>
*/
public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
if (pathToSearch.depth() != pathTail.depth()) return false;
Path tailPath = pathTail;
String tailName;
Path toSearch = pathToSearch;
String toSearchName;
boolean result = false;
do {
tailName = tailPath.getName();
if (tailName == null || tailName.length() <= 0) {
result = true;
break;
}
toSearchName = toSearch.getName();
if (toSearchName == null || toSearchName.length() <= 0) break;
// Move up a parent on each path for next go around. Path doesn't let us go off the end.
tailPath = tailPath.getParent();
toSearch = toSearch.getParent();
} while(tailName.equals(toSearchName));
return result;
}
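A small sketch of the tail comparison (hypothetical paths, not from the patch): the scheme and authority are ignored, but both arguments must have the same number of path components:
// Illustration only; hypothetical paths.
Path qualified = new Path("hdfs://namenode:8020/hbase/.logs/wal.1");
FSUtils.isMatchingTail(qualified, new Path("/hbase/.logs/wal.1")); // true: same components, scheme ignored
FSUtils.isMatchingTail(qualified, new Path(".logs/wal.1"));        // false: depths differ (3 vs 2)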
public static FSUtils getInstance(FileSystem fs, Configuration conf) {
String scheme = fs.getUri().getScheme();
if (scheme == null) {

View File

@@ -34,12 +34,12 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -49,6 +49,33 @@ import org.junit.experimental.categories.Category;
*/
@Category(MediumTests.class)
public class TestFSUtils {
/**
* Test path compare and prefix checking.
* @throws IOException
*/
@Test
public void testMatchingTail() throws IOException {
HBaseTestingUtility htu = new HBaseTestingUtility();
final FileSystem fs = htu.getTestFileSystem();
Path rootdir = htu.getDataTestDir();
assertTrue(rootdir.depth() > 1);
Path partPath = new Path("a", "b");
Path fullPath = new Path(rootdir, partPath);
Path fullyQualifiedPath = fs.makeQualified(fullPath);
assertFalse(FSUtils.isMatchingTail(fullPath, partPath));
assertFalse(FSUtils.isMatchingTail(fullPath, partPath.toString()));
assertTrue(FSUtils.isStartingWithPath(rootdir, fullPath.toString()));
assertTrue(FSUtils.isStartingWithPath(fullyQualifiedPath, fullPath.toString()));
assertFalse(FSUtils.isStartingWithPath(rootdir, partPath.toString()));
assertFalse(FSUtils.isMatchingTail(fullyQualifiedPath, partPath));
assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fullPath));
assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fullPath.toString()));
assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fs.makeQualified(fullPath)));
assertTrue(FSUtils.isStartingWithPath(rootdir, fullyQualifiedPath.toString()));
assertFalse(FSUtils.isMatchingTail(fullPath, new Path("x")));
assertFalse(FSUtils.isMatchingTail(new Path("x"), fullPath));
}
@Test
public void testVersion() throws DeserializationException, IOException {
HBaseTestingUtility htu = new HBaseTestingUtility();