HBASE-4754 FSTableDescriptors.getTableInfoPath() should handle FileNotFoundException

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1198953 13f79535-47bb-0310-9956-ffa450edef68
Zhihong Yu 2011-11-07 21:40:28 +00:00
parent f7e925c8d3
commit d54b1fdee7
11 changed files with 42 additions and 14 deletions
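
Context for the patch series below: FileSystem.listStatus() reports a missing directory inconsistently across Hadoop versions; older releases return null, while newer ones throw FileNotFoundException, so a call like FSTableDescriptors.getTableInfoPath() can fail with an unhandled exception depending on the underlying Hadoop. The commit adds an FSUtils.listStatus(fs, dir, filter) wrapper that normalizes both behaviors to null and converts each listing call site to use it. A minimal caller-side sketch of the resulting contract (someDir and the surrounding method are hypothetical, not from the commit; the wrapper itself appears in the FSUtils.java hunk at the end):

    // Hypothetical call site illustrating the contract of the new helper.
    // Old style, version-dependent failure mode:
    //   FileStatus[] files = fs.listStatus(someDir);  // may throw FileNotFoundException
    // New style, one uniform null check:
    FileStatus[] files = FSUtils.listStatus(fs, someDir, null);
    if (files == null) {
      return;  // directory missing or empty; either way there is nothing to do
    }
    for (FileStatus file : files) {
      LOG.debug("processing " + file.getPath());
    }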

CHANGES.txt

@@ -459,6 +459,7 @@ Release 0.92.0 - Unreleased
               occasionally fails
    HBASE-4753  org.apache.hadoop.hbase.regionserver.TestHRegionInfo#testGetSetOfHTD
                throws NPE on trunk (nkeywal)
+   HBASE-4754  FSTableDescriptors.getTableInfoPath() should handle FileNotFoundException
 
   TESTS
    HBASE-4450  test for number of blocks read: to serve as baseline for expected

CatalogJanitor.java

@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Writables;
@@ -324,7 +325,7 @@ class CatalogJanitor extends Chore {
         family.getName());
       if (!fs.exists(p)) continue;
       // Look for reference files.  Call listStatus with anonymous instance of PathFilter.
-      FileStatus [] ps = fs.listStatus(p,
+      FileStatus [] ps = FSUtils.listStatus(fs, p,
          new PathFilter () {
            public boolean accept(Path path) {
              return StoreFile.isReference(path);

LogCleaner.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.util.FSUtils;
 
 import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS;
@@ -118,7 +119,7 @@ public class LogCleaner extends Chore {
   @Override
   protected void chore() {
     try {
-      FileStatus [] files = this.fs.listStatus(this.oldLogDir);
+      FileStatus [] files = FSUtils.listStatus(this.fs, this.oldLogDir, null);
       if (files == null) return;
       FILE: for (FileStatus file : files) {
         Path filePath = file.getPath();
@@ -162,4 +163,4 @@ public class LogCleaner extends Chore {
      }
    }
  }
-}
+}

MasterFileSystem.java

@@ -191,7 +191,7 @@ public class MasterFileSystem {
     List<ServerName> serverNames = new ArrayList<ServerName>();
     try {
       if (!this.fs.exists(logsDirPath)) return;
-      FileStatus[] logFolders = this.fs.listStatus(logsDirPath);
+      FileStatus[] logFolders = FSUtils.listStatus(this.fs, logsDirPath, null);
       if (logFolders == null || logFolders.length == 0) {
         LOG.debug("No log files to split, proceeding...");

SplitLogManager.java

@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.regionserver.SplitLogWorker;
 import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
 import org.apache.hadoop.hbase.regionserver.wal.OrphanHLogAfterSplitException;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -183,7 +184,8 @@ public class SplitLogManager extends ZooKeeperListener {
         LOG.warn(hLogDir + " doesn't exist. Nothing to do!");
         continue;
       }
-      FileStatus[] logfiles = fs.listStatus(hLogDir); // TODO filter filenames?
+      // TODO filter filenames?
+      FileStatus[] logfiles = FSUtils.listStatus(fs, hLogDir, null);
       if (logfiles == null || logfiles.length == 0) {
         LOG.info(hLogDir + " is empty dir, no logs to split");
       } else {
@@ -989,7 +991,7 @@
      }
      LOG.debug("re-listing " + logdir);
      tot_mgr_relist_logdir.incrementAndGet();
-      FileStatus[] newfiles = fs.listStatus(logdir);
+      FileStatus[] newfiles = FSUtils.listStatus(fs, logdir, null);
      if (newfiles == null) {
        return false;
      }
@@ -1049,4 +1051,4 @@
    SUCCESS(),
    FAILURE();
  }
-}
+}

HRegion.java

@@ -3633,7 +3633,7 @@ public class HRegion implements HeapSize { // , Writable{
    */
   private static void listPaths(FileSystem fs, Path dir) throws IOException {
     if (LOG.isDebugEnabled()) {
-      FileStatus[] stats = fs.listStatus(dir);
+      FileStatus[] stats = FSUtils.listStatus(fs, dir, null);
       if (stats == null || stats.length == 0) {
         return;
       }

Store.java

@@ -27,8 +27,8 @@ import java.util.Collections;
 import java.util.List;
 import java.util.NavigableSet;
 import java.util.SortedSet;
-import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.CopyOnWriteArraySet;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.CollectionBackedScanner;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.base.Preconditions;
@@ -257,7 +258,7 @@ public class Store implements HeapSize {
   private List<StoreFile> loadStoreFiles()
   throws IOException {
     ArrayList<StoreFile> results = new ArrayList<StoreFile>();
-    FileStatus files[] = this.fs.listStatus(this.homedir);
+    FileStatus files[] = FSUtils.listStatus(this.fs, this.homedir, null);
     for (int i = 0; files != null && i < files.length; i++) {
       // Skip directories.
       if (files[i].isDir()) {

StoreFile.java

@@ -225,7 +225,7 @@ public class StoreFile {
     }
     // cache the modification time stamp of this store file
-    FileStatus[] stats = fs.listStatus(p);
+    FileStatus[] stats = FSUtils.listStatus(fs, p, null);
     if (stats != null && stats.length == 1) {
       this.modificationTimeStamp = stats[0].getModificationTime();
     } else {

HLog.java

@@ -1693,7 +1693,7 @@ public class HLog implements Syncable {
     NavigableSet<Path> filesSorted = new TreeSet<Path>();
     Path editsdir = getRegionDirRecoveredEditsDir(regiondir);
     if (!fs.exists(editsdir)) return filesSorted;
-    FileStatus[] files = fs.listStatus(editsdir, new PathFilter() {
+    FileStatus[] files = FSUtils.listStatus(fs, editsdir, new PathFilter() {
       @Override
       public boolean accept(Path p) {
         boolean result = false;

FSTableDescriptors.java

@@ -254,7 +254,7 @@ public class FSTableDescriptors implements TableDescriptors {
   private static FileStatus getTableInfoPath(final FileSystem fs,
       final Path tabledir)
   throws IOException {
-    FileStatus [] status = fs.listStatus(tabledir, new PathFilter() {
+    FileStatus [] status = FSUtils.listStatus(fs, tabledir, new PathFilter() {
       @Override
       public boolean accept(Path p) {
         // Accept any file that starts with TABLEINFO_NAME

FSUtils.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.util;
 import java.io.DataInputStream;
 import java.io.EOFException;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -905,5 +906,26 @@ public abstract class FSUtils {
     }
     return map;
   }
+
+  /**
+   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
+   * This accommodates behavior differences among hadoop versions.
+   *
+   * @param fs file system
+   * @param dir directory
+   * @param filter path filter
+   * @return null if dir doesn't exist or is empty, otherwise FileStatus array
+   */
+  public static FileStatus [] listStatus(final FileSystem fs,
+      final Path dir, final PathFilter filter) throws IOException {
+    FileStatus [] status = null;
+    try {
+      status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
+    } catch (FileNotFoundException fnfe) {
+      // if directory doesn't exist, return null
+      LOG.info(dir + " doesn't exist");
+    }
+    if (status == null || status.length < 1) return null;
+    return status;
+  }
 }
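
For completeness, a hedged usage sketch of the new helper with a non-null filter, modeled on the FSTableDescriptors call site above; the conf, LOG, and path values are illustrative stand-ins, not part of the commit:

    FileSystem fs = FileSystem.get(conf);            // conf assumed in scope
    Path tabledir = new Path("/hbase/mytable");      // illustrative table dir
    FileStatus[] status = FSUtils.listStatus(fs, tabledir, new PathFilter() {
      @Override
      public boolean accept(Path p) {
        // accept candidate tableinfo files; the real predicate lives in
        // FSTableDescriptors and matches names starting with TABLEINFO_NAME
        return p.getName().startsWith(".tableinfo");
      }
    });
    // status is null whether tabledir is absent or simply has no match,
    // so callers handle a single case instead of catching FileNotFoundException.
    if (status == null) {
      LOG.info("no tableinfo file under " + tabledir);
    }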