diff --git a/CHANGES.txt b/CHANGES.txt
index e5d2d8006ad..c84a00c7741 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -945,6 +945,8 @@ Release 0.21.0 - Unreleased
    HBASE-3033 [replication] ReplicationSink.replicateEntries improvements
    HBASE-3040 BlockIndex readIndex too slowly in heavy write scenario
               (Andy Chen via Stack)
+   HBASE-3030 The return code of many filesystem operations are not checked
+              (dhruba borthakur via Stack)
 
   NEW FEATURES
    HBASE-1961 HBase EC2 scripts
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 3bd11637451..0a4fbce99a9 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -368,7 +368,9 @@ public class HRegion implements HeapSize { // , Writable{
     final Path initialFiles, final Path regiondir)
   throws IOException {
     if (initialFiles != null && fs.exists(initialFiles)) {
-      fs.rename(initialFiles, regiondir);
+      if (!fs.rename(initialFiles, regiondir)) {
+        LOG.warn("Unable to rename " + initialFiles + " to " + regiondir);
+      }
     }
   }
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index b18cf01f818..881c6e99f93 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -478,7 +478,9 @@ public class Store implements HeapSize {
     // Write-out finished successfully, move into the right spot
     Path dstPath = StoreFile.getUniqueFile(fs, homedir);
     LOG.info("Renaming flushed file at " + writer.getPath() + " to " + dstPath);
-    fs.rename(writer.getPath(), dstPath);
+    if (!fs.rename(writer.getPath(), dstPath)) {
+      LOG.warn("Unable to rename " + writer.getPath() + " to " + dstPath);
+    }
     StoreFile sf = new StoreFile(this.fs, dstPath, blockcache,
       this.conf, this.family.getBloomFilterType(), this.inMemory);
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java b/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
index bea5d861d76..b5815968df7 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
@@ -345,10 +345,14 @@ public class HLog implements Syncable {
     if (failIfLogDirExists && fs.exists(dir)) {
       throw new IOException("Target HLog directory already exists: " + dir);
     }
-    fs.mkdirs(dir);
+    if (!fs.mkdirs(dir)) {
+      throw new IOException("Unable to mkdir " + dir);
+    }
     this.oldLogDir = oldLogDir;
     if (!fs.exists(oldLogDir)) {
-      fs.mkdirs(this.oldLogDir);
+      if (!fs.mkdirs(this.oldLogDir)) {
+        throw new IOException("Unable to mkdir " + this.oldLogDir);
+      }
     }
     this.maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
     this.enabled = conf.getBoolean("hbase.regionserver.hlog.enabled", true);
@@ -724,7 +728,9 @@ public class HLog implements Syncable {
     LOG.info("moving old hlog file " + FSUtils.getPath(p) +
       " whose highest sequenceid is " + seqno + " to " +
       FSUtils.getPath(newPath));
-    this.fs.rename(p, newPath);
+    if (!this.fs.rename(p, newPath)) {
+      throw new IOException("Unable to rename " + p + " to " + newPath);
+    }
   }
 
   /**
@@ -758,12 +764,16 @@ public class HLog implements Syncable {
     close();
     FileStatus[] files = fs.listStatus(this.dir);
     for(FileStatus file : files) {
-      fs.rename(file.getPath(),
-        getHLogArchivePath(this.oldLogDir, file.getPath()));
+      Path p = getHLogArchivePath(this.oldLogDir, file.getPath());
+      if (!fs.rename(file.getPath(), p)) {
+        throw new IOException("Unable to rename " + file.getPath() + " to " + p);
+      }
     }
     LOG.debug("Moved " + files.length + " log files to " +
       FSUtils.getPath(this.oldLogDir));
-    fs.delete(dir, true);
+    if (!fs.delete(dir, true)) {
+      LOG.info("Unable to delete " + dir);
+    }
   }
 
   /**
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java b/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
index 3ffa951a95e..52e5d31de9b 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
@@ -153,11 +153,16 @@ public class HLogSplitter {
         Path newPath = HLog.getHLogArchivePath(oldLogDir, file.getPath());
         LOG.info("Moving " + FSUtils.getPath(file.getPath()) + " to " +
           FSUtils.getPath(newPath));
-        fs.rename(file.getPath(), newPath);
+        if (!fs.rename(file.getPath(), newPath)) {
+          throw new IOException("Unable to rename " + file.getPath()
+            + " to " + newPath);
+        }
       }
       LOG.debug("Moved " + files.length + " log files to " +
         FSUtils.getPath(oldLogDir));
-      fs.delete(srcDir, true);
+      if (!fs.delete(srcDir, true)) {
+        throw new IOException("Unable to delete " + srcDir);
+      }
     } catch (IOException e) {
       e = RemoteExceptionHandler.checkIOException(e);
       IOException io = new IOException("Cannot delete: " + srcDir);
@@ -356,19 +361,27 @@ public class HLogSplitter {
     final Path corruptDir = new Path(conf.get(HConstants.HBASE_DIR), conf.get(
         "hbase.regionserver.hlog.splitlog.corrupt.dir", ".corrupt"));
-    fs.mkdirs(corruptDir);
+    if (!fs.mkdirs(corruptDir)) {
+      LOG.info("Unable to mkdir " + corruptDir);
+    }
     fs.mkdirs(oldLogDir);
 
     for (Path corrupted : corruptedLogs) {
       Path p = new Path(corruptDir, corrupted.getName());
-      LOG.info("Moving corrupted log " + corrupted + " to " + p);
-      fs.rename(corrupted, p);
+      if (!fs.rename(corrupted, p)) {
+        LOG.info("Unable to move corrupted log " + corrupted + " to " + p);
+      } else {
+        LOG.info("Moving corrupted log " + corrupted + " to " + p);
+      }
     }
 
     for (Path p : processedLogs) {
       Path newPath = HLog.getHLogArchivePath(oldLogDir, p);
-      fs.rename(p, newPath);
-      LOG.info("Archived processed log " + p + " to " + newPath);
+      if (!fs.rename(p, newPath)) {
+        LOG.info("Unable to move " + p + " to " + newPath);
+      } else {
+        LOG.info("Archived processed log " + p + " to " + newPath);
+      }
     }
   }
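
Note on the pattern this patch applies: Hadoop's FileSystem mutation calls
(rename, mkdirs, delete) report failure by returning false rather than by
throwing, so an unchecked call can silently lose a WAL or a flushed store
file. What follows is a minimal sketch of the two call-site styles used
above, assuming the Hadoop and Commons Logging jars are on the classpath;
CheckedFsOps, renameOrThrow, and renameOrLog are hypothetical names, not
part of this patch.

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CheckedFsOps {
  private static final Log LOG = LogFactory.getLog(CheckedFsOps.class);

  // Strict variant, as in HLog and HLogSplitter above: a false return is
  // promoted to an IOException so a failed WAL move cannot go unnoticed.
  static void renameOrThrow(FileSystem fs, Path src, Path dst)
      throws IOException {
    if (!fs.rename(src, dst)) {
      throw new IOException("Unable to rename " + src + " to " + dst);
    }
  }

  // Lenient variant, as in HRegion and Store above: log the failure and
  // report it to the caller, who may be able to retry or proceed.
  static boolean renameOrLog(FileSystem fs, Path src, Path dst)
      throws IOException {
    boolean ok = fs.rename(src, dst);
    if (!ok) {
      LOG.warn("Unable to rename " + src + " to " + dst);
    }
    return ok;
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    renameOrThrow(fs, new Path("/tmp/src"), new Path("/tmp/dst"));
  }
}

The choice between the two styles in the patch tracks how recoverable the
call site is: log-directory creation and WAL archiving fail hard with an
IOException, while best-effort steps such as moving corrupted logs aside or
deleting an emptied log directory only log and continue.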