HBASE-3030 The return code of many filesystem operations are not checked
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1001954 13f79535-47bb-0310-9956-ffa450edef68
commit 7faaf57d1b
parent b11ac8d6f5
@@ -945,6 +945,8 @@ Release 0.21.0 - Unreleased
    HBASE-3033  [replication] ReplicationSink.replicateEntries improvements
    HBASE-3040  BlockIndex readIndex too slowly in heavy write scenario
                (Andy Chen via Stack)
+   HBASE-3030  The return code of many filesystem operations are not checked
+               (dhruba borthakur via Stack)
 
   NEW FEATURES
    HBASE-1961  HBase EC2 scripts
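Every hunk below applies the same fix: Hadoop's FileSystem.rename, FileSystem.mkdirs and FileSystem.delete report many failures through their boolean return value rather than by throwing, so each call site now tests that value and either logs a warning or raises an IOException. A minimal sketch of the two variants used in this patch follows; the class name, method names and logger are illustrative stand-ins, not code from the patch itself.

    import java.io.IOException;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ReturnCodeChecks {
      private static final Log LOG = LogFactory.getLog(ReturnCodeChecks.class);

      // Best-effort variant (as in HRegion and Store): warn and keep going.
      static void renameBestEffort(FileSystem fs, Path src, Path dst)
          throws IOException {
        if (!fs.rename(src, dst)) {
          LOG.warn("Unable to rename " + src + " to " + dst);
        }
      }

      // Strict variant (as in HLog and HLogSplitter): a failed rename aborts the operation.
      static void renameOrThrow(FileSystem fs, Path src, Path dst)
          throws IOException {
        if (!fs.rename(src, dst)) {
          throw new IOException("Unable to rename " + src + " to " + dst);
        }
      }
    }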
@@ -368,7 +368,9 @@ public class HRegion implements HeapSize { // , Writable{
      final Path initialFiles, final Path regiondir)
   throws IOException {
     if (initialFiles != null && fs.exists(initialFiles)) {
-      fs.rename(initialFiles, regiondir);
+      if (!fs.rename(initialFiles, regiondir)) {
+        LOG.warn("Unable to rename " + initialFiles + " to " + regiondir);
+      }
     }
   }
 
@@ -478,7 +478,9 @@ public class Store implements HeapSize {
     // Write-out finished successfully, move into the right spot
     Path dstPath = StoreFile.getUniqueFile(fs, homedir);
     LOG.info("Renaming flushed file at " + writer.getPath() + " to " + dstPath);
-    fs.rename(writer.getPath(), dstPath);
+    if (!fs.rename(writer.getPath(), dstPath)) {
+      LOG.warn("Unable to rename " + writer.getPath() + " to " + dstPath);
+    }
 
     StoreFile sf = new StoreFile(this.fs, dstPath, blockcache,
       this.conf, this.family.getBloomFilterType(), this.inMemory);
@@ -345,10 +345,14 @@ public class HLog implements Syncable {
     if (failIfLogDirExists && fs.exists(dir)) {
       throw new IOException("Target HLog directory already exists: " + dir);
     }
-    fs.mkdirs(dir);
+    if (!fs.mkdirs(dir)) {
+      throw new IOException("Unable to mkdir " + dir);
+    }
     this.oldLogDir = oldLogDir;
     if (!fs.exists(oldLogDir)) {
-      fs.mkdirs(this.oldLogDir);
+      if (!fs.mkdirs(this.oldLogDir)) {
+        throw new IOException("Unable to mkdir " + this.oldLogDir);
+      }
     }
     this.maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
     this.enabled = conf.getBoolean("hbase.regionserver.hlog.enabled", true);
@@ -724,7 +728,9 @@ public class HLog implements Syncable {
       LOG.info("moving old hlog file " + FSUtils.getPath(p) +
         " whose highest sequenceid is " + seqno + " to " +
         FSUtils.getPath(newPath));
-      this.fs.rename(p, newPath);
+      if (!this.fs.rename(p, newPath)) {
+        throw new IOException("Unable to rename " + p + " to " + newPath);
+      }
     }
 
   /**
@@ -758,12 +764,16 @@ public class HLog implements Syncable {
     close();
     FileStatus[] files = fs.listStatus(this.dir);
     for(FileStatus file : files) {
-      fs.rename(file.getPath(),
-          getHLogArchivePath(this.oldLogDir, file.getPath()));
+      Path p = getHLogArchivePath(this.oldLogDir, file.getPath());
+      if (!fs.rename(file.getPath(),p)) {
+        throw new IOException("Unable to rename " + file.getPath() + " to " + p);
+      }
     }
     LOG.debug("Moved " + files.length + " log files to " +
       FSUtils.getPath(this.oldLogDir));
-    fs.delete(dir, true);
+    if (!fs.delete(dir, true)) {
+      LOG.info("Unable to delete " + dir);
+    }
   }
 
   /**
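The reason these checks matter is that FileSystem implementations commonly signal problems such as a missing destination directory by returning false instead of throwing. A small demonstration against the local filesystem; this is a sketch with made-up paths, and the exact behavior can vary by FileSystem implementation and Hadoop version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FsReturnValueDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path base = new Path(System.getProperty("java.io.tmpdir"), "fs-return-demo");
        fs.mkdirs(base);                 // scratch directory for the demo
        Path src = new Path(base, "a.log");
        fs.create(src).close();          // empty source file

        // Destination parent does not exist: rename typically just returns false.
        Path dst = new Path(base, "missing-dir/a.log");
        System.out.println("rename succeeded: " + fs.rename(src, dst));

        // Deleting a path that is not there is also reported via the return value.
        System.out.println("delete succeeded: " + fs.delete(new Path(base, "no-such"), true));
      }
    }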
@@ -153,11 +153,16 @@ public class HLogSplitter {
       Path newPath = HLog.getHLogArchivePath(oldLogDir, file.getPath());
       LOG.info("Moving " + FSUtils.getPath(file.getPath()) + " to "
           + FSUtils.getPath(newPath));
-      fs.rename(file.getPath(), newPath);
+      if (!fs.rename(file.getPath(), newPath)) {
+        throw new IOException("Unable to rename " + file.getPath() +
+          " to " + newPath);
+      }
     }
     LOG.debug("Moved " + files.length + " log files to "
         + FSUtils.getPath(oldLogDir));
-    fs.delete(srcDir, true);
+    if (!fs.delete(srcDir, true)) {
+      throw new IOException("Unable to delete " + srcDir);
+    }
   } catch (IOException e) {
     e = RemoteExceptionHandler.checkIOException(e);
     IOException io = new IOException("Cannot delete: " + srcDir);
@@ -356,19 +361,27 @@ public class HLogSplitter {
     final Path corruptDir = new Path(conf.get(HConstants.HBASE_DIR), conf.get(
         "hbase.regionserver.hlog.splitlog.corrupt.dir", ".corrupt"));
 
-    fs.mkdirs(corruptDir);
+    if (!fs.mkdirs(corruptDir)) {
+      LOG.info("Unable to mkdir " + corruptDir);
+    }
     fs.mkdirs(oldLogDir);
 
     for (Path corrupted : corruptedLogs) {
       Path p = new Path(corruptDir, corrupted.getName());
-      LOG.info("Moving corrupted log " + corrupted + " to " + p);
-      fs.rename(corrupted, p);
+      if (!fs.rename(corrupted, p)) {
+        LOG.info("Unable to move corrupted log " + corrupted + " to " + p);
+      } else {
+        LOG.info("Moving corrupted log " + corrupted + " to " + p);
+      }
     }
 
     for (Path p : processedLogs) {
       Path newPath = HLog.getHLogArchivePath(oldLogDir, p);
-      fs.rename(p, newPath);
-      LOG.info("Archived processed log " + p + " to " + newPath);
+      if (!fs.rename(p, newPath)) {
+        LOG.info("Unable to move " + p + " to " + newPath);
+      } else {
+        LOG.info("Archived processed log " + p + " to " + newPath);
+      }
     }
   }
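Note the two policies in the splitter hunk above: corrupted and processed logs are moved best-effort (a failed rename is only logged), while archiveLogs and HLog.close() fail fast on the first rename that reports failure. Distilled into a standalone helper, the fail-fast loop looks roughly like the sketch below; the class and method names are hypothetical, and the destination is derived from the file name rather than HLog.getHLogArchivePath.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class LogArchiver {
      // Move every file under srcDir into oldLogDir, then remove srcDir.
      // The first rename that reports failure aborts the whole pass.
      static void archiveAll(FileSystem fs, Path srcDir, Path oldLogDir)
          throws IOException {
        for (FileStatus file : fs.listStatus(srcDir)) {
          Path dst = new Path(oldLogDir, file.getPath().getName());
          if (!fs.rename(file.getPath(), dst)) {
            throw new IOException("Unable to rename " + file.getPath() + " to " + dst);
          }
        }
        if (!fs.delete(srcDir, true)) {
          throw new IOException("Unable to delete " + srcDir);
        }
      }
    }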