HADOOP-1780 Regions are still being doubly assigned
M src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
  Fix outputting a failure message on every compaction even though there
  was no failure.
M src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
  (rename): Refactor so the return only happens at the end.
M src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
  (assignRegions): Make synchronized. In the presence of concurrent visits
  by regionservers, both visiting threads could grab the same set of
  regions for assignment.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@569589 13f79535-47bb-0310-9956-ffa450edef68
parent 07224fe303
commit e9aafde1f1
src/contrib/hbase/CHANGES.txt
@@ -20,6 +20,7 @@ Trunk (unreleased changes)
     HADOOP-1747 On a cluster, on restart, regions multiply assigned
     HADOOP-1776 Fix for sporadic compaction failures closing and moving
                 compaction result
+    HADOOP-1780 Regions are still being doubly assigned
 
   IMPROVEMENTS
     HADOOP-1737 Make HColumnDescriptor data publically members settable
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
@@ -1468,7 +1468,7 @@ HMasterRegionInterface, Runnable {
    * @param serverName
    * @param returnMsgs
    */
-  private void assignRegions(HServerInfo info, String serverName,
+  private synchronized void assignRegions(HServerInfo info, String serverName,
     ArrayList<HMsg> returnMsgs) {
 
     TreeSet<Text> regionsToAssign = getRegionsToAssign();
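This one-word change is the core of HADOOP-1780: assignRegions does a check-then-act over the master's shared set of unassigned regions, so two regionservers reporting in at the same moment could each be handed the same regions. A minimal, self-contained sketch of that race and of the synchronized fix (class, field, and method names here are illustrative, not the actual HMaster code):

import java.util.ArrayList;
import java.util.List;
import java.util.TreeSet;

// Sketch of the HADOOP-1780 race: two regionserver report-handling threads
// run the same snapshot-then-remove sequence over a shared set of
// unassigned regions. Names below are illustrative, not the HMaster code.
public class AssignRace {
  private final TreeSet<String> unassignedRegions = new TreeSet<>();

  // Without synchronized, both threads can snapshot unassignedRegions
  // before either removes anything, so both "assign" the same regions.
  // Making the method synchronized (as the patch does for assignRegions)
  // serializes snapshot-and-remove, so each region is handed out once.
  private synchronized List<String> assignRegions(String serverName) {
    List<String> assigned = new ArrayList<>(unassignedRegions); // check
    unassignedRegions.removeAll(assigned);                      // act
    return assigned;
  }

  public static void main(String[] args) throws InterruptedException {
    AssignRace master = new AssignRace();
    for (int i = 0; i < 100; i++) {
      master.unassignedRegions.add("region-" + i);
    }
    List<String> a = new ArrayList<>();
    List<String> b = new ArrayList<>();
    Thread t1 = new Thread(() -> a.addAll(master.assignRegions("server1")));
    Thread t2 = new Thread(() -> b.addAll(master.assignRegions("server2")));
    t1.start(); t2.start();
    t1.join(); t2.join();
    a.retainAll(b);
    // With synchronized, no region appears in both lists.
    System.out.println("doubly assigned: " + a);
  }
}

Synchronizing the whole method is a coarse lock, but with a single master and a handful of regionservers reporting in, serializing assignment is cheap and removes the race entirely.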
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
@@ -923,7 +923,7 @@ class HStore implements HConstants {
     if (!fs.exists(doneFile)) {
       // The last execution didn't finish the compaction, so there's nothing
       // we can do. We'll just have to redo it. Abandon it and return.
-      LOG.warn("Redoing a failed compaction");
+      LOG.warn("Redo failed compaction (missing 'done' file)");
       return;
     }
 
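The doneFile probed above is a marker written only as the final step of a compaction, so its absence means the previous process died mid-compaction and whatever output it left cannot be trusted. A sketch of the same marker-file recovery pattern using plain java.io/java.nio (directory layout and method names are hypothetical; HStore itself works through Hadoop's FileSystem API):

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

// Marker-file ("done" file) recovery pattern: the previous run's output is
// trusted only if the marker written at the very end of that run exists.
// Names are illustrative, not HStore's.
public class DoneFileCheck {

  static boolean resumeCompaction(File workDir) {
    File doneFile = new File(workDir, "done");
    if (!doneFile.exists()) {
      // The last run never reached the final step; its output may be
      // partial. Abandon it and redo the compaction from scratch.
      System.err.println("Redo failed compaction (missing 'done' file)");
      return false;
    }
    return true; // safe to move the completed result into place
  }

  static void finishCompaction(File workDir) throws IOException {
    // Writing the marker is the last step, after all output is flushed,
    // so a crash at any earlier point leaves no marker behind.
    File doneFile = new File(workDir, "done");
    if (!doneFile.createNewFile()) {
      throw new IOException("done file already exists: " + doneFile);
    }
  }

  public static void main(String[] args) throws IOException {
    File workDir = Files.createTempDirectory("compaction").toFile();
    System.out.println("resumable: " + resumeCompaction(workDir)); // false
    finishCompaction(workDir);
    System.out.println("resumable: " + resumeCompaction(workDir)); // true
  }
}

Because the marker is created last, a crash at any earlier point leaves no marker, and the next run knows to discard the partial output and redo the work.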
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
@@ -986,10 +986,11 @@ class HStore implements HConstants {
         this.readers.put(orderVal,
           finalCompactedFile.getReader(this.fs, this.bloomFilter));
         this.storefiles.put(orderVal, finalCompactedFile);
-      } finally {
-        LOG.warn("Failed replacing compacted files. Compacted fle is " +
+      } catch (IOException e) {
+        LOG.error("Failed replacing compacted files. Compacted file is " +
           finalCompactedFile.toString() + ". Files replaced are " +
-          toCompactFiles.toString() + " some of which may have been removed");
+          toCompactFiles.toString() +
+          " some of which may have been already removed", e);
       }
     } finally {
       // 7. Releasing the write-lock
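This second HStore change is the "fail message on each compaction though there was none" from the commit message: the warning lived in a finally block, which runs on success as well as failure, so every compaction logged a failure. Moving it into catch (IOException e) fires it only on an actual failure and attaches the exception to the log call. A stripped-down illustration of the difference (method names are hypothetical):

import java.io.IOException;

// finally runs unconditionally; catch runs only when the exception is
// actually thrown. The buggy shape logged "failed" on every call.
public class FinallyVsCatch {

  static void replaceFilesBuggy() throws IOException {
    try {
      moveCompactedFileIntoPlace(); // may throw IOException
    } finally {
      // BUG: executes even when the move succeeded.
      System.err.println("Failed replacing compacted files");
    }
  }

  static void replaceFilesFixed() {
    try {
      moveCompactedFileIntoPlace();
    } catch (IOException e) {
      // Only on a real failure, and the exception is preserved.
      System.err.println("Failed replacing compacted files: " + e);
    }
  }

  // Stand-in for the real file-swap work; hypothetical no-op.
  static void moveCompactedFileIntoPlace() throws IOException {
  }

  public static void main(String[] args) throws IOException {
    replaceFilesBuggy(); // prints the failure message despite succeeding
    replaceFilesFixed(); // prints nothing
  }
}

Passing e as the last argument also upgrades the entry from a bare message to one carrying the stack trace, which the original LOG.warn threw away.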
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
@@ -644,15 +644,15 @@ public class HStoreFile implements HConstants, WritableComparable {
     boolean success = fs.rename(src, hsf.getMapFilePath());
     if (!success) {
       LOG.warn("Failed rename of " + src + " to " + hsf.getMapFilePath());
-      return success;
-    }
-    src = getInfoFilePath();
-    if (!fs.exists(src)) {
-      throw new FileNotFoundException(src.toString());
-    }
-    success = fs.rename(src, hsf.getInfoFilePath());
-    if (!success) {
-      LOG.warn("Failed rename of " + src + " to " + hsf.getInfoFilePath());
+    } else {
+      src = getInfoFilePath();
+      if (!fs.exists(src)) {
+        throw new FileNotFoundException(src.toString());
+      }
+      success = fs.rename(src, hsf.getInfoFilePath());
+      if (!success) {
+        LOG.warn("Failed rename of " + src + " to " + hsf.getInfoFilePath());
+      }
     }
     return success;
   }
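The HStoreFile.rename refactor changes no behavior on the success path: it replaces the early "return success;" in the first failure branch with an else, so the method has a single exit, which is what the commit message means by "return only happens on end". A compact sketch of the same two-step rename shape using java.nio.file (paths are hypothetical; the real method renames a MapFile and its companion info file through Hadoop's FileSystem):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// Two-step rename with a single exit point: the second rename is attempted
// only if the first succeeded, and both outcomes funnel into one return at
// the end, as in the refactored HStoreFile.rename. Paths are illustrative.
public class TwoStepRename {

  static boolean rename(Path mapSrc, Path mapDst, Path infoSrc, Path infoDst)
      throws IOException {
    boolean success = tryMove(mapSrc, mapDst);
    if (!success) {
      System.err.println("Failed rename of " + mapSrc + " to " + mapDst);
    } else {
      success = tryMove(infoSrc, infoDst);
      if (!success) {
        System.err.println("Failed rename of " + infoSrc + " to " + infoDst);
      }
    }
    return success; // single exit
  }

  static boolean tryMove(Path src, Path dst) throws IOException {
    if (!Files.exists(src)) {
      throw new IOException(src + " not found"); // mirrors the exists() check
    }
    try {
      Files.move(src, dst);
      return true;
    } catch (IOException e) {
      return false;
    }
  }

  public static void main(String[] args) throws IOException {
    Path dir = Files.createTempDirectory("rename-demo");
    Path mapSrc = Files.createFile(dir.resolve("map.src"));
    Path infoSrc = Files.createFile(dir.resolve("info.src"));
    boolean ok = rename(mapSrc, dir.resolve("map.dst"),
        infoSrc, dir.resolve("info.dst"));
    System.out.println("rename succeeded: " + ok);
  }
}

With a single exit, any later cleanup or logging added before the return covers every path, which is harder to guarantee once early returns accumulate.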