HADOOP-1780 Regions are still being doubly assigned

M  src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
    Fix outputting a failure message on each compaction even though
    there was none.
M  src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
    (rename): Refactor so the return only happens at the end.
M  src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
    (assignRegions): Make synchronized.  In the presence of concurrent
    visits by regionservers, both visiting threads could grab the same
    set of regions for assignment (see the sketch below).
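
The race is a textbook check-then-act problem. A minimal, hypothetical
sketch (simplified names and bookkeeping, not the real HMaster code) of
how two unsynchronized report threads can both pick up the same region,
and how marking the method synchronized closes the window:

    import java.util.TreeSet;

    // Hypothetical sketch of the HADOOP-1780 race, not the real
    // HMaster bookkeeping.
    class AssignmentSketch {
      private final TreeSet<String> unassigned = new TreeSet<>();
      { unassigned.add("region-A"); }

      // Without 'synchronized', two regionserver-report threads can both
      // run first() before either reaches remove(), so both servers are
      // handed "region-A".  Synchronizing makes check-then-remove atomic,
      // which is the HMaster change in the diff below.
      synchronized void assignRegions(String serverName) {
        if (!unassigned.isEmpty()) {
          String region = unassigned.first(); // check ...
          unassigned.remove(region);          // ... then act: no interleaving
          System.out.println("assigning " + region + " to " + serverName);
        }
      }

      public static void main(String[] args) throws InterruptedException {
        AssignmentSketch master = new AssignmentSketch();
        Thread t1 = new Thread(() -> master.assignRegions("server1"));
        Thread t2 = new Thread(() -> master.assignRegions("server2"));
        t1.start(); t2.start();
        t1.join(); t2.join();
      }
    }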



git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@569589 13f79535-47bb-0310-9956-ffa450edef68
Author: Michael Stack, 2007-08-25 00:42:45 +00:00
Commit: e9aafde1f1 (parent: 07224fe303)
4 changed files with 16 additions and 14 deletions

src/contrib/hbase/CHANGES.txt

@@ -20,6 +20,7 @@ Trunk (unreleased changes)
     HADOOP-1747 On a cluster, on restart, regions multiply assigned
     HADOOP-1776 Fix for sporadic compaction failures closing and moving
                 compaction result
+    HADOOP-1780 Regions are still being doubly assigned
 
   IMPROVEMENTS
     HADOOP-1737 Make HColumnDescriptor data publically members settable

src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java

@@ -1468,7 +1468,7 @@ HMasterRegionInterface, Runnable {
    * @param serverName
    * @param returnMsgs
    */
-  private void assignRegions(HServerInfo info, String serverName,
+  private synchronized void assignRegions(HServerInfo info, String serverName,
       ArrayList<HMsg> returnMsgs) {
     TreeSet<Text> regionsToAssign = getRegionsToAssign();

src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java

@@ -923,7 +923,7 @@ class HStore implements HConstants {
     if (!fs.exists(doneFile)) {
       // The last execution didn't finish the compaction, so there's nothing
       // we can do. We'll just have to redo it. Abandon it and return.
-      LOG.warn("Redoing a failed compaction");
+      LOG.warn("Redo failed compaction (missing 'done' file)");
       return;
     }
@@ -986,10 +986,11 @@ class HStore implements HConstants {
       this.readers.put(orderVal,
         finalCompactedFile.getReader(this.fs, this.bloomFilter));
       this.storefiles.put(orderVal, finalCompactedFile);
-    } finally {
-      LOG.warn("Failed replacing compacted files. Compacted fle is " +
+    } catch (IOException e) {
+      LOG.error("Failed replacing compacted files. Compacted file is " +
         finalCompactedFile.toString() + ". Files replaced are " +
-        toCompactFiles.toString() + " some of which may have been removed");
+        toCompactFiles.toString() +
+        " some of which may have been already removed", e);
     }
   } finally {
     // 7. Releasing the write-lock
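
The second HStore hunk is the "fail message on each compaction" fix from
the commit message: the old code logged from a finally block, which runs
on success as well as failure. A minimal sketch, with a hypothetical
stand-in for the store-file swap:

    import java.io.IOException;

    // Hypothetical sketch, not the HStore code: 'finally' runs on every
    // exit, 'catch' only on failure.
    class CompactionLogSketch {
      void oldWay() throws IOException {
        try {
          swapInCompactedFile();
        } finally {
          // Runs on success AND failure: every compaction logged this.
          System.err.println("Failed replacing compacted files");
        }
      }

      void fixedWay() {
        try {
          swapInCompactedFile();
        } catch (IOException e) {
          // Runs only when the swap actually failed.
          System.err.println("Failed replacing compacted files: " + e);
        }
      }

      // Stand-in for the real swap, which may throw on filesystem errors.
      void swapInCompactedFile() throws IOException {
      }
    }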

src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java

@@ -644,8 +644,7 @@ public class HStoreFile implements HConstants, WritableComparable {
     boolean success = fs.rename(src, hsf.getMapFilePath());
     if (!success) {
       LOG.warn("Failed rename of " + src + " to " + hsf.getMapFilePath());
-      return success;
-    }
+    } else {
       src = getInfoFilePath();
       if (!fs.exists(src)) {
         throw new FileNotFoundException(src.toString());
@@ -654,6 +653,7 @@ public class HStoreFile implements HConstants, WritableComparable {
       if (!success) {
         LOG.warn("Failed rename of " + src + " to " + hsf.getInfoFilePath());
       }
+    }
     return success;
   }
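
Read together, the two HStoreFile hunks give rename() a single exit
point, per the commit message. A sketch of the resulting flow; the
signature, the path fields, and the elided second fs.rename() call are
assumptions for illustration, not the verbatim source:

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch of the post-patch control flow reconstructed from the hunks
    // above; the fields stand in for HStoreFile's map/info path helpers.
    class RenameFlowSketch {
      private static final Log LOG = LogFactory.getLog(RenameFlowSketch.class);
      final Path mapFilePath;
      final Path infoFilePath;

      RenameFlowSketch(Path map, Path info) {
        this.mapFilePath = map;
        this.infoFilePath = info;
      }

      boolean rename(FileSystem fs, RenameFlowSketch hsf) throws IOException {
        Path src = mapFilePath;
        boolean success = fs.rename(src, hsf.mapFilePath);
        if (!success) {
          LOG.warn("Failed rename of " + src + " to " + hsf.mapFilePath);
        } else {
          src = infoFilePath;
          if (!fs.exists(src)) {
            throw new FileNotFoundException(src.toString());
          }
          success = fs.rename(src, hsf.infoFilePath); // assumed elided line
          if (!success) {
            LOG.warn("Failed rename of " + src + " to " + hsf.infoFilePath);
          }
        }
        return success; // single exit; reflects whichever rename ran last
      }
    }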