diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 4495ef97736..8f061a54303 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -1174,7 +1174,7 @@ public class HStore implements Store {
    * Since we already have this data, this will be idempotent but we will have a redundant
    * copy of the data.
    * - If RS fails between 2 and 3, the region will have a redundant copy of the data. The
-   * RS that failed won't be able to finish sync() for WAL because of lease recovery in WAL.
+   * RS that failed won't be able to finish snyc() for WAL because of lease recovery in WAL.
    * - If RS fails after 3, the region region server who opens the region will pick up the
    * the compaction marker from the WAL and replay it by removing the compaction input files.
    * Failed RS can also attempt to delete those files, but the operation will be idempotent
@@ -1218,7 +1218,7 @@ public class HStore implements Store {
         + TraditionalBinaryPrefix.long2String(cr.getSize(), "", 1));
 
     // Commence the compaction.
-    List<Path> newFiles = compaction.compact(throughputController, user, region.lock.readLock());
+    List<Path> newFiles = compaction.compact(throughputController, user);
 
     // TODO: get rid of this!
     if (!this.conf.getBoolean("hbase.hstore.compaction.complete", true)) {
@@ -1232,24 +1232,19 @@ public class HStore implements Store {
       }
       return sfs;
     }
-    // Do the steps necessary to complete the compaction. Hold region open for these operations.
-    region.lock.readLock().lock();
-    try {
-      sfs = moveCompatedFilesIntoPlace(cr, newFiles, user);
-      writeCompactionWalRecord(filesToCompact, sfs);
-      replaceStoreFiles(filesToCompact, sfs);
-      if (cr.isMajor()) {
-        majorCompactedCellsCount += getCompactionProgress().totalCompactingKVs;
-        majorCompactedCellsSize += getCompactionProgress().totalCompactedSize;
-      } else {
-        compactedCellsCount += getCompactionProgress().totalCompactingKVs;
-        compactedCellsSize += getCompactionProgress().totalCompactedSize;
-      }
-      // At this point the store will use new files for all new scanners.
-      completeCompaction(filesToCompact, true); // Archive old files & update store size.
-    } finally {
-      region.lock.readLock().unlock();
+    // Do the steps necessary to complete the compaction.
+    sfs = moveCompatedFilesIntoPlace(cr, newFiles, user);
+    writeCompactionWalRecord(filesToCompact, sfs);
+    replaceStoreFiles(filesToCompact, sfs);
+    if (cr.isMajor()) {
+      majorCompactedCellsCount += getCompactionProgress().totalCompactingKVs;
+      majorCompactedCellsSize += getCompactionProgress().totalCompactedSize;
+    } else {
+      compactedCellsCount += getCompactionProgress().totalCompactingKVs;
+      compactedCellsSize += getCompactionProgress().totalCompactedSize;
     }
+    // At this point the store will use new files for all new scanners.
+    completeCompaction(filesToCompact, true); // Archive old files & update store size.
 
     logCompactionEndMessage(cr, sfs, compactionStartTime);
     return sfs;
@@ -1448,7 +1443,6 @@ public class HStore implements Store {
    * but instead makes a compaction candidate list by itself.
    * @param N Number of files.
    */
-  @VisibleForTesting
   public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOException {
     List<StoreFile> filesToCompact;
     boolean isMajor;
@@ -2129,11 +2123,7 @@ public class HStore implements Store {
   public long getTotalStaticIndexSize() {
     long size = 0;
     for (StoreFile s : this.storeEngine.getStoreFileManager().getStorefiles()) {
-      StoreFile.Reader r = s.getReader();
-      if (r == null) {
-        continue;
-      }
-      size += r.getUncompressedDataIndexSize();
+      size += s.getReader().getUncompressedDataIndexSize();
     }
     return size;
   }
@@ -2143,9 +2133,6 @@ public class HStore implements Store {
     long size = 0;
     for (StoreFile s : this.storeEngine.getStoreFileManager().getStorefiles()) {
       StoreFile.Reader r = s.getReader();
-      if (r == null) {
-        continue;
-      }
       size += r.getTotalBloomSize();
     }
     return size;
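
For context on the hunks at lines 1218 and 1232: they revert a region read-lock guard around the compaction-commit path. Below is a minimal sketch of that locking difference, using a plain ReentrantReadWriteLock and a hypothetical runCompletionSteps() placeholder rather than the real HStore internals; it is an illustration of the pattern, not the actual HBase code.

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Sketch only: stand-ins for the HStore compaction-commit path.
class CompactionCommitSketch {
  private final ReentrantReadWriteLock regionLock = new ReentrantReadWriteLock();

  // Placeholder for moveCompatedFilesIntoPlace + writeCompactionWalRecord
  // + replaceStoreFiles + completeCompaction.
  private void runCompletionSteps() { }

  // Shape of the code this patch removes: the region read lock is held for
  // the whole commit, so a concurrent region close (which takes the write
  // lock) cannot interleave with it.
  void commitGuarded() {
    regionLock.readLock().lock();
    try {
      runCompletionSteps();
    } finally {
      regionLock.readLock().unlock();
    }
  }

  // Shape of the code this patch restores: the commit runs without taking
  // the region lock.
  void commitUnguarded() {
    runCompletionSteps();
  }
}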
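Similarly, for the getTotalStaticIndexSize/getTotalStaticBloomSize hunks: a self-contained sketch (hypothetical Reader/StoreFileLike stand-ins, not the real StoreFile API) contrasting the defensive iteration the patch removes with the unchecked form it restores. The unchecked form throws NullPointerException if any store file's reader is not open.

import java.util.Arrays;
import java.util.List;

public class ReaderNullCheckSketch {

  // Stand-in for StoreFile.Reader.
  interface Reader {
    long getUncompressedDataIndexSize();
  }

  // Stand-in for StoreFile; getReader() may return null for a file whose
  // reader is not (or no longer) open.
  interface StoreFileLike {
    Reader getReader();
  }

  // Pre-patch style: skip files without an open reader.
  static long totalIndexSizeDefensive(List<StoreFileLike> files) {
    long size = 0;
    for (StoreFileLike s : files) {
      Reader r = s.getReader();
      if (r == null) {
        continue; // reader not open; nothing to add
      }
      size += r.getUncompressedDataIndexSize();
    }
    return size;
  }

  // Post-patch style: assumes every store file has an open reader and NPEs
  // if that assumption is violated.
  static long totalIndexSize(List<StoreFileLike> files) {
    long size = 0;
    for (StoreFileLike s : files) {
      size += s.getReader().getUncompressedDataIndexSize();
    }
    return size;
  }

  public static void main(String[] args) {
    StoreFileLike open = () -> () -> 128L;  // reader reports 128 bytes
    StoreFileLike closed = () -> null;      // reader not open
    System.out.println(totalIndexSizeDefensive(Arrays.asList(open, closed))); // 128
    System.out.println(totalIndexSize(Arrays.asList(open)));                  // 128
  }
}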