From 0789e15b5e34dcafd6adfb08b4474ea1b4628265 Mon Sep 17 00:00:00 2001
From: zhangduo
Date: Wed, 27 Jun 2018 16:45:53 +0800
Subject: [PATCH] HBASE-20790 Fix the style issues on branch HBASE-19064 before
 merging back to master

---
 .../hbase/client/RawAsyncHBaseAdmin.java      |  26 ++-
 .../replication/ReplicationPeerConfig.java    |   1 +
 .../hadoop/hbase/regionserver/HRegion.java    | 148 +++++++++---------
 .../TestSyncReplicationStandBy.java           |   3 +-
 4 files changed, 89 insertions(+), 89 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 963cca794ee..6dc0555210e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -1594,30 +1594,28 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
 
   @Override
   public CompletableFuture<ReplicationPeerConfig> getReplicationPeerConfig(String peerId) {
-    return this
-        .<ReplicationPeerConfig> newMasterCaller()
-        .action(
-          (controller, stub) -> this
-              .<GetReplicationPeerConfigRequest, GetReplicationPeerConfigResponse, ReplicationPeerConfig> call(
-                controller, stub, RequestConverter.buildGetReplicationPeerConfigRequest(peerId), (
-                    s, c, req, done) -> s.getReplicationPeerConfig(c, req, done),
-                (resp) -> ReplicationPeerConfigUtil.convert(resp.getPeerConfig()))).call();
+    return this.<ReplicationPeerConfig> newMasterCaller().action((controller, stub) -> this
+      .<GetReplicationPeerConfigRequest, GetReplicationPeerConfigResponse, ReplicationPeerConfig> call(
+        controller, stub, RequestConverter.buildGetReplicationPeerConfigRequest(peerId),
+        (s, c, req, done) -> s.getReplicationPeerConfig(c, req, done),
+        (resp) -> ReplicationPeerConfigUtil.convert(resp.getPeerConfig())))
+      .call();
   }
 
   @Override
   public CompletableFuture<Void> updateReplicationPeerConfig(String peerId,
       ReplicationPeerConfig peerConfig) {
     return this
-        .<UpdateReplicationPeerConfigRequest, UpdateReplicationPeerConfigResponse> procedureCall(
-          RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig),
-          (s, c, req, done) -> s.updateReplicationPeerConfig(c, req, done),
-          (resp) -> resp.getProcId(),
-          new ReplicationProcedureBiConsumer(peerId, () -> "UPDATE_REPLICATION_PEER_CONFIG"));
+      .<UpdateReplicationPeerConfigRequest, UpdateReplicationPeerConfigResponse> procedureCall(
+        RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig),
+        (s, c, req, done) -> s.updateReplicationPeerConfig(c, req, done),
+        (resp) -> resp.getProcId(),
+        new ReplicationProcedureBiConsumer(peerId, () -> "UPDATE_REPLICATION_PEER_CONFIG"));
   }
 
   @Override
   public CompletableFuture<Void> transitReplicationPeerSyncReplicationState(String peerId,
-    SyncReplicationState clusterState) {
+      SyncReplicationState clusterState) {
     return this
       .<TransitReplicationPeerSyncReplicationStateRequest, TransitReplicationPeerSyncReplicationStateResponse> procedureCall(
         RequestConverter.buildTransitReplicationPeerSyncReplicationStateRequest(peerId,
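Note: the three reformatted methods above are internal plumbing behind the public AsyncAdmin interface, which is how applications reach them. A minimal usage sketch, assuming a reachable cluster and a hypothetical peer id "1" (the peer id and the printed field are example values, not part of this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.SyncReplicationState;

public class TransitPeerStateExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // createAsyncConnection returns a CompletableFuture; get() blocks until ready.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = conn.getAdmin();
      // Read the peer config, then ask the master to flip the peer to STANDBY;
      // both calls land in the methods reformatted by this patch.
      ReplicationPeerConfig peerConfig = admin.getReplicationPeerConfig("1").get();
      System.out.println("remote WAL dir: " + peerConfig.getRemoteWALDir());
      admin.transitReplicationPeerSyncReplicationState("1", SyncReplicationState.STANDBY).get();
    }
  }
}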
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index cc7b4bc65b4..86c829b3df6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -344,6 +344,7 @@ public class ReplicationPeerConfig {
     return this;
   }
 
+  @Override
   public ReplicationPeerConfigBuilder setRemoteWALDir(String dir) {
     this.remoteWALDir = dir;
     return this;
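The added @Override simply marks that setRemoteWALDir implements the method declared on the ReplicationPeerConfigBuilder interface. A small sketch of that builder in use; the cluster key and WAL directory are made-up example values:

import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class PeerConfigBuilderExample {
  public static void main(String[] args) {
    // Example values only; substitute a real peer ZK quorum and HDFS path.
    ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
      .setClusterKey("peer-zk1,peer-zk2,peer-zk3:2181:/hbase")
      .setRemoteWALDir("hdfs://peer-cluster/hbase/remoteWALs")
      .build();
    System.out.println(peerConfig.getRemoteWALDir());
  }
}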
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index e3c2ca3cb56..7771ad0476d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2001,6 +2001,80 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return false;
   }
 
+  /**
+   * We are trying to remove / relax the region read lock for compaction.
+   * Let's see what are the potential race conditions among the operations (user scan,
+   * region split, region close and region bulk load).
+   *
+   * user scan ---> region read lock
+   * region split --> region close first --> region write lock
+   * region close --> region write lock
+   * region bulk load --> region write lock
+   *
+   * read lock is compatible with read lock. ---> no problem with user scan/read
+   * region bulk load does not cause problem for compaction (no consistency problem, store lock
+   * will help the store file accounting).
+   * They can run almost concurrently at the region level.
+   *
+   * The only remaining race condition is between the region close and compaction.
+   * So we will evaluate, below, how region close intervenes with compaction if compaction does
+   * not acquire region read lock.
+   *
+   * Here are the steps for compaction:
+   * 1. obtain list of StoreFile's
+   * 2. create StoreFileScanner's based on list from #1
+   * 3. perform compaction and save resulting files under tmp dir
+   * 4. swap in compacted files
+   *
+   * #1 is guarded by store lock. This patch does not change this --> no worse or better
+   * For #2, we obtain smallest read point (for region) across all the Scanners (for both default
+   * compactor and stripe compactor).
+   * The read points are for user scans. Region keeps the read points for all currently open
+   * user scanners.
+   * Compaction needs to know the smallest read point so that during re-write of the hfiles,
+   * it can remove the mvcc points for the cells if their mvccs are older than the smallest
+   * since they are not needed anymore.
+   * This will not conflict with compaction.
+   * For #3, it can be performed in parallel to other operations.
+   * For #4 bulk load and compaction don't conflict with each other on the region level
+   * (for multi-family atomicity).
+   * Region close and compaction are guarded pretty well by the 'writestate'.
+   * In HRegion#doClose(), we have :
+   * synchronized (writestate) {
+   *   // Disable compacting and flushing by background threads for this
+   *   // region.
+   *   canFlush = !writestate.readOnly;
+   *   writestate.writesEnabled = false;
+   *   LOG.debug("Closing " + this + ": disabling compactions & flushes");
+   *   waitForFlushesAndCompactions();
+   * }
+   * waitForFlushesAndCompactions() would wait for writestate.compacting to come down to 0.
+   * and in HRegion.compact()
+   *  try {
+   *    synchronized (writestate) {
+   *      if (writestate.writesEnabled) {
+   *        wasStateSet = true;
+   *        ++writestate.compacting;
+   *      } else {
+   *        String msg = "NOT compacting region " + this + ". Writes disabled.";
+   *        LOG.info(msg);
+   *        status.abort(msg);
+   *        return false;
+   *      }
+   *    }
+   *  Also in compactor.performCompaction():
+   *  check periodically to see if a system stop is requested
+   *  if (closeCheckInterval > 0) {
+   *    bytesWritten += len;
+   *    if (bytesWritten > closeCheckInterval) {
+   *      bytesWritten = 0;
+   *      if (!store.areWritesEnabled()) {
+   *        progress.cancel();
+   *        return false;
+   *      }
+   *    }
+   *  }
+   */
   public boolean compact(CompactionContext compaction, HStore store,
       ThroughputController throughputController, User user) throws IOException {
     assert compaction != null && compaction.hasSelection();
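The handshake this javadoc describes boils down to a monitor around a writes-enabled flag and a compaction counter: close() flips the flag and waits for the counter to drain, while compactions bump the counter only while the flag is still set. The following is a simplified, self-contained illustration of that protocol; WriteStateSketch and its methods are invented for the example and are not the real HRegion code. (The second hunk below removes the same comment from its old location inside compact().)

public class WriteStateSketch {
  private boolean writesEnabled = true;
  private int compacting = 0;

  // Region close: disable writes first, then wait for in-flight compactions,
  // mirroring doClose() -> waitForFlushesAndCompactions() in the comment above.
  public synchronized void close() throws InterruptedException {
    writesEnabled = false;
    while (compacting > 0) {
      wait();
    }
  }

  // Compaction entry: refuse to start once writes are disabled, mirroring the
  // writestate check at the top of HRegion.compact().
  public synchronized boolean beginCompaction() {
    if (!writesEnabled) {
      return false; // "NOT compacting region ... Writes disabled."
    }
    ++compacting;
    return true;
  }

  // Compaction exit: wake any close() waiting for the counter to reach zero.
  public synchronized void endCompaction() {
    --compacting;
    notifyAll();
  }
}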
@@ -2021,80 +2095,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
     MonitoredTask status = null;
     boolean requestNeedsCancellation = true;
-    /*
-     * We are trying to remove / relax the region read lock for compaction.
-     * Let's see what are the potential race conditions among the operations (user scan,
-     * region split, region close and region bulk load).
-     *
-     * user scan ---> region read lock
-     * region split --> region close first --> region write lock
-     * region close --> region write lock
-     * region bulk load --> region write lock
-     *
-     * read lock is compatible with read lock. ---> no problem with user scan/read
-     * region bulk load does not cause problem for compaction (no consistency problem, store lock
-     * will help the store file accounting).
-     * They can run almost concurrently at the region level.
-     *
-     * The only remaining race condition is between the region close and compaction.
-     * So we will evaluate, below, how region close intervenes with compaction if compaction does
-     * not acquire region read lock.
-     *
-     * Here are the steps for compaction:
-     * 1. obtain list of StoreFile's
-     * 2. create StoreFileScanner's based on list from #1
-     * 3. perform compaction and save resulting files under tmp dir
-     * 4. swap in compacted files
-     *
-     * #1 is guarded by store lock. This patch does not change this --> no worse or better
-     * For #2, we obtain smallest read point (for region) across all the Scanners (for both default
-     * compactor and stripe compactor).
-     * The read points are for user scans. Region keeps the read points for all currently open
-     * user scanners.
-     * Compaction needs to know the smallest read point so that during re-write of the hfiles,
-     * it can remove the mvcc points for the cells if their mvccs are older than the smallest
-     * since they are not needed anymore.
-     * This will not conflict with compaction.
-     * For #3, it can be performed in parallel to other operations.
-     * For #4 bulk load and compaction don't conflict with each other on the region level
-     * (for multi-family atomicy).
-     * Region close and compaction are guarded pretty well by the 'writestate'.
-     * In HRegion#doClose(), we have :
-     * synchronized (writestate) {
-     *   // Disable compacting and flushing by background threads for this
-     *   // region.
-     *   canFlush = !writestate.readOnly;
-     *   writestate.writesEnabled = false;
-     *   LOG.debug("Closing " + this + ": disabling compactions & flushes");
-     *   waitForFlushesAndCompactions();
-     * }
-     * waitForFlushesAndCompactions() would wait for writestate.compacting to come down to 0.
-     * and in HRegion.compact()
-     *  try {
-     *    synchronized (writestate) {
-     *      if (writestate.writesEnabled) {
-     *        wasStateSet = true;
-     *        ++writestate.compacting;
-     *      } else {
-     *        String msg = "NOT compacting region " + this + ". Writes disabled.";
-     *        LOG.info(msg);
-     *        status.abort(msg);
-     *        return false;
-     *      }
-     *    }
-     *  Also in compactor.performCompaction():
-     *  check periodically to see if a system stop is requested
-     *  if (closeCheckInterval > 0) {
-     *    bytesWritten += len;
-     *    if (bytesWritten > closeCheckInterval) {
-     *      bytesWritten = 0;
-     *      if (!store.areWritesEnabled()) {
-     *        progress.cancel();
-     *        return false;
-     *      }
-     *    }
-     *  }
-     */
     try {
       byte[] cf = Bytes.toBytes(store.getColumnFamilyName());
       if (stores.get(cf) != store) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java
index de409fce83a..3bfd9a8a481 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java
@@ -21,10 +21,10 @@ import static org.hamcrest.CoreMatchers.containsString;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Arrays;
-
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -62,6 +62,7 @@ public class TestSyncReplicationStandBy extends SyncReplicationTestBase {
   private void assertDisallow(Table table, TableAction action) throws IOException {
     try {
       action.call(table);
+      fail("Should not allow the action");
     } catch (DoNotRetryIOException | RetriesExhaustedException e) {
       // expected
       assertThat(e.getMessage(), containsString("STANDBY"));
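The added fail(...) closes a real hole in assertDisallow: previously, if the action unexpectedly succeeded on the STANDBY cluster, the try block fell through and the test still passed. The general shape of the try/fail/catch pattern, with a placeholder operation and exception type (not HBase API):

import static org.junit.Assert.fail;

public class ExpectedExceptionPattern {
  // Placeholder for an operation that must be rejected.
  static void forbiddenAction() {
    throw new IllegalStateException("STANDBY: operation not allowed");
  }

  public static void main(String[] args) {
    try {
      forbiddenAction();
      fail("Should not allow the action"); // reached only if no exception was thrown
    } catch (IllegalStateException e) {
      // expected path; assert on e.getMessage() as the test does
    }
  }
}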