HBASE-20790 Fix the style issues on branch HBASE-19064 before merging back to master

zhangduo 2018-06-27 16:45:53 +08:00
parent a84cdbd579
commit 0789e15b5e
4 changed files with 89 additions and 89 deletions

hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java

@@ -1594,30 +1594,28 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
   @Override
   public CompletableFuture<ReplicationPeerConfig> getReplicationPeerConfig(String peerId) {
-    return this
-      .<ReplicationPeerConfig> newMasterCaller()
-      .action(
-        (controller, stub) -> this
-          .<GetReplicationPeerConfigRequest, GetReplicationPeerConfigResponse, ReplicationPeerConfig> call(
-            controller, stub, RequestConverter.buildGetReplicationPeerConfigRequest(peerId), (
-              s, c, req, done) -> s.getReplicationPeerConfig(c, req, done),
-            (resp) -> ReplicationPeerConfigUtil.convert(resp.getPeerConfig()))).call();
+    return this.<ReplicationPeerConfig> newMasterCaller().action((controller, stub) -> this
+      .<GetReplicationPeerConfigRequest, GetReplicationPeerConfigResponse, ReplicationPeerConfig> call(
+        controller, stub, RequestConverter.buildGetReplicationPeerConfigRequest(peerId),
+        (s, c, req, done) -> s.getReplicationPeerConfig(c, req, done),
+        (resp) -> ReplicationPeerConfigUtil.convert(resp.getPeerConfig())))
+      .call();
   }
 
   @Override
   public CompletableFuture<Void> updateReplicationPeerConfig(String peerId,
       ReplicationPeerConfig peerConfig) {
     return this
       .<UpdateReplicationPeerConfigRequest, UpdateReplicationPeerConfigResponse> procedureCall(
         RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig),
         (s, c, req, done) -> s.updateReplicationPeerConfig(c, req, done),
         (resp) -> resp.getProcId(),
         new ReplicationProcedureBiConsumer(peerId, () -> "UPDATE_REPLICATION_PEER_CONFIG"));
   }
 
   @Override
   public CompletableFuture<Void> transitReplicationPeerSyncReplicationState(String peerId,
       SyncReplicationState clusterState) {
     return this
       .<TransitReplicationPeerSyncReplicationStateRequest, TransitReplicationPeerSyncReplicationStateResponse> procedureCall(
         RequestConverter.buildTransitReplicationPeerSyncReplicationStateRequest(peerId,
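The reflowed chains above all follow the same admin-RPC shape: build a protobuf request, hand it to an async caller as a lambda, and convert the response into the future's result. A minimal, self-contained sketch of that shape, assuming nothing from HBase (the MasterCallerSketch names and the String-based "service" below are invented for illustration; the real newMasterCaller()/procedureCall() plumbing adds controllers, stubs, retries and timeouts):

    import java.util.concurrent.CompletableFuture;
    import java.util.function.Function;
    import java.util.function.Supplier;

    public final class MasterCallerSketch {

      // Stand-in for the async protobuf RPC: run the service off-thread and
      // complete the future with its response.
      static <REQ, RESP> CompletableFuture<RESP> rpc(REQ request,
          Function<REQ, RESP> service) {
        return CompletableFuture.supplyAsync(() -> service.apply(request));
      }

      // Mirrors the shape of getReplicationPeerConfig above: request builder,
      // async call, then response-to-result conversion.
      static <REQ, RESP, T> CompletableFuture<T> call(Supplier<REQ> requestBuilder,
          Function<REQ, RESP> service, Function<RESP, T> converter) {
        return rpc(requestBuilder.get(), service).thenApply(converter);
      }

      public static void main(String[] args) {
        // "Build a request" (a peer id), "call the service" (string length),
        // "convert the response" (double it).
        CompletableFuture<Integer> result =
            call(() -> "peer-1", String::length, len -> len * 2);
        System.out.println(result.join()); // prints 12
      }
    }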

hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java

@@ -344,6 +344,7 @@ public class ReplicationPeerConfig {
     return this;
   }
 
+  @Override
   public ReplicationPeerConfigBuilder setRemoteWALDir(String dir) {
     this.remoteWALDir = dir;
     return this;
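The only change in this hunk is the added @Override. It documents that the builder method implements a ReplicationPeerConfigBuilder interface method and lets the compiler catch signature drift. A minimal sketch of what the annotation buys, using hypothetical PeerConfigBuilder names rather than the real HBase interfaces:

    // Hypothetical builder interface standing in for ReplicationPeerConfigBuilder.
    interface PeerConfigBuilder {
      PeerConfigBuilder setRemoteWALDir(String dir);
    }

    final class PeerConfigBuilderImpl implements PeerConfigBuilder {
      private String remoteWALDir;

      // If the interface method were later renamed or its signature changed,
      // this @Override would turn a silent mismatch into a compile error.
      @Override
      public PeerConfigBuilder setRemoteWALDir(String dir) {
        this.remoteWALDir = dir;
        return this;
      }
    }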

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java

@@ -2001,6 +2001,80 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return false;
   }
 
+  /**
+   * We are trying to remove / relax the region read lock for compaction.
+   * Let's see what are the potential race conditions among the operations (user scan,
+   * region split, region close and region bulk load).
+   *
+   * user scan ---> region read lock
+   * region split --> region close first --> region write lock
+   * region close --> region write lock
+   * region bulk load --> region write lock
+   *
+   * read lock is compatible with read lock. ---> no problem with user scan/read
+   * region bulk load does not cause problem for compaction (no consistency problem, store lock
+   * will help the store file accounting).
+   * They can run almost concurrently at the region level.
+   *
+   * The only remaining race condition is between the region close and compaction.
+   * So we will evaluate, below, how region close intervenes with compaction if compaction does
+   * not acquire region read lock.
+   *
+   * Here are the steps for compaction:
+   * 1. obtain list of StoreFile's
+   * 2. create StoreFileScanner's based on list from #1
+   * 3. perform compaction and save resulting files under tmp dir
+   * 4. swap in compacted files
+   *
+   * #1 is guarded by store lock. This patch does not change this --> no worse or better
+   * For #2, we obtain smallest read point (for region) across all the Scanners (for both default
+   * compactor and stripe compactor).
+   * The read points are for user scans. Region keeps the read points for all currently open
+   * user scanners.
+   * Compaction needs to know the smallest read point so that during re-write of the hfiles,
+   * it can remove the mvcc points for the cells if their mvccs are older than the smallest
+   * since they are not needed anymore.
+   * This will not conflict with compaction.
+   * For #3, it can be performed in parallel to other operations.
+   * For #4 bulk load and compaction don't conflict with each other on the region level
+   * (for multi-family atomicity).
+   * Region close and compaction are guarded pretty well by the 'writestate'.
+   * In HRegion#doClose(), we have :
+   * synchronized (writestate) {
+   *   // Disable compacting and flushing by background threads for this
+   *   // region.
+   *   canFlush = !writestate.readOnly;
+   *   writestate.writesEnabled = false;
+   *   LOG.debug("Closing " + this + ": disabling compactions & flushes");
+   *   waitForFlushesAndCompactions();
+   * }
+   * waitForFlushesAndCompactions() would wait for writestate.compacting to come down to 0.
+   * and in HRegion.compact()
+   * try {
+   *   synchronized (writestate) {
+   *     if (writestate.writesEnabled) {
+   *       wasStateSet = true;
+   *       ++writestate.compacting;
+   *     } else {
+   *       String msg = "NOT compacting region " + this + ". Writes disabled.";
+   *       LOG.info(msg);
+   *       status.abort(msg);
+   *       return false;
+   *     }
+   *   }
+   * Also in compactor.performCompaction():
+   * check periodically to see if a system stop is requested
+   * if (closeCheckInterval > 0) {
+   *   bytesWritten += len;
+   *   if (bytesWritten > closeCheckInterval) {
+   *     bytesWritten = 0;
+   *     if (!store.areWritesEnabled()) {
+   *       progress.cancel();
+   *       return false;
+   *     }
+   *   }
+   * }
+   */
   public boolean compact(CompactionContext compaction, HStore store,
       ThroughputController throughputController, User user) throws IOException {
     assert compaction != null && compaction.hasSelection();
@@ -2021,80 +2095,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     MonitoredTask status = null;
     boolean requestNeedsCancellation = true;
-    /*
-     * We are trying to remove / relax the region read lock for compaction.
-     * Let's see what are the potential race conditions among the operations (user scan,
-     * region split, region close and region bulk load).
-     *
-     * user scan ---> region read lock
-     * region split --> region close first --> region write lock
-     * region close --> region write lock
-     * region bulk load --> region write lock
-     *
-     * read lock is compatible with read lock. ---> no problem with user scan/read
-     * region bulk load does not cause problem for compaction (no consistency problem, store lock
-     * will help the store file accounting).
-     * They can run almost concurrently at the region level.
-     *
-     * The only remaining race condition is between the region close and compaction.
-     * So we will evaluate, below, how region close intervenes with compaction if compaction does
-     * not acquire region read lock.
-     *
-     * Here are the steps for compaction:
-     * 1. obtain list of StoreFile's
-     * 2. create StoreFileScanner's based on list from #1
-     * 3. perform compaction and save resulting files under tmp dir
-     * 4. swap in compacted files
-     *
-     * #1 is guarded by store lock. This patch does not change this --> no worse or better
-     * For #2, we obtain smallest read point (for region) across all the Scanners (for both default
-     * compactor and stripe compactor).
-     * The read points are for user scans. Region keeps the read points for all currently open
-     * user scanners.
-     * Compaction needs to know the smallest read point so that during re-write of the hfiles,
-     * it can remove the mvcc points for the cells if their mvccs are older than the smallest
-     * since they are not needed anymore.
-     * This will not conflict with compaction.
-     * For #3, it can be performed in parallel to other operations.
-     * For #4 bulk load and compaction don't conflict with each other on the region level
-     * (for multi-family atomicity).
-     * Region close and compaction are guarded pretty well by the 'writestate'.
-     * In HRegion#doClose(), we have :
-     * synchronized (writestate) {
-     *   // Disable compacting and flushing by background threads for this
-     *   // region.
-     *   canFlush = !writestate.readOnly;
-     *   writestate.writesEnabled = false;
-     *   LOG.debug("Closing " + this + ": disabling compactions & flushes");
-     *   waitForFlushesAndCompactions();
-     * }
-     * waitForFlushesAndCompactions() would wait for writestate.compacting to come down to 0.
-     * and in HRegion.compact()
-     * try {
-     *   synchronized (writestate) {
-     *     if (writestate.writesEnabled) {
-     *       wasStateSet = true;
-     *       ++writestate.compacting;
-     *     } else {
-     *       String msg = "NOT compacting region " + this + ". Writes disabled.";
-     *       LOG.info(msg);
-     *       status.abort(msg);
-     *       return false;
-     *     }
-     *   }
-     * Also in compactor.performCompaction():
-     * check periodically to see if a system stop is requested
-     * if (closeCheckInterval > 0) {
-     *   bytesWritten += len;
-     *   if (bytesWritten > closeCheckInterval) {
-     *     bytesWritten = 0;
-     *     if (!store.areWritesEnabled()) {
-     *       progress.cancel();
-     *       return false;
-     *     }
-     *   }
-     * }
-     */
     try {
       byte[] cf = Bytes.toBytes(store.getColumnFamilyName());
       if (stores.get(cf) != store) {
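The moved comment is the substantive piece of this file's change: it argues that compaction can skip the region read lock because close() and compact() already synchronize on 'writestate'. A minimal, self-contained sketch of that handshake, under simplified, assumed names (this is not HRegion's real API; flushes, error handling and the store-level close check are omitted):

    // Sketch of the 'writestate' guard the comment above describes.
    final class WriteStateSketch {
      private boolean writesEnabled = true;
      private int compacting = 0;

      synchronized boolean beginCompaction() {
        if (!writesEnabled) {
          return false;          // mirrors the "NOT compacting region" branch
        }
        ++compacting;            // mirrors ++writestate.compacting
        return true;
      }

      synchronized void endCompaction() {
        if (--compacting == 0) {
          notifyAll();           // wake a closer blocked in close()
        }
      }

      synchronized void close() throws InterruptedException {
        writesEnabled = false;   // no new compaction can start after this
        while (compacting > 0) { // mirrors waitForFlushesAndCompactions()
          wait();
        }
      }
    }

A compaction thread brackets its work with beginCompaction()/endCompaction(); once close() flips writesEnabled, no new compaction can register, and close() returns only after the last in-flight compaction signals completion. That is why dropping the region read lock leaves no close-versus-compaction race.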

hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java

@@ -21,10 +21,10 @@ import static org.hamcrest.CoreMatchers.containsString;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Arrays;
-
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -62,6 +62,7 @@ public class TestSyncReplicationStandBy extends SyncReplicationTestBase {
   private void assertDisallow(Table table, TableAction action) throws IOException {
     try {
       action.call(table);
+      fail("Should not allow the action");
     } catch (DoNotRetryIOException | RetriesExhaustedException e) {
       // expected
       assertThat(e.getMessage(), containsString("STANDBY"));
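The added fail(...) closes a common hole in expected-exception tests: without it, an action that wrongly succeeds simply falls past the catch block and the test passes. A minimal JUnit 4 sketch of the idiom, with a hypothetical doForbiddenThing() standing in for the real table action:

    import static org.junit.Assert.fail;

    import java.io.IOException;
    import org.junit.Test;

    public class ExpectedExceptionIdiomTest {

      private void doForbiddenThing() throws IOException {
        throw new IOException("STANDBY"); // stands in for the disallowed table op
      }

      @Test
      public void testDisallowed() throws IOException {
        try {
          doForbiddenThing();
          fail("Should not allow the action"); // reached only if nothing was thrown
        } catch (IOException e) {
          // expected; assertions on e.getMessage() would go here
        }
      }
    }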