HBASE-18298 RegionServerServices Interface cleanup for CP expose.

commit 0fcc84cadd
parent 447b591b08
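
The diff below splits the coprocessor-facing view of a region server away from the internal RegionServerServices: coprocessor environments now return a new, limited CoprocessorRegionServerServices, the online-region accessors are renamed (getOnlineRegions becomes getRegions, getFromOnlineRegions becomes getRegion, addToOnlineRegions becomes addRegion, removeFromOnlineRegions becomes removeRegion), and several internal interfaces drop their LimitedPrivate(COPROC) audience. The following is a minimal, hedged sketch of the pattern the commit applies to its own in-tree system coprocessors (SecureBulkLoadEndpoint, TokenProvider, AccessController); the class and field names are illustrative, not part of the commit, and it assumes the RegionObserver interface of this branch, which, as the observers in this diff show, exposes Coprocessor start/stop:

    import java.io.IOException;

    import org.apache.hadoop.hbase.CoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;
    import org.apache.hadoop.hbase.regionserver.CoprocessorRegionServerServices;
    import org.apache.hadoop.hbase.regionserver.RegionServerServices;

    // Hypothetical observer, not part of this commit.
    public class InternalsAwareObserver implements RegionObserver {
      private CoprocessorRegionServerServices cpServices;
      private RegionServerServices internalServices; // only meaningful for in-tree system CPs

      @Override
      public void start(CoprocessorEnvironment env) throws IOException {
        RegionCoprocessorEnvironment regionEnv = (RegionCoprocessorEnvironment) env;
        // Every coprocessor now gets the limited, CP-facing view.
        cpServices = regionEnv.getCoprocessorRegionServerServices();
        // Trusted system coprocessors that still need server internals cast back,
        // exactly as the endpoints touched in this commit do.
        assert cpServices instanceof RegionServerServices;
        internalServices = (RegionServerServices) cpServices;
      }
    }

Application coprocessors should stop at the limited view; the cast back to RegionServerServices is only valid for trusted code running inside the region server process, which is how the in-tree endpoints in this diff use it.
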
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase;
 
 import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
 
 /**
  * Interface to support the aborting of a given server or client.
@@ -29,8 +28,7 @@ import org.apache.yetus.audience.InterfaceStability;
  * <p>
  * Implemented by the Master, RegionServer, and TableServers (client).
  */
-@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
-@InterfaceStability.Evolving
+@InterfaceAudience.Private
 public interface Abortable {
   /**
    * Abort the server or client.
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Re
 import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesRequest;
 import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesResponse;
 import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadService;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager;
 
 import com.google.protobuf.RpcCallback;
@@ -62,10 +63,13 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService
   private static final Log LOG = LogFactory.getLog(SecureBulkLoadEndpoint.class);
 
   private RegionCoprocessorEnvironment env;
+  private RegionServerServices rsServices;
 
   @Override
   public void start(CoprocessorEnvironment env) {
     this.env = (RegionCoprocessorEnvironment)env;
+    assert this.env.getCoprocessorRegionServerServices() instanceof RegionServerServices;
+    rsServices = (RegionServerServices) this.env.getCoprocessorRegionServerServices();
     LOG.warn("SecureBulkLoadEndpoint is deprecated. It will be removed in future releases.");
     LOG.warn("Secure bulk load has been integrated into HBase core.");
   }
@@ -78,8 +82,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService
   public void prepareBulkLoad(RpcController controller, PrepareBulkLoadRequest request,
       RpcCallback<PrepareBulkLoadResponse> done) {
     try {
-      SecureBulkLoadManager secureBulkLoadManager =
-          this.env.getRegionServerServices().getSecureBulkLoadManager();
+      SecureBulkLoadManager secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager();
 
       String bulkToken = secureBulkLoadManager.prepareBulkLoad(this.env.getRegion(),
           convert(request));
@@ -106,8 +109,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService
   public void cleanupBulkLoad(RpcController controller, CleanupBulkLoadRequest request,
       RpcCallback<CleanupBulkLoadResponse> done) {
     try {
-      SecureBulkLoadManager secureBulkLoadManager =
-          this.env.getRegionServerServices().getSecureBulkLoadManager();
+      SecureBulkLoadManager secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager();
       secureBulkLoadManager.cleanupBulkLoad(this.env.getRegion(), convert(request));
       done.run(CleanupBulkLoadResponse.newBuilder().build());
     } catch (IOException e) {
@@ -138,8 +140,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService
     boolean loaded = false;
     Map<byte[], List<Path>> map = null;
     try {
-      SecureBulkLoadManager secureBulkLoadManager =
-          this.env.getRegionServerServices().getSecureBulkLoadManager();
+      SecureBulkLoadManager secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager();
       BulkLoadHFileRequest bulkLoadHFileRequest = ConvertSecureBulkLoadHFilesRequest(request);
       map = secureBulkLoadManager.secureBulkLoadHFiles(this.env.getRegion(),
           convert(bulkLoadHFileRequest));
@@ -65,10 +65,17 @@ import org.apache.zookeeper.ZooKeeper;
  * listeners registered with ZooKeeperWatcher cannot be removed.
  */
 public class ZooKeeperScanPolicyObserver implements RegionObserver {
+  // The zk ensemble info is put in hbase config xml with given custom key.
+  public static final String ZK_ENSEMBLE_KEY = "ZooKeeperScanPolicyObserver.zookeeper.ensemble";
+  public static final String ZK_SESSION_TIMEOUT_KEY =
+      "ZooKeeperScanPolicyObserver.zookeeper.session.timeout";
+  public static final int ZK_SESSION_TIMEOUT_DEFAULT = 30 * 1000; // 30 secs
   public static final String node = "/backup/example/lastbackup";
   public static final String zkkey = "ZK";
   private static final Log LOG = LogFactory.getLog(ZooKeeperScanPolicyObserver.class);
 
+  private ZooKeeper zk = null;
+
   /**
    * Internal watcher that keep "data" up to date asynchronously.
    */
@@ -165,8 +172,22 @@ public class ZooKeeperScanPolicyObserver implements RegionObserver {
     if (!re.getSharedData().containsKey(zkkey)) {
       // there is a short race here
       // in the worst case we create a watcher that will be notified once
-      re.getSharedData().putIfAbsent(zkkey, new ZKWatcher(
-          re.getRegionServerServices().getZooKeeper().getRecoverableZooKeeper().getZooKeeper()));
+      String ensemble = re.getConfiguration().get(ZK_ENSEMBLE_KEY);
+      int sessionTimeout = re.getConfiguration().getInt(ZK_SESSION_TIMEOUT_KEY,
+          ZK_SESSION_TIMEOUT_DEFAULT);
+      this.zk = new ZooKeeper(ensemble, sessionTimeout, null);
+      re.getSharedData().putIfAbsent(zkkey, new ZKWatcher(zk));
+    }
+  }
+
+  @Override
+  public void stop(CoprocessorEnvironment env) throws IOException {
+    if (this.zk != null) {
+      try {
+        this.zk.close();
+      } catch (InterruptedException e) {
+        LOG.error("Excepion while closing the ZK connection!", e);
+      }
     }
   }
 
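
With the change above, the example observer opens its own ZooKeeper session from two new configuration keys instead of borrowing the region server's internal watcher. A small illustrative setup snippet follows; only the key names and the 30 second default come from the diff, while the surrounding class and the ensemble address are assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hypothetical helper, not part of this commit.
    public final class ZkPolicyObserverSetup {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // Key names as defined by ZK_ENSEMBLE_KEY and ZK_SESSION_TIMEOUT_KEY above.
        conf.set("ZooKeeperScanPolicyObserver.zookeeper.ensemble", "zk1:2181,zk2:2181,zk3:2181");
        conf.setInt("ZooKeeperScanPolicyObserver.zookeeper.session.timeout", 30 * 1000);
        return conf;
      }

      private ZkPolicyObserverSetup() {
      }
    }
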
@@ -740,7 +740,7 @@ public class TestImportExport
 
     // Register the wal listener for the import table
     HRegionInfo region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer()
-        .getOnlineRegions(importTable.getName()).get(0).getRegionInfo();
+        .getRegions(importTable.getName()).get(0).getRegionInfo();
     TableWALActionListener walListener = new TableWALActionListener(region);
     WAL wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region);
     wal.registerWALActionsListener(walListener);
@@ -759,7 +759,7 @@ public class TestImportExport
     importTableName = name.getMethodName() + "import2";
     importTable = UTIL.createTable(TableName.valueOf(importTableName), FAMILYA, 3);
     region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer()
-        .getOnlineRegions(importTable.getName()).get(0).getRegionInfo();
+        .getRegions(importTable.getName()).get(0).getRegionInfo();
     wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region);
     walListener = new TableWALActionListener(region);
     wal.registerWALActionsListener(walListener);
@@ -176,10 +176,10 @@ public class TestRSGroupsOfflineMode
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
-        return failoverRS.getOnlineRegions(failoverTable).size() >= 1;
+        return failoverRS.getRegions(failoverTable).size() >= 1;
       }
     });
-    Assert.assertEquals(0, failoverRS.getOnlineRegions(RSGroupInfoManager.RSGROUP_TABLE_NAME).size());
+    Assert.assertEquals(0, failoverRS.getRegions(RSGroupInfoManager.RSGROUP_TABLE_NAME).size());
 
     // Need this for minicluster to shutdown cleanly.
     master.stopMaster();
@@ -19,19 +19,17 @@
 package org.apache.hadoop.hbase;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Defines the set of shared functions implemented by HBase servers (Masters
  * and RegionServers).
  */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
-@InterfaceStability.Evolving
+@InterfaceAudience.Private
 public interface Server extends Abortable, Stoppable {
   /**
    * Gets the configuration object for this server.
@@ -25,8 +25,8 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.CoprocessorRegionServerServices;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.security.UserProvider;
 
 /**
@@ -54,7 +54,7 @@ public class CoprocessorHConnection extends ConnectionImplementation {
     // this bit is a little hacky - just trying to get it going for the moment
     if (env instanceof RegionCoprocessorEnvironment) {
       RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) env;
-      RegionServerServices services = e.getRegionServerServices();
+      CoprocessorRegionServerServices services = e.getCoprocessorRegionServerServices();
       if (services instanceof HRegionServer) {
         return new CoprocessorHConnection((HRegionServer) services);
       }
@@ -25,8 +25,8 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.metrics.MetricRegistry;
+import org.apache.hadoop.hbase.regionserver.CoprocessorRegionServerServices;
 import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 
@@ -40,7 +40,7 @@ public interface RegionCoprocessorEnvironment extends CoprocessorEnvironment {
   RegionInfo getRegionInfo();
 
   /** @return reference to the region server services */
-  RegionServerServices getRegionServerServices();
+  CoprocessorRegionServerServices getCoprocessorRegionServerServices();
 
   /** @return shared data between all instances of this coprocessor */
   ConcurrentMap<String, Object> getSharedData();
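
Coprocessor code that previously called env.getRegionServerServices() has to move to the renamed accessor above, which now returns only the limited view. A hedged adaptation sketch; the helper class and method are hypothetical:

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.regionserver.CoprocessorRegionServerServices;

    // Hypothetical helper, not part of this commit.
    public final class EnvAccessorExample {
      // Was: env.getRegionServerServices().getServerName()
      static ServerName whereAmI(RegionCoprocessorEnvironment env) {
        CoprocessorRegionServerServices services = env.getCoprocessorRegionServerServices();
        return services.getServerName();
      }

      private EnvAccessorExample() {
      }
    }
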
@@ -19,16 +19,21 @@
 package org.apache.hadoop.hbase.coprocessor;
 
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.metrics.MetricRegistry;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.regionserver.CoprocessorRegionServerServices;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
 
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
+@InterfaceStability.Evolving
 public interface RegionServerCoprocessorEnvironment extends CoprocessorEnvironment {
   /**
    * Gets the region server services.
    *
    * @return the region server services
    */
-  RegionServerServices getRegionServerServices();
+  CoprocessorRegionServerServices getCoprocessorRegionServerServices();
 
   /**
    * Returns a MetricRegistry that can be used to track metrics at the region server level.
@@ -175,7 +175,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur
    * @throws IOException
    */
   private List<Region> getRegionsToFlush(String table) throws IOException {
-    return rss.getOnlineRegions(TableName.valueOf(table));
+    return rss.getRegions(TableName.valueOf(table));
   }
 
   public class FlushTableSubprocedureBuilder implements SubprocedureFactory {
@@ -77,7 +77,7 @@ public class FileSystemUtilizationChore extends ScheduledChore {
       return;
     }
     final Map<HRegionInfo,Long> onlineRegionSizes = new HashMap<>();
-    final Set<Region> onlineRegions = new HashSet<>(rs.getOnlineRegions());
+    final Set<Region> onlineRegions = new HashSet<>(rs.getRegions());
     // Process the regions from the last run if we have any. If we are somehow having difficulty
     // processing the Regions, we want to avoid creating a backlog in memory of Region objs.
     Iterator<Region> oldRegionsToProcess = getLeftoverRegions();
@@ -89,7 +89,7 @@ public class CompactedHFilesDischarger extends ScheduledChore {
     // Noop if rss is null. This will never happen in a normal condition except for cases
     // when the test case is not spinning up a cluster
     if (regionServerServices == null) return;
-    List<Region> onlineRegions = regionServerServices.getOnlineRegions();
+    List<Region> onlineRegions = regionServerServices.getRegions();
     if (onlineRegions == null) return;
     for (Region region : onlineRegions) {
       if (LOG.isTraceEnabled()) {
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.Set;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+
+/**
+ * Services exposed to CPs by {@link HRegionServer}
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
+@InterfaceStability.Evolving
+public interface CoprocessorRegionServerServices extends ImmutableOnlineRegions {
+
+  /**
+   * @return True if this regionserver is stopping.
+   */
+  boolean isStopping();
+
+  /**
+   * @return Return the FileSystem object used by the regionserver
+   */
+  FileSystem getFileSystem();
+
+  /**
+   * @return all the online tables in this RS
+   */
+  Set<TableName> getOnlineTables();
+
+  /**
+   * Returns a reference to the servers' connection.
+   *
+   * Important note: this method returns a reference to Connection which is managed
+   * by Server itself, so callers must NOT attempt to close connection obtained.
+   */
+  Connection getConnection();
+
+  /**
+   * @return The unique server name for this server.
+   */
+  ServerName getServerName();
+}
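
The new interface above, together with the region-listing methods it inherits from the new ImmutableOnlineRegions further down in this diff, is the whole surface an ordinary coprocessor now sees. An illustrative helper, assuming only the signatures shown in this commit (the class itself is hypothetical):

    import java.io.IOException;
    import java.util.Set;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.regionserver.CoprocessorRegionServerServices;

    // Hypothetical helper, not part of this commit.
    public final class OnlineTableReport {
      // Prints how many regions of each online table this server carries.
      static void logOnline(CoprocessorRegionServerServices services) throws IOException {
        Set<TableName> tables = services.getOnlineTables();
        for (TableName table : tables) {
          int count = services.getRegions(table).size();
          System.out.println(services.getServerName() + " serves " + count
              + " region(s) of " + table);
        }
      }

      private OnlineTableReport() {
      }
    }
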
@@ -21,17 +21,14 @@ package org.apache.hadoop.hbase.regionserver;
 import java.net.InetSocketAddress;
 import java.util.List;
 
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Abstraction that allows different modules in RegionServer to update/get
  * the favored nodes information for regions.
  */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
-@InterfaceStability.Evolving
+@InterfaceAudience.Private
 public interface FavoredNodesForRegion {
   /**
    * Used to update the favored nodes mapping when required.
@@ -2757,7 +2757,7 @@ public class HRegionServer extends HasThread implements
   }
 
   @Override
-  public void addToOnlineRegions(Region region) {
+  public void addRegion(Region region) {
     this.onlineRegions.put(region.getRegionInfo().getEncodedName(), region);
     configurationManager.registerObserver(region);
   }
@@ -3003,7 +3003,7 @@ public class HRegionServer extends HasThread implements
    * @return Online regions from <code>tableName</code>
    */
   @Override
-  public List<Region> getOnlineRegions(TableName tableName) {
+  public List<Region> getRegions(TableName tableName) {
     List<Region> tableRegions = new ArrayList<>();
     synchronized (this.onlineRegions) {
       for (Region region: this.onlineRegions.values()) {
@@ -3017,7 +3017,7 @@ public class HRegionServer extends HasThread implements
   }
 
   @Override
-  public List<Region> getOnlineRegions() {
+  public List<Region> getRegions() {
     List<Region> allRegions = new ArrayList<>();
     synchronized (this.onlineRegions) {
       // Return a clone copy of the onlineRegions
@@ -3103,7 +3103,7 @@ public class HRegionServer extends HasThread implements
   protected boolean closeRegion(String encodedName, final boolean abort, final ServerName sn)
       throws NotServingRegionException {
     //Check for permissions to close.
-    Region actualRegion = this.getFromOnlineRegions(encodedName);
+    Region actualRegion = this.getRegion(encodedName);
     // Can be null if we're calling close on a region that's not online
     if ((actualRegion != null) && (actualRegion.getCoprocessorHost() != null)) {
       try {
@@ -3128,7 +3128,7 @@ public class HRegionServer extends HasThread implements
       return closeRegion(encodedName, abort, sn);
     }
     // Let's get the region from the online region list again
-    actualRegion = this.getFromOnlineRegions(encodedName);
+    actualRegion = this.getRegion(encodedName);
     if (actualRegion == null) { // If already online, we still need to close it.
       LOG.info("The opening previously in progress has been cancelled by a CLOSE request.");
       // The master deletes the znode when it receives this exception.
@@ -3170,7 +3170,7 @@ public class HRegionServer extends HasThread implements
   protected boolean closeAndOfflineRegionForSplitOrMerge(
       final List<String> regionEncodedName) throws IOException {
     for (int i = 0; i < regionEncodedName.size(); ++i) {
-      Region regionToClose = this.getFromOnlineRegions(regionEncodedName.get(i));
+      Region regionToClose = this.getRegion(regionEncodedName.get(i));
       if (regionToClose != null) {
         Map<byte[], List<HStoreFile>> hstoreFiles = null;
         Exception exceptionToThrow = null;
@@ -3211,7 +3211,7 @@ public class HRegionServer extends HasThread implements
           MetaTableAccessor.putToMetaTable(getConnection(), finalBarrier);
         }
         // Offline the region
-        this.removeFromOnlineRegions(regionToClose, null);
+        this.removeRegion(regionToClose, null);
       }
     }
     return true;
@@ -3232,13 +3232,13 @@ public class HRegionServer extends HasThread implements
   }
 
   @Override
-  public Region getFromOnlineRegions(final String encodedRegionName) {
+  public Region getRegion(final String encodedRegionName) {
     return this.onlineRegions.get(encodedRegionName);
   }
 
 
   @Override
-  public boolean removeFromOnlineRegions(final Region r, ServerName destination) {
+  public boolean removeRegion(final Region r, ServerName destination) {
     Region toReturn = this.onlineRegions.remove(r.getRegionInfo().getEncodedName());
     if (destination != null) {
       long closeSeqNum = r.getMaxFlushedSeqId();
@@ -0,0 +1,60 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+
+/**
+ * Interface to Map of online regions. In the Map, the key is the region's
+ * encoded name and the value is an {@link Region} instance.
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
+@InterfaceStability.Evolving
+public interface ImmutableOnlineRegions {
+
+  /**
+   * Return {@link Region} instance.
+   * Only works if caller is in same context, in same JVM. Region is not
+   * serializable.
+   * @param encodedRegionName
+   * @return Region for the passed encoded <code>encodedRegionName</code> or
+   * null if named region is not member of the online regions.
+   */
+  Region getRegion(String encodedRegionName);
+
+  /**
+   * Get all online regions of a table in this RS.
+   * @param tableName
+   * @return List of Region
+   * @throws java.io.IOException
+   */
+  List<Region> getRegions(TableName tableName) throws IOException;
+
+  /**
+   * Get all online regions in this RS.
+   * @return List of online Region
+   */
+  List<Region> getRegions();
+}
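
Callers inside the server, and any coprocessor that casts to the internal interfaces, follow the rename pattern visible throughout the remaining hunks. A before/after sketch, purely illustrative and not part of the commit:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.regionserver.RegionServerServices;

    // Hypothetical helper showing the new names next to the old ones.
    public final class RenamedCallsExample {
      static List<Region> regionsOf(RegionServerServices rss, TableName table) throws IOException {
        return rss.getRegions(table);          // was rss.getOnlineRegions(table)
      }

      static Region byEncodedName(RegionServerServices rss, String encodedName) {
        return rss.getRegion(encodedName);     // was rss.getFromOnlineRegions(encodedName)
      }

      private RenamedCallsExample() {
      }
    }
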
@@ -110,7 +110,7 @@ public class IncreasingToUpperBoundRegionSplitPolicy extends ConstantSizeRegionS
     TableName tablename = region.getTableDescriptor().getTableName();
     int tableRegionsCount = 0;
     try {
-      List<Region> hri = rss.getOnlineRegions(tablename);
+      List<Region> hri = rss.getRegions(tablename);
       tableRegionsCount = hri == null || hri.isEmpty() ? 0 : hri.size();
     } catch (IOException e) {
       LOG.debug("Failed getOnlineRegions " + tablename, e);
@@ -204,7 +204,7 @@ public class LogRoller extends HasThread implements Closeable {
    */
   private void scheduleFlush(final byte [] encodedRegionName) {
     boolean scheduled = false;
-    Region r = this.services.getFromOnlineRegions(Bytes.toString(encodedRegionName));
+    Region r = this.services.getRegion(Bytes.toString(encodedRegionName));
     FlushRequester requester = null;
     if (r != null) {
       requester = this.services.getFlushRequester();
@@ -18,28 +18,21 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.io.IOException;
-import java.util.List;
 
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Interface to Map of online regions. In the Map, the key is the region's
  * encoded name and the value is an {@link Region} instance.
  */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
-@InterfaceStability.Evolving
-public interface OnlineRegions extends Server {
+@InterfaceAudience.Private
+public interface OnlineRegions extends ImmutableOnlineRegions {
   /**
    * Add to online regions.
    * @param r
    */
-  void addToOnlineRegions(final Region r);
+  void addRegion(final Region r);
 
   /**
    * This method removes Region corresponding to hri from the Map of onlineRegions.
@@ -48,29 +41,5 @@ public interface OnlineRegions extends Server {
    * @param destination Destination, if any, null otherwise.
    * @return True if we removed a region from online list.
    */
-  boolean removeFromOnlineRegions(final Region r, ServerName destination);
-
-  /**
-   * Return {@link Region} instance.
-   * Only works if caller is in same context, in same JVM. Region is not
-   * serializable.
-   * @param encodedRegionName
-   * @return Region for the passed encoded <code>encodedRegionName</code> or
-   * null if named region is not member of the online regions.
-   */
-  Region getFromOnlineRegions(String encodedRegionName);
-
-  /**
-   * Get all online regions of a table in this RS.
-   * @param tableName
-   * @return List of Region
-   * @throws java.io.IOException
-   */
-  List<Region> getOnlineRegions(TableName tableName) throws IOException;
-
-  /**
-   * Get all online regions in this RS.
-   * @return List of online Region
-   */
-  List<Region> getOnlineRegions();
+  boolean removeRegion(final Region r, ServerName destination);
 }
@@ -106,7 +106,7 @@ public class RSDumpServlet extends StateDumpServlet {
 
   public static void dumpRowLock(HRegionServer hrs, PrintWriter out) {
     StringBuilder sb = new StringBuilder();
-    for (Region region : hrs.getOnlineRegions()) {
+    for (Region region : hrs.getRegions()) {
       HRegion hRegion = (HRegion)region;
       if (hRegion.getLockedRows().size() > 0) {
         for (HRegion.RowLockContext rowLockContext : hRegion.getLockedRows().values()) {
@@ -1712,9 +1712,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     List<Region> regions;
     if (request.hasTableName()) {
       TableName tableName = ProtobufUtil.toTableName(request.getTableName());
-      regions = regionServer.getOnlineRegions(tableName);
+      regions = regionServer.getRegions(tableName);
     } else {
-      regions = regionServer.getOnlineRegions();
+      regions = regionServer.getRegions();
     }
     List<RegionLoad> rLoads = new ArrayList<>(regions.size());
     RegionLoad.Builder regionLoadBuilder = ClusterStatusProtos.RegionLoad.newBuilder();
@@ -1902,7 +1902,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       try {
         String encodedName = region.getEncodedName();
         byte[] encodedNameBytes = region.getEncodedNameAsBytes();
-        final Region onlineRegion = regionServer.getFromOnlineRegions(encodedName);
+        final Region onlineRegion = regionServer.getRegion(encodedName);
         if (onlineRegion != null) {
           // The region is already online. This should not happen any more.
           String error = "Received OPEN for the region:"
@@ -1919,7 +1919,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
           encodedNameBytes, Boolean.TRUE);
 
         if (Boolean.FALSE.equals(previous)) {
-          if (regionServer.getFromOnlineRegions(encodedName) != null) {
+          if (regionServer.getRegion(encodedName) != null) {
             // There is a close in progress. This should not happen any more.
             String error = "Received OPEN for the region:"
               + region.getRegionNameAsString() + ", which we are already trying to CLOSE";
@@ -2027,7 +2027,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       checkOpen();
       String encodedName = region.getEncodedName();
       byte[] encodedNameBytes = region.getEncodedNameAsBytes();
-      final Region onlineRegion = regionServer.getFromOnlineRegions(encodedName);
+      final Region onlineRegion = regionServer.getRegion(encodedName);
 
       if (onlineRegion != null) {
         LOG.info("Region already online. Skipping warming up " + region);
@@ -138,7 +138,7 @@ public class RegionCoprocessorHost
 
     /** @return reference to the region server services */
     @Override
-    public RegionServerServices getRegionServerServices() {
+    public CoprocessorRegionServerServices getCoprocessorRegionServerServices() {
      return rsServices;
    }
 
@@ -309,7 +309,7 @@ public class RegionServerCoprocessorHost extends
    }
 
    @Override
-   public RegionServerServices getRegionServerServices() {
+   public CoprocessorRegionServerServices getCoprocessorRegionServerServices() {
      return regionServerServices;
    }
 
@@ -21,24 +21,21 @@ package org.apache.hadoop.hbase.regionserver;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
+import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.client.locking.EntityLock;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
 import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 
 import com.google.protobuf.Service;
@@ -46,13 +43,9 @@ import com.google.protobuf.Service;
 /**
  * Services provided by {@link HRegionServer}
  */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
-@InterfaceStability.Evolving
-public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegion {
-  /**
-   * @return True if this regionserver is stopping.
-   */
-  boolean isStopping();
+@InterfaceAudience.Private
+public interface RegionServerServices
+    extends Server, OnlineRegions, FavoredNodesForRegion, CoprocessorRegionServerServices {
 
   /** @return the WAL for a particular region. Pass null for getting the
    * default (common) WAL */
@@ -188,11 +181,6 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi
    */
   ConcurrentMap<byte[], Boolean> getRegionsInTransitionInRS();
 
-  /**
-   * @return Return the FileSystem object used by the regionserver
-   */
-  FileSystem getFileSystem();
-
   /**
    * @return The RegionServer's "Leases" service
    */
@@ -214,12 +202,6 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi
    */
   public ServerNonceManager getNonceManager();
 
-  /**
-   * @return all the online tables in this RS
-   */
-  Set<TableName> getOnlineTables();
-
-
   /**
    * Registers a new protocol buffer {@link Service} subclass as a coprocessor endpoint to be
    * available for handling
@@ -122,7 +122,7 @@ public class StorefileRefresherChore extends ScheduledChore {
     Iterator<String> lastRefreshTimesIter = lastRefreshTimes.keySet().iterator();
     while (lastRefreshTimesIter.hasNext()) {
       String encodedName = lastRefreshTimesIter.next();
-      if (regionServer.getFromOnlineRegions(encodedName) == null) {
+      if (regionServer.getRegion(encodedName) == null) {
         lastRefreshTimesIter.remove();
       }
     }
@@ -90,7 +90,7 @@ public class CloseRegionHandler extends EventHandler {
     LOG.debug("Processing close of " + name);
     String encodedRegionName = regionInfo.getEncodedName();
     // Check that this region is being served here
-    HRegion region = (HRegion)rsServices.getFromOnlineRegions(encodedRegionName);
+    HRegion region = (HRegion)rsServices.getRegion(encodedRegionName);
     if (region == null) {
       LOG.warn("Received CLOSE for region " + name + " but currently not serving - ignoring");
       // TODO: do better than a simple warning
@@ -115,7 +115,7 @@ public class CloseRegionHandler extends EventHandler {
       throw new RuntimeException(ioe);
     }
 
-    this.rsServices.removeFromOnlineRegions(region, destination);
+    this.rsServices.removeRegion(region, destination);
     rsServices.reportRegionStateTransition(TransitionCode.CLOSED, regionInfo);
 
     // Done! Region is closed on this RS
@@ -88,7 +88,7 @@ public class OpenRegionHandler extends EventHandler {
     // 2) The region is now marked as online while we're suppose to open. This would be a bug.
 
     // Check that this region is not already online
-    if (this.rsServices.getFromOnlineRegions(encodedName) != null) {
+    if (this.rsServices.getRegion(encodedName) != null) {
       LOG.error("Region " + encodedName +
           " was already online when we started processing the opening. " +
           "Marking this new attempt as failed");
@@ -119,7 +119,7 @@ public class OpenRegionHandler extends EventHandler {
       }
 
       // Successful region open, and add it to OnlineRegions
-      this.rsServices.addToOnlineRegions(region);
+      this.rsServices.addRegion(region);
       openSuccessful = true;
 
       // Done! Successful region open
@@ -313,7 +313,7 @@ public class OpenRegionHandler extends EventHandler {
 
   void cleanupFailedOpen(final HRegion region) throws IOException {
     if (region != null) {
-      this.rsServices.removeFromOnlineRegions(region, null);
+      this.rsServices.removeRegion(region, null);
       region.close();
     }
   }
@@ -223,7 +223,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
    * @throws IOException
    */
   private List<Region> getRegionsToSnapshot(SnapshotDescription snapshot) throws IOException {
-    List<Region> onlineRegions = rss.getOnlineRegions(TableName.valueOf(snapshot.getTable()));
+    List<Region> onlineRegions = rss.getRegions(TableName.valueOf(snapshot.getTable()));
     Iterator<Region> iterator = onlineRegions.iterator();
     // remove the non-default regions
     while (iterator.hasNext()) {
@@ -55,7 +55,7 @@ public class ReplicationObserver implements RegionObserver {
           + "data replication.");
       return;
     }
-    HRegionServer rs = (HRegionServer) env.getRegionServerServices();
+    HRegionServer rs = (HRegionServer) env.getCoprocessorRegionServerServices();
     Replication rep = (Replication) rs.getReplicationSourceService();
     rep.addHFileRefsToQueue(env.getRegionInfo().getTable(), family, pairs);
   }
@@ -104,6 +104,7 @@ import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -949,12 +950,14 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS
       zk = mEnv.getMasterServices().getZooKeeper();
     } else if (env instanceof RegionServerCoprocessorEnvironment) {
       RegionServerCoprocessorEnvironment rsEnv = (RegionServerCoprocessorEnvironment) env;
-      zk = rsEnv.getRegionServerServices().getZooKeeper();
+      assert rsEnv.getCoprocessorRegionServerServices() instanceof RegionServerServices;
+      zk = ((RegionServerServices) rsEnv.getCoprocessorRegionServerServices()).getZooKeeper();
     } else if (env instanceof RegionCoprocessorEnvironment) {
       // if running at region
       regionEnv = (RegionCoprocessorEnvironment) env;
       conf.addBytesMap(regionEnv.getRegion().getTableDescriptor().getValues());
-      zk = regionEnv.getRegionServerServices().getZooKeeper();
+      assert regionEnv.getCoprocessorRegionServerServices() instanceof RegionServerServices;
+      zk = ((RegionServerServices) regionEnv.getCoprocessorRegionServerServices()).getZooKeeper();
       compatibleEarlyTermination = conf.getBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT,
         AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT);
     }
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -61,7 +62,9 @@ public class TokenProvider implements AuthenticationProtos.AuthenticationService
     if (env instanceof RegionCoprocessorEnvironment) {
       RegionCoprocessorEnvironment regionEnv =
           (RegionCoprocessorEnvironment)env;
-      RpcServerInterface server = regionEnv.getRegionServerServices().getRpcServer();
+      assert regionEnv.getCoprocessorRegionServerServices() instanceof RegionServerServices;
+      RpcServerInterface server = ((RegionServerServices) regionEnv
+          .getCoprocessorRegionServerServices()).getRpcServer();
       SecretManager<?> mgr = ((RpcServer)server).getSecretManager();
       if (mgr instanceof AuthenticationTokenSecretManager) {
         secretManager = (AuthenticationTokenSecretManager)mgr;
@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.io.util.StreamUtils;
 import org.apache.hadoop.hbase.regionserver.OperationStatus;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;

@@ -111,7 +112,9 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService

 @Override
 public void init(RegionCoprocessorEnvironment e) throws IOException {
-ZooKeeperWatcher zk = e.getRegionServerServices().getZooKeeper();
+assert e.getCoprocessorRegionServerServices() instanceof RegionServerServices;
+ZooKeeperWatcher zk = ((RegionServerServices) e.getCoprocessorRegionServerServices())
+.getZooKeeper();
 try {
 labelsCache = VisibilityLabelsCache.createAndGet(zk, this.conf);
 } catch (IOException ioe) {
@@ -36,7 +36,7 @@
 HRegionServer rs = (HRegionServer) getServletContext().getAttribute(HRegionServer.REGIONSERVER);
 Configuration conf = rs.getConfiguration();

-Region region = rs.getFromOnlineRegions(regionName);
+Region region = rs.getRegion(regionName);
 String displayName = HRegionInfo.getRegionNameAsStringForDisplay(region.getRegionInfo(),
 rs.getConfiguration());
 %>

@@ -4442,7 +4442,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
 public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName,
 final byte[] family) {
 int numHFiles = 0;
-for (Region region : rs.getOnlineRegions(tableName)) {
+for (Region region : rs.getRegions(tableName)) {
 numHFiles += region.getStore(family).getStorefilesCount();
 }
 return numHFiles;

@@ -801,7 +801,7 @@ public class MiniHBaseCluster extends HBaseCluster {
 ArrayList<HRegion> ret = new ArrayList<>();
 for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) {
 HRegionServer hrs = rst.getRegionServer();
-for (Region region : hrs.getOnlineRegions(tableName)) {
+for (Region region : hrs.getRegions(tableName)) {
 if (region.getTableDescriptor().getTableName().equals(tableName)) {
 ret.add((HRegion)region);
 }
@@ -94,17 +94,17 @@ public class MockRegionServerServices implements RegionServerServices {
 }

 @Override
-public boolean removeFromOnlineRegions(Region r, ServerName destination) {
+public boolean removeRegion(Region r, ServerName destination) {
 return this.regions.remove(r.getRegionInfo().getEncodedName()) != null;
 }

 @Override
-public Region getFromOnlineRegions(String encodedRegionName) {
+public Region getRegion(String encodedRegionName) {
 return this.regions.get(encodedRegionName);
 }

 @Override
-public List<Region> getOnlineRegions(TableName tableName) throws IOException {
+public List<Region> getRegions(TableName tableName) throws IOException {
 return null;
 }

@@ -114,24 +114,24 @@ public class MockRegionServerServices implements RegionServerServices {
 }

 @Override
-public List<Region> getOnlineRegions() {
+public List<Region> getRegions() {
 return null;
 }

 @Override
-public void addToOnlineRegions(Region r) {
+public void addRegion(Region r) {
 this.regions.put(r.getRegionInfo().getEncodedName(), r);
 }

 @Override
 public void postOpenDeployTasks(Region r) throws KeeperException, IOException {
-addToOnlineRegions(r);
+addRegion(r);
 }

 @Override
 public void postOpenDeployTasks(PostOpenDeployContext context) throws KeeperException,
 IOException {
-addToOnlineRegions(context.getRegion());
+addRegion(context.getRegion());
 }

 @Override
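The renames above replace the getOnlineRegions/addToOnlineRegions/removeFromOnlineRegions/getFromOnlineRegions family with getRegions/addRegion/removeRegion/getRegion across RegionServerServices implementations. A minimal sketch of the renamed accessors in use, assuming `services` is any RegionServerServices implementation and `region` is one of its online regions; the class, method, and variable names below are illustrative only:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;

public final class RegionLookupExample {
  private RegionLookupExample() {}

  // Enumerates regions through the renamed accessors; the behavior is unchanged
  // from the pre-HBASE-18298 API, only the method names differ.
  static void listRegions(RegionServerServices services, TableName table, Region region)
      throws IOException {
    List<Region> all = services.getRegions();            // was getOnlineRegions()
    List<Region> forTable = services.getRegions(table);  // was getOnlineRegions(TableName)
    Region byName =
        services.getRegion(region.getRegionInfo().getEncodedName()); // was getFromOnlineRegions
    System.out.println(all + " " + forTable + " " + byName);
  }
}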
@@ -93,9 +93,7 @@ public class TestGlobalMemStoreSize {
 long globalMemStoreSize = 0;
 for (HRegionInfo regionInfo :
 ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) {
-globalMemStoreSize +=
-server.getFromOnlineRegions(regionInfo.getEncodedName()).
-getMemstoreSize();
+globalMemStoreSize += server.getRegion(regionInfo.getEncodedName()).getMemstoreSize();
 }
 assertEquals(server.getRegionServerAccounting().getGlobalMemstoreDataSize(),
 globalMemStoreSize);

@@ -109,7 +107,7 @@ public class TestGlobalMemStoreSize {

 for (HRegionInfo regionInfo :
 ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) {
-Region r = server.getFromOnlineRegions(regionInfo.getEncodedName());
+Region r = server.getRegion(regionInfo.getEncodedName());
 flush(r, server);
 }
 LOG.info("Post flush on " + server.getServerName());

@@ -125,7 +123,7 @@ public class TestGlobalMemStoreSize {
 // our test was running....
 for (HRegionInfo regionInfo :
 ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) {
-Region r = server.getFromOnlineRegions(regionInfo.getEncodedName());
+Region r = server.getRegion(regionInfo.getEncodedName());
 long l = r.getMemstoreSize();
 if (l > 0) {
 // Only meta could have edits at this stage. Give it another flush
@@ -241,7 +241,7 @@ public class TestHFileArchiving {
 UTIL.loadRegion(region, TEST_FAM);

 // get the hfiles in the region
-List<Region> regions = hrs.getOnlineRegions(tableName);
+List<Region> regions = hrs.getRegions(tableName);
 assertEquals("More that 1 region for test table.", 1, regions.size());

 region = regions.get(0);

@@ -320,7 +320,7 @@ public class TestHFileArchiving {
 UTIL.loadRegion(region, TEST_FAM);

 // get the hfiles in the region
-List<Region> regions = hrs.getOnlineRegions(tableName);
+List<Region> regions = hrs.getRegions(tableName);
 assertEquals("More that 1 region for test table.", 1, regions.size());

 region = regions.get(0);

@@ -180,7 +180,7 @@ public class TestZooKeeperTableArchiveClient {
 HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
 List<Region> regions = new ArrayList<>();
 regions.add(region);
-when(rss.getOnlineRegions()).thenReturn(regions);
+when(rss.getRegions()).thenReturn(regions);
 final CompactedHFilesDischarger compactionCleaner =
 new CompactedHFilesDischarger(100, stop, rss, false);
 loadFlushAndCompact(region, TEST_FAM);

@@ -233,7 +233,7 @@ public class TestZooKeeperTableArchiveClient {
 HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
 List<Region> regions = new ArrayList<>();
 regions.add(region);
-when(rss.getOnlineRegions()).thenReturn(regions);
+when(rss.getRegions()).thenReturn(regions);
 final CompactedHFilesDischarger compactionCleaner =
 new CompactedHFilesDischarger(100, stop, rss, false);
 loadFlushAndCompact(region, TEST_FAM);

@@ -243,7 +243,7 @@ public class TestZooKeeperTableArchiveClient {
 HRegion otherRegion = UTIL.createTestRegion(otherTable, hcd);
 regions = new ArrayList<>();
 regions.add(otherRegion);
-when(rss.getOnlineRegions()).thenReturn(regions);
+when(rss.getRegions()).thenReturn(regions);
 final CompactedHFilesDischarger compactionCleaner1 = new CompactedHFilesDischarger(100, stop,
 rss, false);
 loadFlushAndCompact(otherRegion, TEST_FAM);
@@ -64,7 +64,7 @@ public class TestAsyncMetaRegionLocator {
 private Optional<ServerName> getRSCarryingMeta() {
 return TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream()
 .map(t -> t.getRegionServer())
-.filter(rs -> !rs.getOnlineRegions(TableName.META_TABLE_NAME).isEmpty()).findAny()
+.filter(rs -> !rs.getRegions(TableName.META_TABLE_NAME).isEmpty()).findAny()
 .map(rs -> rs.getServerName());
 }

@@ -176,7 +176,7 @@ public class TestAsyncNonMetaRegionLocator {
 ServerName[] serverNames = new ServerName[startKeys.length];
 TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream().map(t -> t.getRegionServer())
 .forEach(rs -> {
-rs.getOnlineRegions(TABLE_NAME).forEach(r -> {
+rs.getRegions(TABLE_NAME).forEach(r -> {
 serverNames[Arrays.binarySearch(startKeys, r.getRegionInfo().getStartKey(),
 Bytes::compareTo)] = rs.getServerName();
 });

@@ -270,7 +270,7 @@ public class TestAsyncNonMetaRegionLocator {
 LOCATOR.getRegionLocation(TABLE_NAME, row, RegionLocateType.AFTER, false).get();
 ServerName afterServerName =
 TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream().map(t -> t.getRegionServer())
-.filter(rs -> rs.getOnlineRegions(TABLE_NAME).stream()
+.filter(rs -> rs.getRegions(TABLE_NAME).stream()
 .anyMatch(r -> Bytes.equals(splitKey, r.getRegionInfo().getStartKey())))
 .findAny().get().getServerName();
 assertLocEquals(splitKey, EMPTY_END_ROW, afterServerName, afterLoc);
@@ -301,7 +301,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
 ServerName serverName = rs.getServerName();
 try {
 Assert.assertEquals(admin.getOnlineRegions(serverName).get().size(), rs
-.getOnlineRegions().size());
+.getRegions().size());
 } catch (Exception e) {
 fail("admin.getOnlineRegions() method throws a exception: " + e.getMessage());
 }

@@ -527,7 +527,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
 TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().stream()
 .map(rsThread -> rsThread.getRegionServer()).collect(Collectors.toList());
 List<Region> regions = new ArrayList<>();
-rsList.forEach(rs -> regions.addAll(rs.getOnlineRegions(tableName)));
+rsList.forEach(rs -> regions.addAll(rs.getRegions(tableName)));
 Assert.assertEquals(regions.size(), 1);
 int countBefore = countStoreFilesInFamilies(regions, families);
 Assert.assertTrue(countBefore > 0);

@@ -568,7 +568,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
 TEST_UTIL
 .getHBaseCluster()
 .getLiveRegionServerThreads()
-.forEach(rsThread -> regions.addAll(rsThread.getRegionServer().getOnlineRegions(tableName)));
+.forEach(rsThread -> regions.addAll(rsThread.getRegionServer().getRegions(tableName)));
 Assert.assertEquals(regions.size(), 1);

 int countBefore = countStoreFilesInFamilies(regions, families);
@@ -133,7 +133,7 @@ public class TestAvoidCellReferencesIntoShippedBlocks {
 RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
 Region region =
-TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
+TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 Store store = region.getStores().iterator().next();
 CacheConfig cacheConf = store.getCacheConfig();
 cacheConf.setCacheDataOnWrite(true);

@@ -307,7 +307,7 @@ public class TestAvoidCellReferencesIntoShippedBlocks {
 RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
 Region region =
-TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
+TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 Store store = region.getStores().iterator().next();
 CacheConfig cacheConf = store.getCacheConfig();
 cacheConf.setCacheDataOnWrite(true);
@@ -182,8 +182,7 @@ public class TestBlockEvictionFromClient {
 // get the block cache and region
 RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(
-regionName);
+Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 Store store = region.getStores().iterator().next();
 CacheConfig cacheConf = store.getCacheConfig();
 cacheConf.setCacheDataOnWrite(true);

@@ -273,8 +272,7 @@ public class TestBlockEvictionFromClient {
 // get the block cache and region
 RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(
-regionName);
+Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 Store store = region.getStores().iterator().next();
 CacheConfig cacheConf = store.getCacheConfig();
 cacheConf.setCacheDataOnWrite(true);

@@ -332,8 +330,7 @@ public class TestBlockEvictionFromClient {
 // get the block cache and region
 RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(
-regionName);
+Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 Store store = region.getStores().iterator().next();
 CacheConfig cacheConf = store.getCacheConfig();
 cacheConf.setCacheDataOnWrite(true);

@@ -394,8 +391,7 @@ public class TestBlockEvictionFromClient {
 // get the block cache and region
 RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(
-regionName);
+Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 BlockCache cache = setCacheProperties(region);
 Put put = new Put(ROW);
 put.addColumn(FAMILY, QUALIFIER, data);

@@ -487,8 +483,7 @@ public class TestBlockEvictionFromClient {
 // get the block cache and region
 RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(
-regionName);
+Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 BlockCache cache = setCacheProperties(region);

 Put put = new Put(ROW);

@@ -571,8 +566,7 @@ public class TestBlockEvictionFromClient {
 // get the block cache and region
 RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-Region region =
-TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
+Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 Store store = region.getStores().iterator().next();
 CacheConfig cacheConf = store.getCacheConfig();
 cacheConf.setEvictOnClose(true);

@@ -630,8 +624,7 @@ public class TestBlockEvictionFromClient {
 // get the block cache and region
 RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(
-regionName);
+Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 Store store = region.getStores().iterator().next();
 CacheConfig cacheConf = store.getCacheConfig();
 cacheConf.setCacheDataOnWrite(true);

@@ -713,8 +706,7 @@ public class TestBlockEvictionFromClient {
 // get the block cache and region
 RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(
-regionName);
+Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 BlockCache cache = setCacheProperties(region);

 Put put = new Put(ROW);

@@ -813,8 +805,7 @@ public class TestBlockEvictionFromClient {
 // get the block cache and region
 RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(
-regionName);
+Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 Store store = region.getStores().iterator().next();
 CacheConfig cacheConf = store.getCacheConfig();
 cacheConf.setCacheDataOnWrite(true);

@@ -879,8 +870,7 @@ public class TestBlockEvictionFromClient {
 // get the block cache and region
 RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(
-regionName);
+Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 Store store = region.getStores().iterator().next();
 CacheConfig cacheConf = store.getCacheConfig();
 cacheConf.setCacheDataOnWrite(true);

@@ -997,8 +987,7 @@ public class TestBlockEvictionFromClient {
 // get the block cache and region
 RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(
-regionName);
+Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 Store store = region.getStores().iterator().next();
 CacheConfig cacheConf = store.getCacheConfig();
 cacheConf.setCacheDataOnWrite(true);

@@ -1127,8 +1116,7 @@ public class TestBlockEvictionFromClient {
 // get the block cache and region
 RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(
-regionName);
+Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 Store store = region.getStores().iterator().next();
 CacheConfig cacheConf = store.getCacheConfig();
 cacheConf.setCacheDataOnWrite(true);
@@ -96,7 +96,7 @@ public class TestClientPushback {
 BufferedMutatorImpl mutator = (BufferedMutatorImpl) conn.getBufferedMutator(tableName);

 HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0);
-Region region = rs.getOnlineRegions(tableName).get(0);
+Region region = rs.getRegions(tableName).get(0);

 LOG.debug("Writing some data to "+tableName);
 // write some data

@@ -183,7 +183,7 @@ public class TestClientPushback {
 ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(conf);
 Table table = conn.getTable(tableName);
 HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0);
-Region region = rs.getOnlineRegions(tableName).get(0);
+Region region = rs.getRegions(tableName).get(0);

 RowMutations mutations = new RowMutations(Bytes.toBytes("row"));
 Put p = new Put(Bytes.toBytes("row"));
@@ -4528,8 +4528,7 @@ public class TestFromClientSide {
 // set block size to 64 to making 2 kvs into one block, bypassing the walkForwardInSingleRow
 // in Store.rowAtOrBeforeFromStoreFile
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-Region region =
-TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
+Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 Put put1 = new Put(firstRow);
 Put put2 = new Put(secondRow);
 Put put3 = new Put(thirdRow);

@@ -5295,8 +5294,7 @@ public class TestFromClientSide {
 // get the block cache and region
 String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();

-Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName)
-.getFromOnlineRegions(regionName);
+Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
 Store store = region.getStores().iterator().next();
 CacheConfig cacheConf = store.getCacheConfig();
 cacheConf.setCacheDataOnWrite(true);

@@ -961,7 +961,7 @@ public class TestFromClientSide3 {
 private static Region find(final TableName tableName)
 throws IOException, InterruptedException {
 HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(tableName);
-List<Region> regions = rs.getOnlineRegions(tableName);
+List<Region> regions = rs.getRegions(tableName);
 assertEquals(1, regions.size());
 return regions.get(0);
 }
@@ -1328,7 +1328,7 @@ public class TestHCM {
 assertTrue(!destServerName.equals(metaServerName));

 //find another row in the cur server that is less than ROW_X
-List<Region> regions = curServer.getOnlineRegions(TABLE_NAME3);
+List<Region> regions = curServer.getRegions(TABLE_NAME3);
 byte[] otherRow = null;
 for (Region region : regions) {
 if (!region.getRegionInfo().getEncodedName().equals(toMove.getRegionInfo().getEncodedName())

@@ -159,7 +159,7 @@ public class TestMultiRespectsLimits {
 TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
 @Override
 public boolean evaluate() throws Exception {
-return regionServer.getOnlineRegions(tableName).get(0).getMaxFlushedSeqId() > 3;
+return regionServer.getRegions(tableName).get(0).getMaxFlushedSeqId() > 3;
 }
 });
 }
@@ -134,7 +134,7 @@ public class TestReplicaWithCluster {
 if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() <= 1) {
 LOG.info("Throw Region Server Stopped Exceptoin for replica id " + replicaId);
 throw new RegionServerStoppedException("Server " +
-e.getEnvironment().getRegionServerServices().getServerName()
+e.getEnvironment().getCoprocessorRegionServerServices().getServerName()
 + " not running");
 } else {
 LOG.info("We're replica region " + replicaId);

@@ -151,7 +151,7 @@ public class TestReplicaWithCluster {
 if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() <= 1) {
 LOG.info("Throw Region Server Stopped Exceptoin for replica id " + replicaId);
 throw new RegionServerStoppedException("Server " +
-e.getEnvironment().getRegionServerServices().getServerName()
+e.getEnvironment().getCoprocessorRegionServerServices().getServerName()
 + " not running");
 } else {
 LOG.info("We're replica region " + replicaId);

@@ -179,8 +179,9 @@ public class TestReplicaWithCluster {
 if (!e.getEnvironment().getRegion().getRegionInfo().isMetaRegion() && (replicaId == 0)) {
 LOG.info("Get, throw Region Server Stopped Exceptoin for region " + e.getEnvironment()
 .getRegion().getRegionInfo());
-throw new RegionServerStoppedException("Server " +
-e.getEnvironment().getRegionServerServices().getServerName() + " not running");
+throw new RegionServerStoppedException(
+"Server " + e.getEnvironment().getCoprocessorRegionServerServices().getServerName()
++ " not running");
 }
 } else {
 LOG.info("Get, We're replica region " + replicaId);

@@ -209,8 +210,9 @@ public class TestReplicaWithCluster {
 LOG.info("Scan, throw Region Server Stopped Exceptoin for replica " + e.getEnvironment()
 .getRegion().getRegionInfo());

-throw new RegionServerStoppedException("Server " +
-e.getEnvironment().getRegionServerServices().getServerName() + " not running");
+throw new RegionServerStoppedException(
+"Server " + e.getEnvironment().getCoprocessorRegionServerServices().getServerName()
++ " not running");
 } else {
 LOG.info("Scan, We're replica region " + replicaId);
 }
@@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
-import org.apache.hadoop.hbase.regionserver.Leases;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.hadoop.hbase.regionserver.Region.Operation;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;

@@ -137,12 +136,6 @@ public class SimpleRegionObserver implements RegionObserver {

 @Override
 public void start(CoprocessorEnvironment e) throws IOException {
-// this only makes sure that leases and locks are available to coprocessors
-// from external packages
-RegionCoprocessorEnvironment re = (RegionCoprocessorEnvironment)e;
-Leases leases = re.getRegionServerServices().getLeases();
-leases.createLease(re.getRegion().getRegionInfo().getRegionNameAsString(), 2000, null);
-leases.cancelLease(re.getRegion().getRegionInfo().getRegionNameAsString());
 }

 @Override
@@ -300,7 +300,7 @@ public class TestRegionObserverScannerOpenHook {
 table.put(put);

 HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getTableName());
-List<Region> regions = rs.getOnlineRegions(desc.getTableName());
+List<Region> regions = rs.getRegions(desc.getTableName());
 assertEquals("More than 1 region serving test table with 1 row", 1, regions.size());
 Region region = regions.get(0);
 admin.flushRegion(region.getRegionInfo().getRegionName());

@@ -292,7 +292,7 @@ public class TestBlockReorder {

 int nbTest = 0;
 while (nbTest < 10) {
-final List<Region> regions = targetRs.getOnlineRegions(h.getName());
+final List<Region> regions = targetRs.getRegions(h.getName());
 final CountDownLatch latch = new CountDownLatch(regions.size());
 // listen for successful log rolls
 final WALActionsListener listener = new WALActionsListener.Base() {
@@ -263,18 +263,18 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
 }

 @Override
-public void addToOnlineRegions(Region r) {
+public void addRegion(Region r) {
 // TODO Auto-generated method stub
 }

 @Override
-public boolean removeFromOnlineRegions(Region r, ServerName destination) {
+public boolean removeRegion(Region r, ServerName destination) {
 // TODO Auto-generated method stub
 return false;
 }

 @Override
-public HRegion getFromOnlineRegions(String encodedRegionName) {
+public HRegion getRegion(String encodedRegionName) {
 // TODO Auto-generated method stub
 return null;
 }

@@ -468,7 +468,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
 }

 @Override
-public List<Region> getOnlineRegions() {
+public List<Region> getRegions() {
 return null;
 }

@@ -535,7 +535,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
 }

 @Override
-public List<Region> getOnlineRegions(TableName tableName) throws IOException {
+public List<Region> getRegions(TableName tableName) throws IOException {
 // TODO Auto-generated method stub
 return null;
 }
@@ -258,7 +258,7 @@ public class TestAssignmentListener {
 admin.majorCompact(tableName);
 mergeable = 0;
 for (JVMClusterUtil.RegionServerThread regionThread: miniCluster.getRegionServerThreads()) {
-for (Region region: regionThread.getRegionServer().getOnlineRegions(tableName)) {
+for (Region region: regionThread.getRegionServer().getRegions(tableName)) {
 mergeable += ((HRegion)region).isMergeable() ? 1 : 0;
 }
 }

@@ -299,7 +299,7 @@ public class TestAssignmentListener {
 MiniHBaseCluster miniCluster = TEST_UTIL.getMiniHBaseCluster();
 int serverCount = 0;
 for (JVMClusterUtil.RegionServerThread regionThread: miniCluster.getRegionServerThreads()) {
-if (!regionThread.getRegionServer().getOnlineRegions(TABLE_NAME).isEmpty()) {
+if (!regionThread.getRegionServer().getRegions(TABLE_NAME).isEmpty()) {
 ++serverCount;
 }
 if (serverCount > 1) {
@@ -80,7 +80,7 @@ public class TestGetLastFlushedSequenceId {
 Region region = null;
 for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
 HRegionServer hrs = rsts.get(i).getRegionServer();
-for (Region r : hrs.getOnlineRegions(tableName)) {
+for (Region r : hrs.getRegions(tableName)) {
 region = r;
 break;
 }

@@ -225,8 +225,8 @@ public class TestMasterFailover {
 // region server should expire (how it can be verified?)
 MetaTableLocator.setMetaLocation(activeMaster.getZooKeeper(),
 rs.getServerName(), State.OPENING);
-Region meta = rs.getFromOnlineRegions(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
-rs.removeFromOnlineRegions(meta, null);
+Region meta = rs.getRegion(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
+rs.removeRegion(meta, null);
 ((HRegion)meta).close();

 log("Aborting master");
@@ -417,7 +417,7 @@ public class TestRegionPlacement {
 MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
 for (int i = 0; i < SLAVES; i++) {
 HRegionServer rs = cluster.getRegionServer(i);
-for (Region region: rs.getOnlineRegions(TableName.valueOf("testRegionAssignment"))) {
+for (Region region: rs.getRegions(TableName.valueOf("testRegionAssignment"))) {
 InetSocketAddress[] favoredSocketAddress = rs.getFavoredNodesForRegion(
 region.getRegionInfo().getEncodedName());
 List<ServerName> favoredServerList = plan.getAssignmentMap().get(region.getRegionInfo());

@@ -544,7 +544,7 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {

 private void compactTable(TableName tableName) throws IOException {
 for(JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
-for(Region region : t.getRegionServer().getOnlineRegions(tableName)) {
+for(Region region : t.getRegionServer().getRegions(tableName)) {
 region.compact(true);
 }
 }
@@ -62,7 +62,7 @@ public class TestRegionLocationFinder {

 for (int i = 0; i < ServerNum; i++) {
 HRegionServer server = cluster.getRegionServer(i);
-for (Region region : server.getOnlineRegions(tableName)) {
+for (Region region : server.getRegions(tableName)) {
 region.flush(true);
 }
 }

@@ -83,7 +83,7 @@ public class TestRegionLocationFinder {
 public void testInternalGetTopBlockLocation() throws Exception {
 for (int i = 0; i < ServerNum; i++) {
 HRegionServer server = cluster.getRegionServer(i);
-for (Region region : server.getOnlineRegions(tableName)) {
+for (Region region : server.getRegions(tableName)) {
 // get region's hdfs block distribution by region and RegionLocationFinder,
 // they should have same result
 HDFSBlocksDistribution blocksDistribution1 = region.getHDFSBlocksDistribution();

@@ -122,7 +122,7 @@ public class TestRegionLocationFinder {
 public void testGetTopBlockLocations() throws Exception {
 for (int i = 0; i < ServerNum; i++) {
 HRegionServer server = cluster.getRegionServer(i);
-for (Region region : server.getOnlineRegions(tableName)) {
+for (Region region : server.getRegions(tableName)) {
 List<ServerName> servers = finder.getTopBlockLocations(region
 .getRegionInfo());
 // test table may have empty region

@@ -147,7 +147,7 @@ public class TestRegionLocationFinder {
 finder.getCache().invalidateAll();
 for (int i = 0; i < ServerNum; i++) {
 HRegionServer server = cluster.getRegionServer(i);
-List<Region> regions = server.getOnlineRegions(tableName);
+List<Region> regions = server.getRegions(tableName);
 if (regions.size() <= 0) {
 continue;
 }
@@ -150,7 +150,7 @@ public class TestRegionsOnMasterOptions {
 try {
 Table t = TEST_UTIL.createMultiRegionTable(tn, HConstants.CATALOG_FAMILY, REGIONS);
 LOG.info("Server: " + cluster.getMaster().getServerManager().getOnlineServersList());
-List<Region> regions = cluster.getMaster().getOnlineRegions();
+List<Region> regions = cluster.getMaster().getRegions();
 int mActualCount = regions.size();
 if (masterCount == 0 || masterCount == SYSTEM_REGIONS) {
 // 0 means no regions on master.

@@ -163,7 +163,7 @@ public class TestRegionsOnMasterOptions {
 // thread though it is a regionserver so we have to check master and then below the
 // regionservers.
 for (JVMClusterUtil.RegionServerThread rst: cluster.getRegionServerThreads()) {
-regions = rst.getRegionServer().getOnlineRegions();
+regions = rst.getRegionServer().getRegions();
 int rsActualCount = regions.size();
 checkCount(rsActualCount, rsCount);
 }

@@ -179,7 +179,7 @@ public class TestRegionsOnMasterOptions {
 }
 LOG.info("Cluster is up; running balancer");
 cluster.getMaster().balance();
-regions = cluster.getMaster().getOnlineRegions();
+regions = cluster.getMaster().getRegions();
 int mNewActualCount = regions.size();
 if (masterCount == 0 || masterCount == SYSTEM_REGIONS) {
 // 0 means no regions on master. After crash, should still be no regions on master.
@@ -328,7 +328,7 @@ public class TestSnapshotFromMaster {
 .getRegionServerThreads();
 HRegionServer hrs = null;
 for (RegionServerThread rs : regionServerThreads) {
-if (!rs.getRegionServer().getOnlineRegions(TABLE_NAME).isEmpty()) {
+if (!rs.getRegionServer().getRegions(TABLE_NAME).isEmpty()) {
 hrs = rs.getRegionServer();
 break;
 }
@ -64,7 +64,7 @@ public class TestFileSystemUtilizationChore {
|
||||||
.reportRegionSizesForQuotas((Map<HRegionInfo,Long>) any(Map.class));
|
.reportRegionSizesForQuotas((Map<HRegionInfo,Long>) any(Map.class));
|
||||||
|
|
||||||
final Region region = mockRegionWithSize(regionSizes);
|
final Region region = mockRegionWithSize(regionSizes);
|
||||||
when(rs.getOnlineRegions()).thenReturn(Arrays.asList(region));
|
when(rs.getRegions()).thenReturn(Arrays.asList(region));
|
||||||
chore.chore();
|
chore.chore();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -81,7 +81,7 @@ public class TestFileSystemUtilizationChore {
|
||||||
.reportRegionSizesForQuotas((Map<HRegionInfo,Long>) any(Map.class));
|
.reportRegionSizesForQuotas((Map<HRegionInfo,Long>) any(Map.class));
|
||||||
|
|
||||||
final Region region = mockRegionWithSize(regionSizes);
|
final Region region = mockRegionWithSize(regionSizes);
|
||||||
when(rs.getOnlineRegions()).thenReturn(Arrays.asList(region));
|
when(rs.getRegions()).thenReturn(Arrays.asList(region));
|
||||||
chore.chore();
|
chore.chore();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -107,7 +107,7 @@ public class TestFileSystemUtilizationChore {
|
||||||
final Region r1 = mockRegionWithSize(r1Sizes);
|
final Region r1 = mockRegionWithSize(r1Sizes);
|
||||||
final Region r2 = mockRegionWithSize(r2Sizes);
|
final Region r2 = mockRegionWithSize(r2Sizes);
|
||||||
final Region r3 = mockRegionWithSize(r3Sizes);
|
final Region r3 = mockRegionWithSize(r3Sizes);
|
||||||
when(rs.getOnlineRegions()).thenReturn(Arrays.asList(r1, r2, r3));
|
when(rs.getRegions()).thenReturn(Arrays.asList(r1, r2, r3));
|
||||||
chore.chore();
|
chore.chore();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -173,7 +173,7 @@ public class TestFileSystemUtilizationChore {
|
||||||
final Region r1 = mockRegionWithSize(Arrays.asList(1024L, 2048L));
|
final Region r1 = mockRegionWithSize(Arrays.asList(1024L, 2048L));
|
||||||
final Region r2 = mockRegionWithSize(Arrays.asList(1024L * 1024L));
|
final Region r2 = mockRegionWithSize(Arrays.asList(1024L * 1024L));
|
||||||
final Region r3 = mockRegionWithSize(Arrays.asList(10L * 1024L * 1024L));
|
final Region r3 = mockRegionWithSize(Arrays.asList(10L * 1024L * 1024L));
|
||||||
when(rs.getOnlineRegions()).thenReturn(Arrays.asList(r1, r2, r3, lr1, lr2));
|
when(rs.getRegions()).thenReturn(Arrays.asList(r1, r2, r3, lr1, lr2));
|
||||||
|
|
||||||
chore.chore();
|
chore.chore();
|
||||||
}
|
}
|
||||||
|
@ -206,7 +206,7 @@ public class TestFileSystemUtilizationChore {
|
||||||
final Region r2 = mockRegionWithSize(Arrays.asList(1024L * 1024L));
|
final Region r2 = mockRegionWithSize(Arrays.asList(1024L * 1024L));
|
||||||
final Region r3 = mockRegionWithSize(Arrays.asList(10L * 1024L * 1024L));
|
final Region r3 = mockRegionWithSize(Arrays.asList(10L * 1024L * 1024L));
|
||||||
// lr2 is no longer online, so it should be ignored
|
// lr2 is no longer online, so it should be ignored
|
||||||
when(rs.getOnlineRegions()).thenReturn(Arrays.asList(r1, r2, r3, lr1));
|
when(rs.getRegions()).thenReturn(Arrays.asList(r1, r2, r3, lr1));
|
||||||
|
|
||||||
chore.chore();
|
chore.chore();
|
||||||
}
|
}
|
||||||
|
@ -229,7 +229,7 @@ public class TestFileSystemUtilizationChore {
|
||||||
|
|
||||||
final Region r1 = mockRegionWithSize(r1Sizes);
|
final Region r1 = mockRegionWithSize(r1Sizes);
|
||||||
final Region r2 = mockSplitParentRegionWithSize(r2Sizes);
|
final Region r2 = mockSplitParentRegionWithSize(r2Sizes);
|
||||||
when(rs.getOnlineRegions()).thenReturn(Arrays.asList(r1, r2));
|
when(rs.getRegions()).thenReturn(Arrays.asList(r1, r2));
|
||||||
chore.chore();
|
chore.chore();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -251,7 +251,7 @@ public class TestFileSystemUtilizationChore {
|
||||||
|
|
||||||
final Region r1 = mockRegionWithSize(r1Sizes);
|
final Region r1 = mockRegionWithSize(r1Sizes);
|
||||||
final Region r2 = mockRegionReplicaWithSize(r2Sizes);
|
final Region r2 = mockRegionReplicaWithSize(r2Sizes);
|
||||||
when(rs.getOnlineRegions()).thenReturn(Arrays.asList(r1, r2));
|
when(rs.getRegions()).thenReturn(Arrays.asList(r1, r2));
|
||||||
chore.chore();
|
chore.chore();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -278,7 +278,7 @@ public class TestFileSystemUtilizationChore {
|
||||||
|
|
||||||
final Region r1 = mockRegionWithHFileLinks(r1StoreFileSizes, r1HFileSizes);
|
final Region r1 = mockRegionWithHFileLinks(r1StoreFileSizes, r1HFileSizes);
|
||||||
final Region r2 = mockRegionWithHFileLinks(r2StoreFileSizes, r2HFileSizes);
|
final Region r2 = mockRegionWithHFileLinks(r2StoreFileSizes, r2HFileSizes);
|
||||||
when(rs.getOnlineRegions()).thenReturn(Arrays.asList(r1, r2));
|
when(rs.getRegions()).thenReturn(Arrays.asList(r1, r2));
|
||||||
chore.chore();
|
chore.chore();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
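The chore tests above only swap the stubbed accessor. Below is a minimal sketch of that pattern, assuming Mockito and a mocked RegionServerServices; the class name and table name are illustrative, not taken from the patch.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Arrays;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;

// Illustrative only: stub the renamed accessors the way the chore tests do.
final class RegionStubSketch {
  static RegionServerServices stubWithOneRegion(Region region) {
    RegionServerServices rss = mock(RegionServerServices.class);
    // was: when(rss.getOnlineRegions()).thenReturn(...)
    when(rss.getRegions()).thenReturn(Arrays.asList(region));
    // was: when(rss.getOnlineRegions(tableName)).thenReturn(...)
    when(rss.getRegions(TableName.valueOf("t"))).thenReturn(Arrays.asList(region));
    return rss;
  }
}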
@@ -92,7 +92,7 @@ public class TestCompactionArchiveConcurrentClose {
 RegionServerServices rss = mock(RegionServerServices.class);
 List<Region> regions = new ArrayList<>();
 regions.add(region);
-when(rss.getOnlineRegions()).thenReturn(regions);
+when(rss.getRegions()).thenReturn(regions);

 // Create the cleaner object
 CompactedHFilesDischarger cleaner =

@@ -101,7 +101,7 @@ public class TestCompactionArchiveIOException {
 RegionServerServices rss = mock(RegionServerServices.class);
 List<Region> regions = new ArrayList<>();
 regions.add(region);
-when(rss.getOnlineRegions()).thenReturn(regions);
+when(rss.getRegions()).thenReturn(regions);

 // Create the cleaner object
 final CompactedHFilesDischarger cleaner =

@@ -133,7 +133,7 @@ public class TestCompactionFileNotFound {
 int numRegionsAfterSplit = 0;
 List<RegionServerThread> rst = util.getMiniHBaseCluster().getLiveRegionServerThreads();
 for (RegionServerThread t : rst) {
-numRegionsAfterSplit += t.getRegionServer().getOnlineRegions(TEST_TABLE).size();
+numRegionsAfterSplit += t.getRegionServer().getRegions(TEST_TABLE).size();
 }
 // Make sure that the split went through and all the regions are assigned
 return (numRegionsAfterSplit == numRegionsBeforeSplit + 1

@@ -125,7 +125,7 @@ public class TestCompactionInDeadRegionServer {
 @Test
 public void test() throws Exception {
 HRegionServer rsToSuspend = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
-HRegion region = (HRegion) rsToSuspend.getOnlineRegions(TABLE_NAME).get(0);
+HRegion region = (HRegion) rsToSuspend.getRegions(TABLE_NAME).get(0);
 ZooKeeperWatcher watcher = UTIL.getZooKeeperWatcher();
 watcher.getRecoverableZooKeeper().delete(
 ZKUtil.joinZNode(watcher.getZNodePaths().rsZNode, rsToSuspend.getServerName().toString()),

@@ -137,7 +137,7 @@ public class TestCompactionInDeadRegionServer {
 for (RegionServerThread thread : UTIL.getHBaseCluster().getRegionServerThreads()) {
 HRegionServer rs = thread.getRegionServer();
 if (rs != rsToSuspend) {
-return !rs.getOnlineRegions(TABLE_NAME).isEmpty();
+return !rs.getRegions(TABLE_NAME).isEmpty();
 }
 }
 return false;

@@ -138,7 +138,7 @@ public class TestCompactionState {
 ht = TEST_UTIL.createTable(table, families);
 loadData(ht, families, 3000, flushes);
 HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
-List<Region> regions = rs.getOnlineRegions(table);
+List<Region> regions = rs.getRegions(table);
 int countBefore = countStoreFilesInFamilies(regions, families);
 int countBeforeSingleFamily = countStoreFilesInFamily(regions, family);
 assertTrue(countBefore > 0); // there should be some data files

@@ -209,7 +209,7 @@ public class TestEncryptionKeyRotation {
 throws IOException, InterruptedException {
 boolean compacted = false;
 for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName)
-.getOnlineRegions(tableName)) {
+.getRegions(tableName)) {
 for (HStore store : ((HRegion) region).getStores()) {
 compacted = false;
 while (!compacted) {

@@ -235,7 +235,7 @@ public class TestEncryptionKeyRotation {
 private static List<Path> findStorefilePaths(TableName tableName) throws Exception {
 List<Path> paths = new ArrayList<>();
 for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName)
-.getOnlineRegions(tableName)) {
+.getRegions(tableName)) {
 for (HStore store : ((HRegion) region).getStores()) {
 for (HStoreFile storefile : store.getStorefiles()) {
 paths.add(storefile.getPath());

@@ -248,7 +248,7 @@ public class TestEncryptionKeyRotation {
 private static List<Path> findCompactedStorefilePaths(TableName tableName) throws Exception {
 List<Path> paths = new ArrayList<>();
 for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName)
-.getOnlineRegions(tableName)) {
+.getRegions(tableName)) {
 for (HStore store : ((HRegion) region).getStores()) {
 Collection<HStoreFile> compactedfiles =
 store.getStoreEngine().getStoreFileManager().getCompactedfiles();

@@ -54,7 +54,7 @@ public class TestEncryptionRandomKeying {
 private static List<Path> findStorefilePaths(TableName tableName) throws Exception {
 List<Path> paths = new ArrayList<>();
 for (Region region:
-TEST_UTIL.getRSForFirstRegionInTable(tableName).getOnlineRegions(htd.getTableName())) {
+TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegions(htd.getTableName())) {
 for (HStore store : ((HRegion) region).getStores()) {
 for (HStoreFile storefile : store.getStorefiles()) {
 paths.add(storefile.getPath());
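The encryption tests above keep the same walk over regions, stores, and store files; only the first call changes. A compact sketch of that walk, assuming an already-running HRegionServer handle (class and variable names are illustrative).

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.Region;

// Illustrative only: list store file paths for a table hosted on one server.
final class StorefilePathsSketch {
  static List<Path> storefilePaths(HRegionServer rs, TableName tableName) throws IOException {
    List<Path> paths = new ArrayList<>();
    // was: rs.getOnlineRegions(tableName)
    for (Region region : rs.getRegions(tableName)) {
      for (HStore store : ((HRegion) region).getStores()) {
        for (HStoreFile storefile : store.getStorefiles()) {
          paths.add(storefile.getPath());
        }
      }
    }
    return paths;
  }
}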
@@ -181,7 +181,7 @@ public class TestHRegionReplayEvents {
 primaryRegion.close();
 List<Region> regions = new ArrayList<>();
 regions.add(primaryRegion);
-when(rss.getOnlineRegions()).thenReturn(regions);
+when(rss.getRegions()).thenReturn(regions);

 primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
 secondaryRegion = HRegion.openHRegion(secondaryHri, htd, null, CONF, rss, null);

@@ -1393,7 +1393,7 @@ public class TestHRegionReplayEvents {
 primaryRegion.compactStores();
 List<Region> regions = new ArrayList<>();
 regions.add(primaryRegion);
-when(rss.getOnlineRegions()).thenReturn(regions);
+when(rss.getRegions()).thenReturn(regions);
 CompactedHFilesDischarger cleaner = new CompactedHFilesDischarger(100, null, rss, false);
 cleaner.chore();
 secondaryRegion.refreshStoreFiles();

@@ -330,7 +330,7 @@ public class TestPerColumnFamilyFlush {
 List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
 for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
 HRegionServer hrs = rsts.get(i).getRegionServer();
-for (Region region : hrs.getOnlineRegions(tableName)) {
+for (Region region : hrs.getRegions(tableName)) {
 return Pair.newPair(region, hrs);
 }
 }

@@ -114,7 +114,7 @@ public class TestRegionFavoredNodes {
 // them as favored nodes through the region.
 for (int i = 0; i < REGION_SERVERS; i++) {
 HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(i);
-List<Region> regions = server.getOnlineRegions(TABLE_NAME);
+List<Region> regions = server.getRegions(TABLE_NAME);
 for (Region region : regions) {
 List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName>favoredNodes =
 new ArrayList<>(3);

@@ -142,7 +142,7 @@ public class TestRegionFavoredNodes {
 // they are consistent with the favored nodes for that region.
 for (int i = 0; i < REGION_SERVERS; i++) {
 HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(i);
-List<Region> regions = server.getOnlineRegions(TABLE_NAME);
+List<Region> regions = server.getRegions(TABLE_NAME);
 for (Region region : regions) {
 List<String> files = region.getStoreFileList(new byte[][]{COLUMN_FAMILY});
 for (String file : files) {

@@ -185,7 +185,7 @@ public class TestRegionReplicaFailover {
 // read from it the same data from primary and secondaries
 boolean aborted = false;
 for (RegionServerThread rs : HTU.getMiniHBaseCluster().getRegionServerThreads()) {
-for (Region r : rs.getRegionServer().getOnlineRegions(htd.getTableName())) {
+for (Region r : rs.getRegionServer().getRegions(htd.getTableName())) {
 if (r.getRegionInfo().getReplicaId() == 0) {
 LOG.info("Aborting region server hosting primary region replica");
 rs.getRegionServer().abort("for test");

@@ -247,7 +247,7 @@ public class TestRegionReplicaFailover {
 // read from it the same data
 boolean aborted = false;
 for (RegionServerThread rs : HTU.getMiniHBaseCluster().getRegionServerThreads()) {
-for (Region r : rs.getRegionServer().getOnlineRegions(htd.getTableName())) {
+for (Region r : rs.getRegionServer().getRegions(htd.getTableName())) {
 if (r.getRegionInfo().getReplicaId() == 1) {
 LOG.info("Aborting region server hosting secondary region replica");
 rs.getRegionServer().abort("for test");

@@ -308,7 +308,7 @@ public class TestRegionReplicaFailover {
 try {
 boolean aborted = false;
 for (RegionServerThread rs : HTU.getMiniHBaseCluster().getRegionServerThreads()) {
-for (Region r : rs.getRegionServer().getOnlineRegions(htd.getTableName())) {
+for (Region r : rs.getRegionServer().getRegions(htd.getTableName())) {
 if (r.getRegionInfo().getReplicaId() == 1) {
 LOG.info("Aborting region server hosting secondary region replica");
 rs.getRegionServer().abort("for test");
@@ -162,7 +162,7 @@ public class TestRegionReplicas {
 openRegion(HTU, getRS(), hriSecondary);

 // first try directly against region
-region = getRS().getFromOnlineRegions(hriSecondary.getEncodedName());
+region = getRS().getRegion(hriSecondary.getEncodedName());
 assertGet(region, 42, true);

 assertGetRpc(hriSecondary, 42, true);

@@ -259,7 +259,7 @@ public class TestRegionReplicas {
 Threads.sleep(4 * refreshPeriod);

 LOG.info("Checking results from secondary region replica");
-Region secondaryRegion = getRS().getFromOnlineRegions(hriSecondary.getEncodedName());
+Region secondaryRegion = getRS().getRegion(hriSecondary.getEncodedName());
 Assert.assertEquals(1, secondaryRegion.getStore(f).getStorefilesCount());

 assertGet(secondaryRegion, 42, true);

@@ -446,11 +446,11 @@ public class TestRegionReplicas {
 region.flush(true);
 }

-Region primaryRegion = getRS().getFromOnlineRegions(hriPrimary.getEncodedName());
+Region primaryRegion = getRS().getRegion(hriPrimary.getEncodedName());
 Assert.assertEquals(3, primaryRegion.getStore(f).getStorefilesCount());

 // Refresh store files on the secondary
-Region secondaryRegion = getRS().getFromOnlineRegions(hriSecondary.getEncodedName());
+Region secondaryRegion = getRS().getRegion(hriSecondary.getEncodedName());
 secondaryRegion.getStore(f).refreshStoreFiles();
 Assert.assertEquals(3, secondaryRegion.getStore(f).getStorefilesCount());
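In the replica tests above, getFromOnlineRegions(encodedName) becomes getRegion(encodedName) with the same lookup-by-encoded-name behavior. A small sketch, assuming an HRegionServer and an HRegionInfo from the surrounding test setup (helper name is illustrative).

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.Region;

// Illustrative only: resolve a (possibly replica) region by its encoded name.
final class ReplicaLookupSketch {
  static Region lookup(HRegionServer rs, HRegionInfo hri) {
    // was: rs.getFromOnlineRegions(hri.getEncodedName())
    return rs.getRegion(hri.getEncodedName());
  }
}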
@@ -168,7 +168,7 @@ public class TestRegionServerAbort {
 public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit,
 Durability durability) throws IOException {
 if (put.getAttribute(DO_ABORT) != null) {
-HRegionServer rs = (HRegionServer) c.getEnvironment().getRegionServerServices();
+HRegionServer rs = (HRegionServer) c.getEnvironment().getCoprocessorRegionServerServices();
 LOG.info("Triggering abort for regionserver " + rs.getServerName());
 rs.abort("Aborting for test");
 }
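For coprocessor code like the observer above, the environment now hands out the narrowed service view via getCoprocessorRegionServerServices(). A minimal sketch of reading the hosting server's name through it (the helper class is illustrative, not part of the patch).

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

// Illustrative only: what an observer sees after the interface cleanup.
final class CoprocessorServicesSketch {
  static ServerName hostingServer(RegionCoprocessorEnvironment env) {
    // was: env.getRegionServerServices().getServerName()
    return env.getCoprocessorRegionServerServices().getServerName();
  }
}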
@@ -496,7 +496,7 @@ public class TestRegionServerMetrics {
 byte[] val = Bytes.toBytes("mobdata");
 try {
 Table table = TEST_UTIL.createTable(htd, new byte[0][0], conf);
-Region region = rs.getOnlineRegions(tableName).get(0);
+Region region = rs.getRegions(tableName).get(0);
 for (int insertCount = 0; insertCount < numHfiles; insertCount++) {
 Put p = new Put(Bytes.toBytes(insertCount));
 p.addColumn(cf, qualifier, val);

@@ -105,7 +105,7 @@ public class TestRegionSplitPolicy {
 // return 'online regions'.
 RegionServerServices rss = Mockito.mock(RegionServerServices.class);
 final List<Region> regions = new ArrayList<>();
-Mockito.when(rss.getOnlineRegions(TABLENAME)).thenReturn(regions);
+Mockito.when(rss.getRegions(TABLENAME)).thenReturn(regions);
 Mockito.when(mockRegion.getRegionServerServices()).thenReturn(rss);
 // Set max size for this 'table'.
 long maxSplitSize = 1024L;

@@ -164,7 +164,7 @@ public class TestRegionSplitPolicy {

 RegionServerServices rss = Mockito.mock(RegionServerServices.class);
 final List<Region> regions = new ArrayList<>();
-Mockito.when(rss.getOnlineRegions(TABLENAME)).thenReturn(regions);
+Mockito.when(rss.getRegions(TABLENAME)).thenReturn(regions);
 Mockito.when(mockRegion.getRegionServerServices()).thenReturn(rss);
 Mockito.when(mockRegion.getBlockedRequestsCount()).thenReturn(0L);
 Mockito.when(mockRegion.getWriteRequestsCount()).thenReturn(0L);

@@ -95,7 +95,7 @@ public class TestSplitWalDataLoss {
 @Test
 public void test() throws IOException, InterruptedException {
 final HRegionServer rs = testUtil.getRSForFirstRegionInTable(tableName);
-final HRegion region = (HRegion) rs.getOnlineRegions(tableName).get(0);
+final HRegion region = (HRegion) rs.getRegions(tableName).get(0);
 HRegion spiedRegion = spy(region);
 final MutableBoolean flushed = new MutableBoolean(false);
 final MutableBoolean reported = new MutableBoolean(false);

@@ -78,7 +78,7 @@ public class TestCompactedHFilesDischarger {
 rss = mock(RegionServerServices.class);
 List<Region> regions = new ArrayList<>(1);
 regions.add(region);
-when(rss.getOnlineRegions()).thenReturn(regions);
+when(rss.getRegions()).thenReturn(regions);
 }

 @After

@@ -70,7 +70,7 @@ public class TestFIFOCompactionPolicy {
 List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
 for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
 HRegionServer hrs = rsts.get(i).getRegionServer();
-for (Region region : hrs.getOnlineRegions(tableName)) {
+for (Region region : hrs.getRegions(tableName)) {
 return region.getStores().iterator().next();
 }
 }

@@ -76,7 +76,7 @@ public class TestCompactionWithThroughputController {
 List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
 for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
 HRegionServer hrs = rsts.get(i).getRegionServer();
-for (Region region : hrs.getOnlineRegions(tableName)) {
+for (Region region : hrs.getRegions(tableName)) {
 return region.getStores().iterator().next();
 }
 }

@@ -78,7 +78,7 @@ public class TestFlushWithThroughputController {
 List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
 for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
 HRegionServer hrs = rsts.get(i).getRegionServer();
-for (Region region : hrs.getOnlineRegions(tableName)) {
+for (Region region : hrs.getRegions(tableName)) {
 return region.getStores().iterator().next();
 }
 }

@@ -165,7 +165,7 @@ public class TestFlushWithThroughputController {
 HRegionServer regionServer = hbtu.getRSForFirstRegionInTable(tableName);
 PressureAwareFlushThroughputController throughputController =
 (PressureAwareFlushThroughputController) regionServer.getFlushThroughputController();
-for (Region region : regionServer.getOnlineRegions()) {
+for (Region region : regionServer.getRegions()) {
 region.flush(true);
 }
 assertEquals(0.0, regionServer.getFlushPressure(), EPSILON);
@@ -189,7 +189,7 @@ public abstract class AbstractTestLogRolling {
 this.tableName = getName();
 // TODO: Why does this write data take for ever?
 startAndWriteData();
-HRegionInfo region = server.getOnlineRegions(TableName.valueOf(tableName)).get(0)
+HRegionInfo region = server.getRegions(TableName.valueOf(tableName)).get(0)
 .getRegionInfo();
 final WAL log = server.getWAL(region);
 LOG.info("after writing there are " + AbstractFSWALProvider.getNumRolledLogFiles(log) + " log files");

@@ -249,7 +249,7 @@ public abstract class AbstractTestLogRolling {
 table = createTestTable(getName());

 server = TEST_UTIL.getRSForFirstRegionInTable(table.getName());
-Region region = server.getOnlineRegions(table.getName()).get(0);
+Region region = server.getRegions(table.getName()).get(0);
 final WAL log = server.getWAL(region.getRegionInfo());
 Store s = region.getStore(HConstants.CATALOG_FAMILY);

@@ -58,7 +58,7 @@ public class TestAsyncLogRolling extends AbstractTestLogRolling {
 TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
 doPut(table, 1);
 server = TEST_UTIL.getRSForFirstRegionInTable(table.getName());
-HRegionInfo hri = server.getOnlineRegions(table.getName()).get(0).getRegionInfo();
+HRegionInfo hri = server.getRegions(table.getName()).get(0).getRegionInfo();
 AsyncFSWAL wal = (AsyncFSWAL) server.getWAL(hri);
 int numRolledLogFiles = AsyncFSWALProvider.getNumRolledLogFiles(wal);
 DatanodeInfo[] dnInfos = wal.getPipeline();

@@ -140,7 +140,7 @@ public class TestLogRolling extends AbstractTestLogRolling {
 Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());

 server = TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName());
-HRegionInfo region = server.getOnlineRegions(desc.getTableName()).get(0).getRegionInfo();
+HRegionInfo region = server.getRegions(desc.getTableName()).get(0).getRegionInfo();
 final FSHLog log = (FSHLog) server.getWAL(region);
 final AtomicBoolean lowReplicationHookCalled = new AtomicBoolean(false);

@@ -248,7 +248,7 @@ public class TestLogRolling extends AbstractTestLogRolling {
 Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());

 server = TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName());
-HRegionInfo region = server.getOnlineRegions(desc.getTableName()).get(0).getRegionInfo();
+HRegionInfo region = server.getRegions(desc.getTableName()).get(0).getRegionInfo();
 final WAL log = server.getWAL(region);
 final List<Path> paths = new ArrayList<>(1);
 final List<Integer> preLogRolledCalled = new ArrayList<>();

@@ -247,7 +247,7 @@ public class TestRegionReplicaReplicationEndpoint {

 for (int i=0; i < NB_SERVERS; i++) {
 HRegionServer rs = HTU.getMiniHBaseCluster().getRegionServer(i);
-List<Region> onlineRegions = rs.getOnlineRegions(tableName);
+List<Region> onlineRegions = rs.getRegions(tableName);
 for (Region region : onlineRegions) {
 regions[region.getRegionInfo().getReplicaId()] = region;
 }

@@ -176,7 +176,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
 // replay the edits to the secondary using replay callable
 replicateUsingCallable(connection, entries);

-Region region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName());
+Region region = rs0.getRegion(hriSecondary.getEncodedName());
 HTU.verifyNumericRows(region, f, 0, 1000);

 HTU.deleteNumericRows(table, f, 0, 1000);

@@ -216,7 +216,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
 // replay the edits to the secondary using replay callable
 replicateUsingCallable(connection, entries);

-Region region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName());
+Region region = rs0.getRegion(hriSecondary.getEncodedName());
 HTU.verifyNumericRows(region, f, 0, 1000);

 HTU.loadNumericRows(table, f, 1000, 2000); // load some more data to primary

@@ -228,7 +228,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
 // replicate the new data
 replicateUsingCallable(connection, entries);

-region = rs1.getFromOnlineRegions(hriSecondary.getEncodedName());
+region = rs1.getRegion(hriSecondary.getEncodedName());
 // verify the new data. old data may or may not be there
 HTU.verifyNumericRows(region, f, 1000, 2000);

@@ -261,7 +261,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
 replicator.replicate(new ReplicateContext().setEntries(Lists.newArrayList(entries))
 .setWalGroupId(fakeWalGroupId));

-Region region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName());
+Region region = rs0.getRegion(hriSecondary.getEncodedName());
 HTU.verifyNumericRows(region, f, 0, 1000);

 HTU.deleteNumericRows(table, f, 0, 1000);
@@ -2148,7 +2148,7 @@ public class TestAccessController extends SecureTestUtil {

 final int RETRIES_LIMIT = 10;
 int retries = 0;
-while (newRs.getOnlineRegions(TEST_TABLE2).size() < 1 && retries < RETRIES_LIMIT) {
+while (newRs.getRegions(TEST_TABLE2).size() < 1 && retries < RETRIES_LIMIT) {
 LOG.debug("Waiting for region to be opened. Already retried " + retries
 + " times.");
 try {

@@ -2609,7 +2609,7 @@ public class TestAccessController extends SecureTestUtil {
 for (JVMClusterUtil.RegionServerThread thread:
 TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
 HRegionServer rs = thread.getRegionServer();
-for (Region region: rs.getOnlineRegions(TEST_TABLE)) {
+for (Region region: rs.getRegions(TEST_TABLE)) {
 region.getCoprocessorHost().load(PingCoprocessor.class,
 Coprocessor.PRIORITY_USER, conf);
 }

@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.ipc.SimpleRpcServer;
 import org.apache.hadoop.hbase.metrics.MetricRegistry;
 import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
+import org.apache.hadoop.hbase.regionserver.CoprocessorRegionServerServices;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.security.SecurityInfo;

@@ -265,7 +266,7 @@ public class TestTokenAuthentication {
 public HRegion getRegion() { return null; }

 @Override
-public RegionServerServices getRegionServerServices() {
+public CoprocessorRegionServerServices getCoprocessorRegionServerServices() {
 return mockServices;
 }
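The retry loops above carry over unchanged apart from the accessor. A hedged sketch of the same wait-until-assigned idiom (class, method, and variable names are illustrative, not from the patch).

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HRegionServer;

// Illustrative only: poll until a server hosts at least one region of a table.
final class WaitForRegionSketch {
  static void waitForTable(HRegionServer rs, TableName table)
      throws IOException, InterruptedException {
    // was: rs.getOnlineRegions(table).isEmpty()
    while (rs.getRegions(table).isEmpty()) {
      Thread.sleep(10);
    }
  }
}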
@@ -307,7 +307,7 @@ public abstract class TestVisibilityLabels {
 List<RegionServerThread> regionServerThreads = TEST_UTIL.getHBaseCluster()
 .getRegionServerThreads();
 for (RegionServerThread rsThread : regionServerThreads) {
-List<Region> onlineRegions = rsThread.getRegionServer().getOnlineRegions(
+List<Region> onlineRegions = rsThread.getRegionServer().getRegions(
 LABELS_TABLE_NAME);
 if (onlineRegions.size() > 0) {
 rsThread.getRegionServer().abort("Aborting ");

@@ -341,7 +341,7 @@ public abstract class TestVisibilityLabels {
 for (RegionServerThread rsThread : regionServerThreads) {
 while (true) {
 if (!rsThread.getRegionServer().isAborted()) {
-List<Region> onlineRegions = rsThread.getRegionServer().getOnlineRegions(
+List<Region> onlineRegions = rsThread.getRegionServer().getRegions(
 LABELS_TABLE_NAME);
 if (onlineRegions.size() > 0) {
 break;

@@ -392,13 +392,13 @@ public abstract class TestVisibilityLabels {
 } catch (InterruptedException e) {
 }
 }
-while (regionServer.getOnlineRegions(LABELS_TABLE_NAME).isEmpty()) {
+while (regionServer.getRegions(LABELS_TABLE_NAME).isEmpty()) {
 try {
 Thread.sleep(10);
 } catch (InterruptedException e) {
 }
 }
-Region labelsTableRegion = regionServer.getOnlineRegions(LABELS_TABLE_NAME).get(0);
+Region labelsTableRegion = regionServer.getRegions(LABELS_TABLE_NAME).get(0);
 while (labelsTableRegion.isRecovering()) {
 try {
 Thread.sleep(10);

@@ -754,7 +754,7 @@ public final class SnapshotTestingUtils {
 final TableName tableName)
 throws IOException, InterruptedException {
 HRegionServer rs = util.getRSForFirstRegionInTable(tableName);
-List<Region> onlineRegions = rs.getOnlineRegions(tableName);
+List<Region> onlineRegions = rs.getRegions(tableName);
 for (Region region : onlineRegions) {
 region.waitForFlushesAndCompactions();
 }

@@ -142,7 +142,7 @@ public class TestHBaseFsckEncryption {
 private List<Path> findStorefilePaths(TableName tableName) throws Exception {
 List<Path> paths = new ArrayList<>();
 for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName)
-.getOnlineRegions(htd.getTableName())) {
+.getRegions(htd.getTableName())) {
 for (HStore store : ((HRegion) region).getStores()) {
 for (HStoreFile storefile : store.getStorefiles()) {
 paths.add(storefile.getPath());

@@ -451,7 +451,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
 // flakiness of this test.
 HRegion r = HRegion.openHRegion(
 region, htdDisabled, hrs.getWAL(region), conf);
-hrs.addToOnlineRegions(r);
+hrs.addRegion(r);

 HBaseFsck hbck = doFsck(conf, false);
 assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {

@@ -130,7 +130,7 @@ public class TestWALFiltering {
 private List<byte[]> getRegionsByServer(int rsId) throws IOException {
 List<byte[]> regionNames = Lists.newArrayList();
 HRegionServer hrs = getRegionServer(rsId);
-for (Region r : hrs.getOnlineRegions(TABLE_NAME)) {
+for (Region r : hrs.getRegions(TABLE_NAME)) {
 regionNames.add(r.getRegionInfo().getRegionName());
 }
 return regionNames;

@@ -62,7 +62,7 @@ public class ErrorThrowingGetObserver implements RegionObserver {
 throw new NotServingRegionException("Failing for test");
 case REGION_MOVED:
 throw new RegionMovedException(
-e.getEnvironment().getRegionServerServices().getServerName(), 1);
+e.getEnvironment().getCoprocessorRegionServerServices().getServerName(), 1);
 case SCANNER_RESET:
 throw new ScannerResetException("Failing for test");
 case UNKNOWN_SCANNER: