HBASE-18234 Revisit the async admin api
parent d9dd319390
commit 289938337b
@@ -22,6 +22,7 @@ import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
@@ -30,6 +31,7 @@ import java.util.SortedMap;
import java.util.concurrent.CompletableFuture;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -37,6 +39,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor.CollectingVisitor;
import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.RawAsyncTable;
@@ -45,6 +48,7 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.client.Scan.ReadType;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -98,34 +102,72 @@ public class AsyncMetaTableAccessor
return future;
}

public static CompletableFuture<Pair<HRegionInfo, ServerName>> getRegion(RawAsyncTable metaTable,
byte[] regionName) {
CompletableFuture<Pair<HRegionInfo, ServerName>> future = new CompletableFuture<>();
byte[] row = regionName;
HRegionInfo parsedInfo = null;
/**
* Returns the HRegionLocation from meta for the given region
* @param metaTable
* @param regionName region we're looking for
* @return HRegionLocation for the given region
*/
public static CompletableFuture<Optional<HRegionLocation>> getRegionLocation(
RawAsyncTable metaTable, byte[] regionName) {
CompletableFuture<Optional<HRegionLocation>> future = new CompletableFuture<>();
try {
parsedInfo = MetaTableAccessor.parseRegionInfoFromRegionName(regionName);
row = MetaTableAccessor.getMetaKeyForRegion(parsedInfo);
} catch (Exception parseEx) {
// Ignore if regionName is a encoded region name.
HRegionInfo parsedRegionInfo = MetaTableAccessor.parseRegionInfoFromRegionName(regionName);
metaTable.get(
new Get(MetaTableAccessor.getMetaKeyForRegion(parsedRegionInfo))
.addFamily(HConstants.CATALOG_FAMILY)).whenComplete(
(r, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
future.complete(getRegionLocations(r).map(
locations -> locations.getRegionLocation(parsedRegionInfo.getReplicaId())));
});
} catch (IOException parseEx) {
LOG.warn("Failed to parse the passed region name: " + Bytes.toStringBinary(regionName));
future.completeExceptionally(parseEx);
}
return future;
}

final HRegionInfo finalHRI = parsedInfo;
metaTable.get(new Get(row).addFamily(HConstants.CATALOG_FAMILY)).whenComplete((r, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
RegionLocations locations = MetaTableAccessor.getRegionLocations(r);
HRegionLocation hrl = locations == null ? null
: locations.getRegionLocation(finalHRI == null ? 0 : finalHRI.getReplicaId());
if (hrl == null) {
future.complete(null);
} else {
future.complete(new Pair<>(hrl.getRegionInfo(), hrl.getServerName()));
}
});

/**
* Returns the HRegionLocation from meta for the given encoded region name
* @param metaTable
* @param encodedRegionName region we're looking for
* @return HRegionLocation for the given region
*/
public static CompletableFuture<Optional<HRegionLocation>> getRegionLocationWithEncodedName(
RawAsyncTable metaTable, byte[] encodedRegionName) {
CompletableFuture<Optional<HRegionLocation>> future = new CompletableFuture<>();
metaTable.scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY))
.whenComplete(
(results, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
String encodedRegionNameStr = Bytes.toString(encodedRegionName);
results
.stream()
.filter(result -> !result.isEmpty())
.filter(result -> MetaTableAccessor.getHRegionInfo(result) != null)
.forEach(
result -> {
getRegionLocations(result).ifPresent(
locations -> {
for (HRegionLocation location : locations.getRegionLocations()) {
if (location != null
&& encodedRegionNameStr.equals(location.getRegionInfo()
.getEncodedName())) {
future.complete(Optional.of(location));
return;
}
}
});
});
future.complete(Optional.empty());
});
return future;
}
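
Example (not part of the patch): a minimal sketch of how the new Optional-returning lookup might be consumed; the metaTable and regionName variables are assumed to exist in the caller.

// Resolve a region's location through the reworked API; Optional.empty() replaces the old null result.
AsyncMetaTableAccessor.getRegionLocation(metaTable, regionName).whenComplete((location, err) -> {
  if (err != null) {
    LOG.warn("Failed to locate region " + Bytes.toStringBinary(regionName), err);
    return;
  }
  location.ifPresent(loc -> LOG.info("Region " + loc.getRegionInfo().getRegionNameAsString()
      + " is on " + loc.getServerName()));
});
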
@@ -143,15 +185,29 @@ public class AsyncMetaTableAccessor
}

/**
* Used to get table regions' info and server.
* Used to get all region locations for the specific table.
* @param metaTable
* @param tableName table we're looking for, can be null for getting all regions
* @return the list of regioninfos and server. The return value will be wrapped by a
* @return the list of region locations. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
public static CompletableFuture<List<Pair<HRegionInfo, ServerName>>> getTableRegionsAndLocations(
public static CompletableFuture<List<HRegionLocation>> getTableHRegionLocations(
RawAsyncTable metaTable, final Optional<TableName> tableName) {
return getTableRegionsAndLocations(metaTable, tableName, true);
CompletableFuture<List<HRegionLocation>> future = new CompletableFuture<>();
getTableRegionsAndLocations(metaTable, tableName, true).whenComplete(
(locations, err) -> {
if (err != null) {
future.completeExceptionally(err);
} else if (locations == null || locations.isEmpty()) {
future.complete(Collections.emptyList());
} else {
List<HRegionLocation> regionLocations = locations.stream()
.map(loc -> new HRegionLocation(loc.getFirst(), loc.getSecond()))
.collect(Collectors.toList());
future.complete(regionLocations);
}
});
return future;
}

/**
@@ -162,7 +218,7 @@ public class AsyncMetaTableAccessor
* @return the list of regioninfos and server. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
public static CompletableFuture<List<Pair<HRegionInfo, ServerName>>> getTableRegionsAndLocations(
private static CompletableFuture<List<Pair<HRegionInfo, ServerName>>> getTableRegionsAndLocations(
RawAsyncTable metaTable, final Optional<TableName> tableName,
final boolean excludeOfflinedSplitParents) {
CompletableFuture<List<Pair<HRegionInfo, ServerName>>> future = new CompletableFuture<>();

@@ -364,13 +364,12 @@ public class MetaTableAccessor
* is stored in the name, so the returned object should only be used for the fields
* in the regionName.
*/
protected static HRegionInfo parseRegionInfoFromRegionName(byte[] regionName)
throws IOException {
public static HRegionInfo parseRegionInfoFromRegionName(byte[] regionName) throws IOException {
byte[][] fields = HRegionInfo.parseRegionName(regionName);
long regionId = Long.parseLong(Bytes.toString(fields[2]));
long regionId = Long.parseLong(Bytes.toString(fields[2]));
int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
return new HRegionInfo(
TableName.valueOf(fields[0]), fields[1], fields[1], false, regionId, replicaId);
return new HRegionInfo(TableName.valueOf(fields[0]), fields[1], fields[1], false, regionId,
replicaId);
}
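
Example (not part of the patch): with the method now public, callers such as AsyncMetaTableAccessor can derive the meta row key straight from a region name; only the fields encoded in the name (table, start key, regionId, replicaId) are populated on the returned HRegionInfo.

// Illustrative only: parse the name, then build the hbase:meta key and a Get for the catalog family.
HRegionInfo parsed = MetaTableAccessor.parseRegionInfoFromRegionName(regionName);
byte[] metaRow = MetaTableAccessor.getMetaKeyForRegion(parsed);
Get get = new Get(metaRow).addFamily(HConstants.CATALOG_FAMILY);
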

/**

@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client;
import java.util.List;
import java.util.Collection;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.regex.Pattern;

@@ -42,7 +43,7 @@ import org.apache.hadoop.hbase.util.Pair;
* This feature is still under development, so marked as IA.Private. Will change to public when
* done. Use it with caution.
*/
@InterfaceAudience.Private
@InterfaceAudience.Public
public interface AsyncAdmin {

/**
@@ -55,63 +56,50 @@ public interface AsyncAdmin {
* @return True if table exists already. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
CompletableFuture<Boolean> tableExists(final TableName tableName);
CompletableFuture<Boolean> tableExists(TableName tableName);

/**
* List all the userspace tables.
* @return - returns an array of TableDescriptors wrapped by a {@link CompletableFuture}.
* @see #listTables(Pattern, boolean)
* @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}.
* @see #listTables(Optional, boolean)
*/
CompletableFuture<TableDescriptor[]> listTables();

/**
* List all the tables matching the given pattern.
* @param regex The regular expression to match against
* @param includeSysTables False to match only against userspace tables
* @return - returns an array of TableDescriptors wrapped by a {@link CompletableFuture}.
* @see #listTables(Pattern, boolean)
*/
CompletableFuture<TableDescriptor[]> listTables(String regex, boolean includeSysTables);
default CompletableFuture<List<TableDescriptor>> listTables() {
return listTables(Optional.empty(), false);
}

/**
* List all the tables matching the given pattern.
* @param pattern The compiled regular expression to match against
* @param includeSysTables False to match only against userspace tables
* @return - returns an array of TableDescriptors wrapped by a {@link CompletableFuture}.
* @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<TableDescriptor[]> listTables(Pattern pattern, boolean includeSysTables);
CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> pattern,
boolean includeSysTables);

/**
* List all of the names of userspace tables.
* @return TableName[] an array of table names wrapped by a {@link CompletableFuture}.
* @see #listTableNames(Pattern, boolean)
* @return a list of table names wrapped by a {@link CompletableFuture}.
* @see #listTableNames(Optional, boolean)
*/
CompletableFuture<TableName[]> listTableNames();

/**
* List all of the names of userspace tables.
* @param regex The regular expression to match against
* @param includeSysTables False to match only against userspace tables
* @return TableName[] an array of table names wrapped by a {@link CompletableFuture}.
* @see #listTableNames(Pattern, boolean)
*/
CompletableFuture<TableName[]> listTableNames(final String regex, final boolean includeSysTables);
default CompletableFuture<List<TableName>> listTableNames() {
return listTableNames(Optional.empty(), false);
}

/**
* List all of the names of userspace tables.
* @param pattern The regular expression to match against
* @param includeSysTables False to match only against userspace tables
* @return TableName[] an array of table names wrapped by a {@link CompletableFuture}.
* @return a list of table names wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<TableName[]> listTableNames(final Pattern pattern,
final boolean includeSysTables);
CompletableFuture<List<TableName>> listTableNames(Optional<Pattern> pattern,
boolean includeSysTables);
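
Example (not part of the patch): a sketch of how the List/Optional based listing methods might be called, assuming an AsyncAdmin instance named admin.

// All userspace tables via the new default method.
CompletableFuture<List<TableName>> userTables = admin.listTableNames();
// Tables matching a pattern, including system tables.
admin.listTables(Optional.of(Pattern.compile("test.*")), true)
    .thenAccept(descriptors -> descriptors.forEach(td -> System.out.println(td.getTableName())));
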

/**
* Method for getting the tableDescriptor
* @param tableName as a {@link TableName}
* @return the read-only tableDescriptor wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<TableDescriptor> getTableDescriptor(final TableName tableName);
CompletableFuture<TableDescriptor> getTableDescriptor(TableName tableName);

/**
* Creates a new table.
@@ -140,94 +128,63 @@ public interface AsyncAdmin {
* @param desc table descriptor for table
* @param splitKeys array of split keys for the initial regions of the table
*/
CompletableFuture<Void> createTable(final TableDescriptor desc, byte[][] splitKeys);
CompletableFuture<Void> createTable(TableDescriptor desc, byte[][] splitKeys);

/**
* Deletes a table.
* @param tableName name of table to delete
*/
CompletableFuture<Void> deleteTable(final TableName tableName);

/**
* Deletes tables matching the passed in pattern and wait on completion. Warning: Use this method
* carefully, there is no prompting and the effect is immediate. Consider using
* {@link #listTables(String, boolean)} and
* {@link #deleteTable(org.apache.hadoop.hbase.TableName)}
* @param regex The regular expression to match table names against
* @return Table descriptors for tables that couldn't be deleted. The return value will be wrapped
* by a {@link CompletableFuture}. The return HTDs are read-only.
*/
CompletableFuture<TableDescriptor[]> deleteTables(String regex);
CompletableFuture<Void> deleteTable(TableName tableName);

/**
* Delete tables matching the passed in pattern and wait on completion. Warning: Use this method
* carefully, there is no prompting and the effect is immediate. Consider using
* {@link #listTables(Pattern, boolean) } and
* {@link #listTableNames(Optional, boolean) } and
* {@link #deleteTable(org.apache.hadoop.hbase.TableName)}
* @param pattern The pattern to match table names against
* @return Table descriptors for tables that couldn't be deleted. The return value will be wrapped
* by a {@link CompletableFuture}. The return HTDs are read-only.
*/
CompletableFuture<TableDescriptor[]> deleteTables(Pattern pattern);
CompletableFuture<List<TableDescriptor>> deleteTables(Pattern pattern);

/**
* Truncate a table.
* @param tableName name of table to truncate
* @param preserveSplits True if the splits should be preserved
*/
CompletableFuture<Void> truncateTable(final TableName tableName, final boolean preserveSplits);
CompletableFuture<Void> truncateTable(TableName tableName, boolean preserveSplits);

/**
* Enable a table. The table has to be in disabled state for it to be enabled.
* @param tableName name of the table
*/
CompletableFuture<Void> enableTable(final TableName tableName);
CompletableFuture<Void> enableTable(TableName tableName);

/**
* Enable tables matching the passed in pattern. Warning: Use this method carefully, there is no
* prompting and the effect is immediate. Consider using {@link #listTables(Pattern, boolean)} and
* {@link #enableTable(TableName)}
* @param regex The regular expression to match table names against
* @return Table descriptors for tables that couldn't be enabled. The return value will be wrapped
* by a {@link CompletableFuture}. The return HTDs are read-only.
*/
CompletableFuture<TableDescriptor[]> enableTables(String regex);

/**
* Enable tables matching the passed in pattern. Warning: Use this method carefully, there is no
* prompting and the effect is immediate. Consider using {@link #listTables(Pattern, boolean)} and
* prompting and the effect is immediate. Consider using {@link #listTables(Optional, boolean)} and
* {@link #enableTable(TableName)}
* @param pattern The pattern to match table names against
* @return Table descriptors for tables that couldn't be enabled. The return value will be wrapped
* by a {@link CompletableFuture}. The return HTDs are read-only.
*/
CompletableFuture<TableDescriptor[]> enableTables(Pattern pattern);
CompletableFuture<List<TableDescriptor>> enableTables(Pattern pattern);

/**
* Disable a table. The table has to be in enabled state for it to be disabled.
* @param tableName
*/
CompletableFuture<Void> disableTable(final TableName tableName);
CompletableFuture<Void> disableTable(TableName tableName);

/**
* Disable tables matching the passed in pattern. Warning: Use this method carefully, there is no
* prompting and the effect is immediate. Consider using {@link #listTables(Pattern, boolean)} and
* {@link #disableTable(TableName)}
* @param regex The regular expression to match table names against
* @return Table descriptors for tables that couldn't be disabled. The return value will be wrapped by a
* {@link CompletableFuture}. The return HTDs are read-only.
*/
CompletableFuture<TableDescriptor[]> disableTables(String regex);

/**
* Disable tables matching the passed in pattern. Warning: Use this method carefully, there is no
* prompting and the effect is immediate. Consider using {@link #listTables(Pattern, boolean)} and
* prompting and the effect is immediate. Consider using {@link #listTables(Optional, boolean)} and
* {@link #disableTable(TableName)}
* @param pattern The pattern to match table names against
* @return Table descriptors for tables that couldn't be disabled. The return value will be wrapped by a
* {@link CompletableFuture}. The return HTDs are read-only.
*/
CompletableFuture<TableDescriptor[]> disableTables(Pattern pattern);
CompletableFuture<List<TableDescriptor>> disableTables(Pattern pattern);

/**
* @param tableName name of table to check
@@ -261,61 +218,61 @@ public interface AsyncAdmin {
* yet to be updated Pair.getSecond() is the total number of regions of the table. The
* return value will be wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<Pair<Integer, Integer>> getAlterStatus(final TableName tableName);
CompletableFuture<Pair<Integer, Integer>> getAlterStatus(TableName tableName);

/**
* Add a column family to an existing table.
* @param tableName name of the table to add column family to
* @param columnFamily column family descriptor of column family to be added
*/
CompletableFuture<Void> addColumnFamily(final TableName tableName,
final ColumnFamilyDescriptor columnFamily);
CompletableFuture<Void> addColumnFamily(TableName tableName,
ColumnFamilyDescriptor columnFamily);

/**
* Delete a column family from a table.
* @param tableName name of table
* @param columnFamily name of column family to be deleted
*/
CompletableFuture<Void> deleteColumnFamily(final TableName tableName, final byte[] columnFamily);
CompletableFuture<Void> deleteColumnFamily(TableName tableName, byte[] columnFamily);

/**
* Modify an existing column family on a table.
* @param tableName name of table
* @param columnFamily new column family descriptor to use
*/
CompletableFuture<Void> modifyColumnFamily(final TableName tableName,
final ColumnFamilyDescriptor columnFamily);
CompletableFuture<Void> modifyColumnFamily(TableName tableName,
ColumnFamilyDescriptor columnFamily);

/**
* Create a new namespace.
* @param descriptor descriptor which describes the new namespace
*/
CompletableFuture<Void> createNamespace(final NamespaceDescriptor descriptor);
CompletableFuture<Void> createNamespace(NamespaceDescriptor descriptor);

/**
* Modify an existing namespace.
* @param descriptor descriptor which describes the new namespace
*/
CompletableFuture<Void> modifyNamespace(final NamespaceDescriptor descriptor);
CompletableFuture<Void> modifyNamespace(NamespaceDescriptor descriptor);

/**
* Delete an existing namespace. Only empty namespaces (no tables) can be removed.
* @param name namespace name
*/
CompletableFuture<Void> deleteNamespace(final String name);
CompletableFuture<Void> deleteNamespace(String name);

/**
* Get a namespace descriptor by name
* @param name name of namespace descriptor
* @return A descriptor wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<NamespaceDescriptor> getNamespaceDescriptor(final String name);
CompletableFuture<NamespaceDescriptor> getNamespaceDescriptor(String name);

/**
* List available namespace descriptors
* @return List of descriptors wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<NamespaceDescriptor[]> listNamespaceDescriptors();
CompletableFuture<List<NamespaceDescriptor>> listNamespaceDescriptors();

/**
* @param tableName name of table to check
@@ -329,7 +286,7 @@ public interface AsyncAdmin {
* @param on
* @return Previous balancer value wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<Boolean> setBalancerRunning(final boolean on);
CompletableFuture<Boolean> setBalancerOn(boolean on);

/**
* Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the
@@ -337,72 +294,38 @@ public interface AsyncAdmin {
* @return True if balancer ran, false otherwise. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
CompletableFuture<Boolean> balancer();
default CompletableFuture<Boolean> balance() {
return balance(false);
}

/**
* Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the
* reassignments. If there is region in transition, force parameter of true would still run
* balancer. Can *not* run for other reasons. Check logs.
* @param force whether we should force balance even if there is region in transition.
* @param forcible whether we should force balance even if there is region in transition.
* @return True if balancer ran, false otherwise. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
CompletableFuture<Boolean> balancer(boolean force);
CompletableFuture<Boolean> balance(boolean forcible);

/**
* Query the current state of the balancer.
* @return true if the balancer is enabled, false otherwise.
* The return value will be wrapped by a {@link CompletableFuture}.
* @return true if the balance switch is on, false otherwise The return value will be wrapped by a
* {@link CompletableFuture}.
*/
CompletableFuture<Boolean> isBalancerEnabled();
CompletableFuture<Boolean> isBalancerOn();
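
Example (not part of the patch): chaining the renamed balancer methods, assuming an AsyncAdmin instance named admin.

// Turn the balance switch on, run a normal (non-forcible) balance, then report the state.
admin.setBalancerOn(true)
    .thenCompose(prev -> admin.balance())
    .thenCombine(admin.isBalancerOn(),
        (ran, on) -> "balancer ran=" + ran + ", switch on=" + on)
    .thenAccept(System.out::println);
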

/**
* Close a region. For expert-admins. Runs close on the regionserver. The master will not be
* Close a region. For expert-admins Runs close on the regionserver. The master will not be
* informed of the close.
*
* @param regionname region name to close
* @param serverName If supplied, we'll use this location rather than the one currently in
* <code>hbase:meta</code>
*/
CompletableFuture<Void> closeRegion(String regionname, String serverName);

/**
* Close a region. For expert-admins Runs close on the regionserver. The master will not be
* informed of the close.
*
* @param regionname region name to close
* @param serverName The servername of the regionserver. If passed null we will use servername
* found in the hbase:meta table. A server name is made of host, port and startcode. Here is an
* example: <code> host187.example.com,60020,1289493121758</code>
*/
CompletableFuture<Void> closeRegion(byte[] regionname, String serverName);

/**
* For expert-admins. Runs close on the regionserver. Closes a region based on the encoded region
* name. The region server name is mandatory. If the servername is provided then based on the
* online regions in the specified regionserver the specified region will be closed. The master
* will not be informed of the close. Note that the regionname is the encoded regionname.
*
* @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name
* suffix: e.g. if regionname is
* <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
* then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
* @param serverName The servername of the regionserver. A server name is made of host, port and
* startcode. This is mandatory. Here is an example:
* <code> host187.example.com,60020,1289493121758</code>
* @param regionName region name to close
* @param serverName The servername of the regionserver. If not present, we will use servername
* found in the hbase:meta table. A server name is made of host, port and startcode. Here
* is an example: <code> host187.example.com,60020,1289493121758</code>
* @return true if the region was closed, false if not. The return value will be wrapped by a
* {@link CompletableFuture}.
* {@link CompletableFuture}.
*/
CompletableFuture<Boolean> closeRegionWithEncodedRegionName(String encodedRegionName, String serverName);

/**
* Close a region. For expert-admins Runs close on the regionserver. The master will not be
* informed of the close.
*
* @param sn
* @param hri
*/
CompletableFuture<Void> closeRegion(ServerName sn, HRegionInfo hri);
CompletableFuture<Boolean> closeRegion(byte[] regionName, Optional<ServerName> serverName);

/**
* Get all the online regions on a region server.
@@ -422,60 +345,80 @@ public interface AsyncAdmin {
CompletableFuture<Void> flushRegion(byte[] regionName);

/**
* Compact a table. Asynchronous operation even if CompletableFuture.get().
* Compact a table. When the returned CompletableFuture is done, it only means the compact request
* was sent to HBase and may need some time to finish the compact operation.
* @param tableName table to compact
*/
CompletableFuture<Void> compact(TableName tableName);
default CompletableFuture<Void> compact(TableName tableName) {
return compact(tableName, Optional.empty());
}

/**
* Compact a column family within a table. Asynchronous operation even if CompletableFuture.get().
* Compact a column family within a table. When the returned CompletableFuture is done, it only
* means the compact request was sent to HBase and may need some time to finish the compact
* operation.
* @param tableName table to compact
* @param columnFamily column family within a table
* @param columnFamily column family within a table. If not present, compact the table's all
* column families.
*/
CompletableFuture<Void> compact(TableName tableName, byte[] columnFamily);
CompletableFuture<Void> compact(TableName tableName, Optional<byte[]> columnFamily);

/**
* Compact an individual region. Asynchronous operation even if CompletableFuture.get().
* Compact an individual region. When the returned CompletableFuture is done, it only means the
* compact request was sent to HBase and may need some time to finish the compact operation.
* @param regionName region to compact
*/
CompletableFuture<Void> compactRegion(byte[] regionName);
default CompletableFuture<Void> compactRegion(byte[] regionName) {
return compactRegion(regionName, Optional.empty());
}

/**
* Compact a column family within a region. Asynchronous operation even if
* CompletableFuture.get().
* Compact a column family within a region. When the returned CompletableFuture is done, it only
* means the compact request was sent to HBase and may need some time to finish the compact
* operation.
* @param regionName region to compact
* @param columnFamily column family within a region
* @param columnFamily column family within a region. If not present, compact the region's all
* column families.
*/
CompletableFuture<Void> compactRegion(byte[] regionName, byte[] columnFamily);
CompletableFuture<Void> compactRegion(byte[] regionName, Optional<byte[]> columnFamily);

/**
* Major compact a table. Asynchronous operation even if CompletableFuture.get().
* Major compact a table. When the returned CompletableFuture is done, it only means the compact
* request was sent to HBase and may need some time to finish the compact operation.
* @param tableName table to major compact
*/
CompletableFuture<Void> majorCompact(TableName tableName);
default CompletableFuture<Void> majorCompact(TableName tableName) {
return majorCompact(tableName, Optional.empty());
}

/**
* Major compact a column family within a table. Asynchronous operation even if
* CompletableFuture.get().
* Major compact a column family within a table. When the returned CompletableFuture is done, it
* only means the compact request was sent to HBase and may need some time to finish the compact
* operation.
* @param tableName table to major compact
* @param columnFamily column family within a table
* @param columnFamily column family within a table. If not present, major compact the table's all
* column families.
*/
CompletableFuture<Void> majorCompact(TableName tableName, byte[] columnFamily);
CompletableFuture<Void> majorCompact(TableName tableName, Optional<byte[]> columnFamily);

/**
* Major compact a table or an individual region. Asynchronous operation even if
* CompletableFuture.get().
* Major compact a region. When the returned CompletableFuture is done, it only means the compact
* request was sent to HBase and may need some time to finish the compact operation.
* @param regionName region to major compact
*/
CompletableFuture<Void> majorCompactRegion(byte[] regionName);
default CompletableFuture<Void> majorCompactRegion(byte[] regionName) {
return majorCompactRegion(regionName, Optional.empty());
}

/**
* Major compact a column family within region. Asynchronous operation even if
* CompletableFuture.get().
* @param regionName egion to major compact
* @param columnFamily column family within a region
* Major compact a column family within region. When the returned CompletableFuture is done, it
* only means the compact request was sent to HBase and may need some time to finish the compact
* operation.
* @param regionName region to major compact
* @param columnFamily column family within a region. If not present, major compact the region's
* all column families.
*/
CompletableFuture<Void> majorCompactRegion(byte[] regionName, byte[] columnFamily);
CompletableFuture<Void> majorCompactRegion(byte[] regionName, Optional<byte[]> columnFamily);
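
Example (not part of the patch): the Optional-based compaction calls, assuming an AsyncAdmin instance named admin and a table named tableName.

// Compact every column family of the table (default method delegates to Optional.empty()).
admin.compact(tableName);
// Major compact only the "cf" family; completion only means the request reached HBase.
admin.majorCompact(tableName, Optional.of(Bytes.toBytes("cf")))
    .whenComplete((v, err) -> {
      if (err != null) {
        LOG.warn("major compaction request failed", err);
      }
    });
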

/**
* Compact all regions on the region server.
@@ -496,51 +439,54 @@ public interface AsyncAdmin {
* @param forcible true if do a compulsory merge, otherwise we will only merge two adjacent
* regions
*/
CompletableFuture<Void> mergeRegions(final byte[] nameOfRegionA, final byte[] nameOfRegionB,
final boolean forcible);
CompletableFuture<Void> mergeRegions(byte[] nameOfRegionA, byte[] nameOfRegionB,
boolean forcible);

/**
* Split a table. The method will execute split action for each region in table.
* @param tableName table to split
*/
CompletableFuture<Void> split(final TableName tableName);
CompletableFuture<Void> split(TableName tableName);

/**
* Split an individual region.
* @param regionName region to split
*/
CompletableFuture<Void> splitRegion(final byte[] regionName);
default CompletableFuture<Void> splitRegion(byte[] regionName) {
return splitRegion(regionName, Optional.empty());
}

/**
* Split a table.
* @param tableName table to split
* @param splitPoint the explicit position to split on
*/
CompletableFuture<Void> split(final TableName tableName, final byte[] splitPoint);
CompletableFuture<Void> split(TableName tableName, byte[] splitPoint);

/**
* Split an individual region.
* @param regionName region to split
* @param splitPoint the explicit position to split on
* @param splitPoint the explicit position to split on. If not present, it will decide by region
* server.
*/
CompletableFuture<Void> splitRegion(final byte[] regionName, final byte[] splitPoint);
CompletableFuture<Void> splitRegion(byte[] regionName, Optional<byte[]> splitPoint);
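
Example (not part of the patch): splitting a region with and without an explicit split point; regionName is assumed to be a full region name.

// Let the region server pick the split point.
admin.splitRegion(regionName);
// Split at an explicit row key.
admin.splitRegion(regionName, Optional.of(Bytes.toBytes("row-5000")));
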

/**
* @param regionName Encoded or full name of region to assign.
*/
CompletableFuture<Void> assign(final byte[] regionName);
CompletableFuture<Void> assign(byte[] regionName);

/**
* Unassign a region from current hosting regionserver. Region will then be assigned to a
* regionserver chosen at random. Region could be reassigned back to the same server. Use
* {@link #move(byte[], byte[])} if you want to control the region movement.
* {@link #move(byte[], Optional)} if you want to control the region movement.
* @param regionName Encoded or full name of region to unassign. Will clear any existing
* RegionPlan if one found.
* @param force If true, force unassign (Will remove region from regions-in-transition too if
* @param forcible If true, force unassign (Will remove region from regions-in-transition too if
* present. If results in double assignment use hbck -fix to resolve. To be used by
* experts).
*/
CompletableFuture<Void> unassign(final byte[] regionName, final boolean force);
CompletableFuture<Void> unassign(byte[] regionName, boolean forcible);

/**
* Offline specified region from master's in-memory state. It will not attempt to reassign the
@@ -550,22 +496,22 @@ public interface AsyncAdmin {
* experts or hbck.
* @param regionName Encoded or full name of region to offline
*/
CompletableFuture<Void> offline(final byte[] regionName);
CompletableFuture<Void> offline(byte[] regionName);

/**
* Move the region <code>r</code> to <code>dest</code>.
* @param regionName Encoded or full name of region to move.
* @param destServerName The servername of the destination regionserver. If passed the empty byte
* array we'll assign to a random server. A server name is made of host, port and
* startcode. Here is an example: <code> host187.example.com,60020,1289493121758</code>
* @param destServerName The servername of the destination regionserver. If not present, we'll
* assign to a random server. A server name is made of host, port and startcode. Here is
* an example: <code> host187.example.com,60020,1289493121758</code>
*/
CompletableFuture<Void> move(final byte[] regionName, final byte[] destServerName);
CompletableFuture<Void> move(byte[] regionName, Optional<ServerName> destServerName);
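
Example (not part of the patch): with Optional<ServerName>, an empty value replaces the old empty-byte-array convention for "pick a random server".

// Move to a server chosen by the master.
admin.move(regionName, Optional.empty());
// Move to a specific region server.
admin.move(regionName, Optional.of(ServerName.valueOf("host187.example.com,60020,1289493121758")));
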

/**
* Apply the new quota settings.
* @param quota the quota settings
*/
CompletableFuture<Void> setQuota(final QuotaSettings quota);
CompletableFuture<Void> setQuota(QuotaSettings quota);

/**
* List the quotas based on the filter.
@@ -579,41 +525,41 @@ public interface AsyncAdmin {
* @param peerId a short name that identifies the peer
* @param peerConfig configuration for the replication slave cluster
*/
CompletableFuture<Void> addReplicationPeer(final String peerId,
final ReplicationPeerConfig peerConfig);
CompletableFuture<Void> addReplicationPeer(String peerId,
ReplicationPeerConfig peerConfig);

/**
* Remove a peer and stop the replication
* @param peerId a short name that identifies the peer
*/
CompletableFuture<Void> removeReplicationPeer(final String peerId);
CompletableFuture<Void> removeReplicationPeer(String peerId);

/**
* Restart the replication stream to the specified peer
* @param peerId a short name that identifies the peer
*/
CompletableFuture<Void> enableReplicationPeer(final String peerId);
CompletableFuture<Void> enableReplicationPeer(String peerId);

/**
* Stop the replication stream to the specified peer
* @param peerId a short name that identifies the peer
*/
CompletableFuture<Void> disableReplicationPeer(final String peerId);
CompletableFuture<Void> disableReplicationPeer(String peerId);

/**
* Returns the configured ReplicationPeerConfig for the specified peer
* @param peerId a short name that identifies the peer
* @return ReplicationPeerConfig for the peer wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<ReplicationPeerConfig> getReplicationPeerConfig(final String peerId);
CompletableFuture<ReplicationPeerConfig> getReplicationPeerConfig(String peerId);

/**
* Update the peerConfig for the specified peer
* @param peerId a short name that identifies the peer
* @param peerConfig new config for the peer
*/
CompletableFuture<Void> updateReplicationPeerConfig(final String peerId,
final ReplicationPeerConfig peerConfig);
CompletableFuture<Void> updateReplicationPeerConfig(String peerId,
ReplicationPeerConfig peerConfig);

/**
* Append the replicable table-cf config of the specified peer
@@ -636,15 +582,9 @@ public interface AsyncAdmin {
* @return a list of replication peers description. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers();

/**
* Return a list of replication peers.
* @param regex The regular expression to match peer id
* @return a list of replication peers description. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers(String regex);
default CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers() {
return listReplicationPeers(Optional.empty());
}

/**
* Return a list of replication peers.
@@ -652,7 +592,8 @@ public interface AsyncAdmin {
* @return a list of replication peers description. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers(Pattern pattern);
CompletableFuture<List<ReplicationPeerDescription>>
listReplicationPeers(Optional<Pattern> pattern);

/**
* Find all table and column families that are replicated from this cluster
@@ -686,7 +627,7 @@ public interface AsyncAdmin {
* @param tableName name of the table to snapshot
* @param type type of snapshot to take
*/
CompletableFuture<Void> snapshot(final String snapshotName, final TableName tableName,
CompletableFuture<Void> snapshot(String snapshotName, TableName tableName,
SnapshotType type);

/**
@@ -718,7 +659,7 @@ public interface AsyncAdmin {
* @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
* running
*/
CompletableFuture<Boolean> isSnapshotFinished(final SnapshotDescription snapshot);
CompletableFuture<Boolean> isSnapshotFinished(SnapshotDescription snapshot);

/**
* Restore the specified snapshot on the original table. (The table must be disabled) If the
@@ -747,7 +688,7 @@ public interface AsyncAdmin {
* @param snapshotName name of the snapshot to be cloned
* @param tableName name of the table where the snapshot will be restored
*/
CompletableFuture<Void> cloneSnapshot(final String snapshotName, final TableName tableName);
CompletableFuture<Void> cloneSnapshot(String snapshotName, TableName tableName);

/**
* List completed snapshots.
@@ -756,13 +697,6 @@ public interface AsyncAdmin {
*/
CompletableFuture<List<SnapshotDescription>> listSnapshots();

/**
* List all the completed snapshots matching the given regular expression.
* @param regex The regular expression to match against
* @return - returns a List of SnapshotDescription wrapped by a {@link CompletableFuture}
*/
CompletableFuture<List<SnapshotDescription>> listSnapshots(String regex);

/**
* List all the completed snapshots matching the given pattern.
* @param pattern The compiled regular expression to match against
@@ -770,17 +704,6 @@ public interface AsyncAdmin {
*/
CompletableFuture<List<SnapshotDescription>> listSnapshots(Pattern pattern);

/**
* List all the completed snapshots matching the given table name regular expression and snapshot
* name regular expression.
* @param tableNameRegex The table name regular expression to match against
* @param snapshotNameRegex The snapshot name regular expression to match against
* @return - returns a List of completed SnapshotDescription wrapped by a
* {@link CompletableFuture}
*/
CompletableFuture<List<SnapshotDescription>> listTableSnapshots(String tableNameRegex,
String snapshotNameRegex);

/**
* List all the completed snapshots matching the given table name regular expression and snapshot
* name regular expression.
@@ -798,26 +721,12 @@ public interface AsyncAdmin {
*/
CompletableFuture<Void> deleteSnapshot(String snapshotName);

/**
* Delete existing snapshots whose names match the pattern passed.
* @param regex The regular expression to match against
*/
CompletableFuture<Void> deleteSnapshots(String regex);

/**
* Delete existing snapshots whose names match the pattern passed.
* @param pattern pattern for names of the snapshot to match
*/
CompletableFuture<Void> deleteSnapshots(Pattern pattern);

/**
* Delete all existing snapshots matching the given table name regular expression and snapshot
* name regular expression.
* @param tableNameRegex The table name regular expression to match against
* @param snapshotNameRegex The snapshot name regular expression to match against
*/
CompletableFuture<Void> deleteTableSnapshots(String tableNameRegex, String snapshotNameRegex);

/**
* Delete all existing snapshots matching the given table name regular expression and snapshot
* name regular expression.
@@ -861,7 +770,7 @@ public interface AsyncAdmin {
* @param instance The instance name of the procedure
* @param props Property/Value pairs of properties passing to the procedure
* @return true if the specified procedure is finished successfully, false if it is still running.
* The value is vrapped by {@link CompletableFuture}
* The value is wrapped by {@link CompletableFuture}
*/
CompletableFuture<Boolean> isProcedureFinished(String signature, String instance,
Map<String, String> props);
@@ -879,5 +788,5 @@ public interface AsyncAdmin {
* List procedures
* @return procedure list wrapped by {@link CompletableFuture}
*/
CompletableFuture<ProcedureInfo[]> listProcedures();
CompletableFuture<List<ProcedureInfo>> listProcedures();
}

File diff suppressed because it is too large

@@ -33,8 +33,10 @@ import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NavigableSet;
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
@@ -414,18 +416,14 @@ public final class ProtobufUtil {
}

/**
* Get NamespaceDescriptor[] from ListNamespaceDescriptorsResponse protobuf
* Get a list of NamespaceDescriptor from ListNamespaceDescriptorsResponse protobuf
* @param proto the ListNamespaceDescriptorsResponse
* @return NamespaceDescriptor[]
* @return a list of NamespaceDescriptor
*/
public static NamespaceDescriptor[] getNamespaceDescriptorArray(
public static List<NamespaceDescriptor> toNamespaceDescriptorList(
ListNamespaceDescriptorsResponse proto) {
List<HBaseProtos.NamespaceDescriptor> list = proto.getNamespaceDescriptorList();
NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()];
for (int i = 0; i < list.size(); i++) {
res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i));
}
return res;
return proto.getNamespaceDescriptorList().stream().map(ProtobufUtil::toNamespaceDescriptor)
.collect(Collectors.toList());
}

/**
@@ -433,7 +431,7 @@ public final class ProtobufUtil {
*
* @param proto the GetTableDescriptorsResponse
* @return a immutable HTableDescriptor array
* @deprecated Use {@link #getTableDescriptorArray} after removing the HTableDescriptor
* @deprecated Use {@link #toTableDescriptorList} after removing the HTableDescriptor
*/
@Deprecated
public static HTableDescriptor[] getHTableDescriptorArray(GetTableDescriptorsResponse proto) {
@@ -447,18 +445,17 @@ public final class ProtobufUtil {
}

/**
* Get TableDescriptor[] from GetTableDescriptorsResponse protobuf
* Get a list of TableDescriptor from GetTableDescriptorsResponse protobuf
*
* @param proto the GetTableDescriptorsResponse
* @return TableDescriptor[]
* @return a list of TableDescriptor
*/
public static TableDescriptor[] getTableDescriptorArray(GetTableDescriptorsResponse proto) {
if (proto == null) return new TableDescriptor[0];
return proto.getTableSchemaList()
.stream()
.map(ProtobufUtil::convertToTableDesc)
.toArray(size -> new TableDescriptor[size]);
public static List<TableDescriptor> toTableDescriptorList(GetTableDescriptorsResponse proto) {
if (proto == null) return new ArrayList<>();
return proto.getTableSchemaList().stream().map(ProtobufUtil::convertToTableDesc)
.collect(Collectors.toList());
}

/**
* get the split keys in form "byte [][]" from a CreateTableRequest proto
*
@@ -2398,6 +2395,13 @@ public final class ProtobufUtil {
.setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())).build();
}

public static List<TableName> toTableNameList(List<HBaseProtos.TableName> tableNamesList) {
if (tableNamesList == null) {
return new ArrayList<>();
}
return tableNamesList.stream().map(ProtobufUtil::toTableName).collect(Collectors.toList());
}

public static TableName[] getTableNameArray(List<HBaseProtos.TableName> tableNamesList) {
if (tableNamesList == null) {
return new TableName[0];
@@ -3345,23 +3349,33 @@ public final class ProtobufUtil {
}

/**
* Create a SplitRegionRequest for a given region name
*
* @param regionName the name of the region to split
* @param splitPoint the split point
* @return a SplitRegionRequest
*/
public static SplitRegionRequest buildSplitRegionRequest(
final byte[] regionName, final byte[] splitPoint) {
SplitRegionRequest.Builder builder = SplitRegionRequest.newBuilder();
RegionSpecifier region = RequestConverter.buildRegionSpecifier(
RegionSpecifierType.REGION_NAME, regionName);
builder.setRegion(region);
if (splitPoint != null) {
builder.setSplitPoint(UnsafeByteOperations.unsafeWrap(splitPoint));
}
return builder.build();
}
* Create a SplitRegionRequest for a given region name
* @param regionName the name of the region to split
* @param splitPoint the split point
* @return a SplitRegionRequest
* @deprecated Use {@link #buildSplitRegionRequest(byte[], Optional)} instead.
*/
@Deprecated
public static SplitRegionRequest buildSplitRegionRequest(final byte[] regionName,
final byte[] splitPoint) {
return buildSplitRegionRequest(regionName, Optional.ofNullable(splitPoint));
}

/**
* Create a SplitRegionRequest for a given region name
* @param regionName the name of the region to split
* @param splitPoint the split point
* @return a SplitRegionRequest
*/
public static SplitRegionRequest buildSplitRegionRequest(byte[] regionName,
Optional<byte[]> splitPoint) {
SplitRegionRequest.Builder builder = SplitRegionRequest.newBuilder();
RegionSpecifier region =
RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
builder.setRegion(region);
splitPoint.ifPresent(sp -> builder.setSplitPoint(UnsafeByteOperations.unsafeWrap(sp)));
return builder.build();
}

public static ProcedureDescription buildProcedureDescription(String signature, String instance,
Map<String, String> props) {

@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.shaded.protobuf;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.regex.Pattern;

@@ -919,25 +920,37 @@ public final class RequestConverter {
builder.setRegionInfo(HRegionInfo.convert(regionInfo));
return builder.build();
}
/**
* Create a CompactRegionRequest for a given region name
*
* @param regionName the name of the region to get info
* @param major indicator if it is a major compaction
* @return a CompactRegionRequest
*/
public static CompactRegionRequest buildCompactRegionRequest(
final byte[] regionName, final boolean major, final byte [] family) {
CompactRegionRequest.Builder builder = CompactRegionRequest.newBuilder();
RegionSpecifier region = buildRegionSpecifier(
RegionSpecifierType.REGION_NAME, regionName);
builder.setRegion(region);
builder.setMajor(major);
if (family != null) {
builder.setFamily(UnsafeByteOperations.unsafeWrap(family));
}
return builder.build();
}

/**
* Create a CompactRegionRequest for a given region name
* @param regionName the name of the region to get info
* @param major indicator if it is a major compaction
* @param columnFamily
* @return a CompactRegionRequest
* @deprecated Use {@link #buildCompactRegionRequest(byte[], boolean, Optional)} instead.
*/
@Deprecated
public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, boolean major,
byte[] columnFamily) {
return buildCompactRegionRequest(regionName, major, Optional.ofNullable(columnFamily));
}

/**
* Create a CompactRegionRequest for a given region name
* @param regionName the name of the region to get info
* @param major indicator if it is a major compaction
* @param columnFamily
* @return a CompactRegionRequest
*/
public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, boolean major,
Optional<byte[]> columnFamily) {
CompactRegionRequest.Builder builder = CompactRegionRequest.newBuilder();
RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
builder.setRegion(region);
builder.setMajor(major);
columnFamily.ifPresent(family -> builder.setFamily(UnsafeByteOperations.unsafeWrap(family)));
return builder.build();
}
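
Example (not part of the patch): callers that used to pass a nullable family can keep the deprecated overload, while new code passes an Optional directly; regionName is assumed to be a full region name.

// New style: compact all column families, major compaction.
CompactRegionRequest all = RequestConverter.buildCompactRegionRequest(regionName, true, Optional.empty());
// Old style still compiles and simply wraps the nullable argument.
CompactRegionRequest one = RequestConverter.buildCompactRegionRequest(regionName, false, Bytes.toBytes("cf"));
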
|
||||
|
||||
/**
|
||||
* @see {@link #buildRollWALWriterRequest()}
|
||||
|
@ -1084,12 +1097,13 @@ public final class RequestConverter {
|
|||
|
||||
/**
|
||||
* Create a protocol buffer MoveRegionRequest
|
||||
*
|
||||
* @param encodedRegionName
|
||||
* @param destServerName
|
||||
* @return A MoveRegionRequest
|
||||
* @throws DeserializationException
|
||||
* @deprecated Use {@link #buildMoveRegionRequest(byte[], Optional)} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
public static MoveRegionRequest buildMoveRegionRequest(
|
||||
final byte [] encodedRegionName, final byte [] destServerName) throws
|
||||
DeserializationException {
|
||||
|
@ -1103,6 +1117,22 @@ public final class RequestConverter {
|
|||
return builder.build();
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a protocol buffer MoveRegionRequest
|
||||
* @param encodedRegionName
|
||||
* @param destServerName
|
||||
* @return A MoveRegionRequest
|
||||
*/
|
||||
public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName,
|
||||
Optional<ServerName> destServerName) {
|
||||
MoveRegionRequest.Builder builder = MoveRegionRequest.newBuilder();
|
||||
builder.setRegion(buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME,
|
||||
encodedRegionName));
|
||||
destServerName.ifPresent(serverName -> builder.setDestServerName(ProtobufUtil
|
||||
.toServerName(serverName)));
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
public static MergeTableRegionsRequest buildMergeTableRegionsRequest(
final byte[][] encodedNameOfdaughaterRegions,
final boolean forcible,

@@ -1310,11 +1340,25 @@ public final class RequestConverter {
* @param pattern The compiled regular expression to match against
* @param includeSysTables False to match only against userspace tables
* @return a GetTableDescriptorsRequest
* @deprecated Use {@link #buildGetTableDescriptorsRequest(Optional, boolean)} instead.
*/
@Deprecated
public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest(final Pattern pattern,
boolean includeSysTables) {
return buildGetTableDescriptorsRequest(Optional.ofNullable(pattern), includeSysTables);
}

/**
* Creates a protocol buffer GetTableDescriptorsRequest
*
* @param pattern The compiled regular expression to match against
* @param includeSysTables False to match only against userspace tables
* @return a GetTableDescriptorsRequest
*/
public static GetTableDescriptorsRequest
buildGetTableDescriptorsRequest(Optional<Pattern> pattern, boolean includeSysTables) {
GetTableDescriptorsRequest.Builder builder = GetTableDescriptorsRequest.newBuilder();
if (pattern != null) builder.setRegex(pattern.toString());
pattern.ifPresent(p -> builder.setRegex(p.toString()));
builder.setIncludeSysTables(includeSysTables);
return builder.build();
}

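A short sketch of how a caller might build the descriptor request with an Optional pattern; names outside RequestConverter itself are assumptions, not taken from the patch.

import java.util.Optional;
import java.util.regex.Pattern;

import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;

public class GetTableDescriptorsRequestSketch {
  public static void main(String[] args) {
    // A present Pattern becomes the regex filter; Optional.empty() asks for every table.
    GetTableDescriptorsRequest req = RequestConverter
        .buildGetTableDescriptorsRequest(Optional.of(Pattern.compile("test.*")), false);
    System.out.println(req.getRegex() + " " + req.getIncludeSysTables());
  }
}
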
@@ -1325,11 +1369,25 @@ public final class RequestConverter {
* @param pattern The compiled regular expression to match against
* @param includeSysTables False to match only against userspace tables
* @return a GetTableNamesRequest
* @deprecated Use {@link #buildGetTableNamesRequest(Optional, boolean)} instead.
*/
@Deprecated
public static GetTableNamesRequest buildGetTableNamesRequest(final Pattern pattern,
boolean includeSysTables) {
return buildGetTableNamesRequest(Optional.ofNullable(pattern), includeSysTables);
}

/**
* Creates a protocol buffer GetTableNamesRequest
*
* @param pattern The compiled regular expression to match against
* @param includeSysTables False to match only against userspace tables
* @return a GetTableNamesRequest
*/
public static GetTableNamesRequest buildGetTableNamesRequest(Optional<Pattern> pattern,
boolean includeSysTables) {
GetTableNamesRequest.Builder builder = GetTableNamesRequest.newBuilder();
if (pattern != null) builder.setRegex(pattern.toString());
pattern.ifPresent(p -> builder.setRegex(p.toString()));
builder.setIncludeSysTables(includeSysTables);
return builder.build();
}

@@ -1635,11 +1693,18 @@ public final class RequestConverter {
return builder.build();
}

/**
* @deprecated Use {@link #buildListReplicationPeersRequest(Optional)} instead.
*/
@Deprecated
public static ListReplicationPeersRequest buildListReplicationPeersRequest(Pattern pattern) {
return buildListReplicationPeersRequest(Optional.ofNullable(pattern));
}

public static ListReplicationPeersRequest
buildListReplicationPeersRequest(Optional<Pattern> pattern) {
ListReplicationPeersRequest.Builder builder = ListReplicationPeersRequest.newBuilder();
if (pattern != null) {
builder.setRegex(pattern.toString());
}
pattern.ifPresent(p -> builder.setRegex(p.toString()));
return builder.build();
}

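The same Optional pattern applies to replication peers; the sketch below is illustrative and assumes the shaded ReplicationProtos location for the generated request class.

import java.util.Optional;
import java.util.regex.Pattern;

import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;

public class ListReplicationPeersRequestSketch {
  public static void main(String[] args) {
    // Optional.empty() lists every peer; a Pattern narrows the listing by peer id.
    ListReplicationPeersRequest req =
        RequestConverter.buildListReplicationPeersRequest(Optional.of(Pattern.compile("peer_.*")));
    System.out.println(req.hasRegex() ? req.getRegex() : "<all peers>");
  }
}
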
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.logging.Log;

@@ -136,7 +137,7 @@ public class TestHRegionServerBulkLoadWithOldSecureEndpoint extends TestHRegionS
conn.getAdmin(getLocation().getServerName());
CompactRegionRequest request =
RequestConverter.buildCompactRegionRequest(
getLocation().getRegionInfo().getRegionName(), true, null);
getLocation().getRegionInfo().getRegionName(), true, Optional.empty());
server.compactRegion(null, request);
numCompactions.incrementAndGet();
return null;

@@ -29,23 +29,23 @@ public class TestAsyncBalancerAdminApi extends TestAsyncAdminBase {

@Test
public void testBalancer() throws Exception {
boolean initialState = admin.isBalancerEnabled().get();
boolean initialState = admin.isBalancerOn().get();

// Start the balancer, wait for it.
boolean prevState = admin.setBalancerRunning(!initialState).get();
boolean prevState = admin.setBalancerOn(!initialState).get();

// The previous state should be the original state we observed
assertEquals(initialState, prevState);

// Current state should be opposite of the original
assertEquals(!initialState, admin.isBalancerEnabled().get());
assertEquals(!initialState, admin.isBalancerOn().get());

// Reset it back to what it was
prevState = admin.setBalancerRunning(initialState).get();
prevState = admin.setBalancerOn(initialState).get();

// The previous state should be the opposite of the initial state
assertEquals(!initialState, prevState);
// Current state should be the original state again
assertEquals(initialState, admin.isBalancerEnabled().get());
assertEquals(initialState, admin.isBalancerOn().get());
}
}

@@ -74,7 +74,7 @@ public class TestAsyncNamespaceAdminApi extends TestAsyncAdminBase {

// create namespace and verify
admin.createNamespace(NamespaceDescriptor.create(nsName).build()).join();
assertEquals(3, admin.listNamespaceDescriptors().get().length);
assertEquals(3, admin.listNamespaceDescriptors().get().size());
TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {

@@ -84,7 +84,7 @@ public class TestAsyncNamespaceAdminApi extends TestAsyncAdminBase {
assertNotNull(zkNamespaceManager.get(nsName));
// delete namespace and verify
admin.deleteNamespace(nsName).join();
assertEquals(2, admin.listNamespaceDescriptors().get().length);
assertEquals(2, admin.listNamespaceDescriptors().get().size());
assertEquals(2, zkNamespaceManager.list().size());
assertNull(zkNamespaceManager.get(nsName));
}

@@ -34,6 +34,7 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;

@@ -94,8 +95,8 @@ public class TestAsyncProcedureAdminApi extends TestAsyncAdminBase {

@Test
public void listProcedure() throws Exception {
ProcedureInfo[] procList = admin.listProcedures().get();
assertTrue(procList.length >= 0);
List<ProcedureInfo> procList = admin.listProcedures().get();
assertTrue(procList.size() >= 0);
}

@Test

@@ -25,6 +25,7 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Random;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicInteger;

@@ -84,8 +85,8 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
for (HRegionInfo regionInfo : onlineRegions) {
if (!regionInfo.getTable().isSystemTable()) {
info = regionInfo;
boolean closed = admin.closeRegionWithEncodedRegionName(regionInfo.getEncodedName(),
rs.getServerName().getServerName()).get();
boolean closed = admin.closeRegion(regionInfo.getRegionName(),
Optional.of(rs.getServerName())).get();
assertTrue(closed);
}
}

@@ -114,7 +115,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
info = regionInfo;
boolean catchNotServingException = false;
try {
admin.closeRegionWithEncodedRegionName("sample", rs.getServerName().getServerName())
admin.closeRegion(Bytes.toBytes("sample"), Optional.of(rs.getServerName()))
.get();
} catch (Exception e) {
catchNotServingException = true;

@@ -129,77 +130,20 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
onlineRegions.contains(info));
}

@Test
public void testCloseRegionWhenServerNameIsNull() throws Exception {
byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegion3");
createTableWithDefaultConf(TableName.valueOf(TABLENAME));

HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TableName.valueOf(TABLENAME));

try {
List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
for (HRegionInfo regionInfo : onlineRegions) {
if (!regionInfo.isMetaTable()) {
if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion3")) {
admin.closeRegionWithEncodedRegionName(regionInfo.getEncodedName(), null).get();
}
}
}
fail("The test should throw exception if the servername passed is null.");
} catch (IllegalArgumentException e) {
}
}

@Test
public void testCloseRegionWhenServerNameIsEmpty() throws Exception {
byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegionWhenServerNameIsEmpty");
createTableWithDefaultConf(TableName.valueOf(TABLENAME));

HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TableName.valueOf(TABLENAME));

try {
List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
for (HRegionInfo regionInfo : onlineRegions) {
if (!regionInfo.isMetaTable()) {
if (regionInfo.getRegionNameAsString()
.contains("TestHBACloseRegionWhenServerNameIsEmpty")) {
admin.closeRegionWithEncodedRegionName(regionInfo.getEncodedName(), " ").get();
}
}
}
fail("The test should throw exception if the servername passed is empty.");
} catch (IllegalArgumentException e) {
}
}

@Test
public void testCloseRegionWhenEncodedRegionNameIsNotGiven() throws Exception {
byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegion4");
createTableWithDefaultConf(TableName.valueOf(TABLENAME));

HRegionInfo info = null;
HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TableName.valueOf(TABLENAME));

List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
for (HRegionInfo regionInfo : onlineRegions) {
if (!regionInfo.isMetaTable()) {
if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion4")) {
info = regionInfo;
boolean catchNotServingException = false;
try {
admin.closeRegionWithEncodedRegionName(regionInfo.getRegionNameAsString(),
rs.getServerName().getServerName()).get();
} catch (Exception e) {
// expected, ignore it.
catchNotServingException = true;
}
assertTrue(catchNotServingException);
if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegionWhenServerNameIsEmpty")) {
admin.closeRegion(regionInfo.getRegionName(), Optional.empty()).get();
}
}
}
onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
assertTrue("The region should be present in online regions list.",
onlineRegions.contains(info));
}

@Test

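For orientation, a small sketch (not part of the patch) of the revised AsyncAdmin.closeRegion call exercised by the tests above; the helper method and its name are hypothetical.

import java.util.Optional;
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncAdmin;

public class CloseRegionSketch {
  // Closes a region by its full region name; the hosting server is optional.
  static CompletableFuture<Boolean> closeRegion(AsyncAdmin admin, byte[] regionName,
      ServerName server) {
    // Optional.ofNullable(server): when no server is given the admin resolves it itself,
    // replacing the old closeRegionWithEncodedRegionName(String, String) usage removed above.
    return admin.closeRegion(regionName, Optional.ofNullable(server));
  }
}
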
@@ -214,10 +158,10 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
HRegionLocation regionLocation = locator.getRegionLocation(Bytes.toBytes("mmm"));
HRegionInfo region = regionLocation.getRegionInfo();
byte[] regionName = region.getRegionName();
Pair<HRegionInfo, ServerName> pair = rawAdmin.getRegion(regionName).get();
assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
pair = rawAdmin.getRegion(region.getEncodedNameAsBytes()).get();
assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
HRegionLocation location = rawAdmin.getRegionLocation(regionName).get();
assertTrue(Bytes.equals(regionName, location.getRegionInfo().getRegionName()));
location = rawAdmin.getRegionLocation(region.getEncodedNameAsBytes()).get();
assertTrue(Bytes.equals(regionName, location.getRegionInfo().getRegionName()));
}
}

@@ -289,7 +233,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
table.put(puts);

if (isSplitRegion) {
admin.splitRegion(regions.get(0).getRegionName(), splitPoint).get();
admin.splitRegion(regions.get(0).getRegionName(), Optional.ofNullable(splitPoint)).get();
} else {
if (splitPoint == null) {
admin.split(tableName).get();

@@ -456,7 +400,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
}
}
assertTrue(destServerName != null && !destServerName.equals(serverName));
admin.move(hri.getEncodedNameAsBytes(), Bytes.toBytes(destServerName.getServerName())).get();
admin.move(hri.getRegionName(), Optional.of(destServerName)).get();

long timeoutTime = System.currentTimeMillis() + 30000;
while (true) {

@@ -607,15 +551,15 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
assertTrue(countBefore > 0); // there should be some data files
if (expectedState == CompactionState.MINOR) {
if (singleFamily) {
admin.compact(table, family).get();
admin.compact(table, Optional.of(family)).get();
} else {
admin.compact(table).get();
admin.compact(table, Optional.empty()).get();
}
} else {
if (singleFamily) {
admin.majorCompact(table, family).get();
admin.majorCompact(table, Optional.of(family)).get();
} else {
admin.majorCompact(table).get();
admin.majorCompact(table, Optional.empty()).get();
}
}
long curt = System.currentTimeMillis();

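A brief sketch of the Optional-based compaction calls exercised above; the helper method and the family name are hypothetical, only the admin.majorCompact signature mirrors the diff.

import java.util.Optional;
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactSketch {
  // Requests a major compaction of a single column family without blocking the caller.
  static CompletableFuture<Void> majorCompactFamily(AsyncAdmin admin, TableName table,
      String family) {
    // Optional.of(family) limits the compaction to one store;
    // Optional.empty() compacts every family of the table.
    return admin.majorCompact(table, Optional.of(Bytes.toBytes(family)));
  }
}
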
@@ -53,9 +53,9 @@ public class TestAsyncSnapshotAdminApi extends TestAsyncAdminBase {

@After
public void cleanup() throws Exception {
admin.deleteSnapshots(".*").get();
admin.disableTables(".*").get();
admin.deleteTables(".*").get();
admin.deleteSnapshots(Pattern.compile(".*")).get();
admin.disableTables(Pattern.compile(".*")).get();
admin.deleteTables(Pattern.compile(".*")).get();
}

@Test

@@ -175,13 +175,22 @@ public class TestAsyncSnapshotAdminApi extends TestAsyncAdminBase {
admin.snapshot(snapshotName3, tableName).get();
Assert.assertEquals(admin.listSnapshots().get().size(), 3);

Assert.assertEquals(admin.listSnapshots("(.*)").get().size(), 3);
Assert.assertEquals(admin.listSnapshots("snapshotName(\\d+)").get().size(), 3);
Assert.assertEquals(admin.listSnapshots("snapshotName[1|3]").get().size(), 2);
Assert.assertEquals(admin.listSnapshots(Pattern.compile("(.*)")).get().size(), 3);
Assert.assertEquals(admin.listSnapshots(Pattern.compile("snapshotName(\\d+)")).get().size(), 3);
Assert.assertEquals(admin.listSnapshots(Pattern.compile("snapshotName[1|3]")).get().size(), 2);
Assert.assertEquals(admin.listSnapshots(Pattern.compile("snapshot(.*)")).get().size(), 3);
Assert.assertEquals(admin.listTableSnapshots("testListSnapshots", "s(.*)").get().size(), 3);
Assert.assertEquals(admin.listTableSnapshots("fakeTableName", "snap(.*)").get().size(), 0);
Assert.assertEquals(admin.listTableSnapshots("test(.*)", "snap(.*)[1|3]").get().size(), 2);
Assert.assertEquals(
admin.listTableSnapshots(Pattern.compile("testListSnapshots"), Pattern.compile("s(.*)")).get()
.size(),
3);
Assert.assertEquals(
admin.listTableSnapshots(Pattern.compile("fakeTableName"), Pattern.compile("snap(.*)")).get()
.size(),
0);
Assert.assertEquals(
admin.listTableSnapshots(Pattern.compile("test(.*)"), Pattern.compile("snap(.*)[1|3]")).get()
.size(),
2);
}

@Test

@@ -201,19 +210,19 @@ public class TestAsyncSnapshotAdminApi extends TestAsyncAdminBase {
admin.deleteSnapshot(snapshotName1).get();
Assert.assertEquals(admin.listSnapshots().get().size(), 2);

admin.deleteSnapshots("(.*)abc").get();
admin.deleteSnapshots(Pattern.compile("(.*)abc")).get();
Assert.assertEquals(admin.listSnapshots().get().size(), 2);

admin.deleteSnapshots("(.*)1").get();
admin.deleteSnapshots(Pattern.compile("(.*)1")).get();
Assert.assertEquals(admin.listSnapshots().get().size(), 2);

admin.deleteTableSnapshots("(.*)", "(.*)1").get();
admin.deleteTableSnapshots(Pattern.compile("(.*)"), Pattern.compile("(.*)1")).get();
Assert.assertEquals(admin.listSnapshots().get().size(), 2);

admin.deleteTableSnapshots("(.*)", "(.*)2").get();
admin.deleteTableSnapshots(Pattern.compile("(.*)"), Pattern.compile("(.*)2")).get();
Assert.assertEquals(admin.listSnapshots().get().size(), 1);

admin.deleteTableSnapshots("(.*)", "(.*)3").get();
admin.deleteTableSnapshots(Pattern.compile("(.*)"), Pattern.compile("(.*)3")).get();
Assert.assertEquals(admin.listSnapshots().get().size(), 0);
}
}

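A minimal sketch of the Pattern-based snapshot cleanup used in the tests above; the helper method is hypothetical, only the deleteSnapshots(Pattern) call comes from the diff.

import java.util.concurrent.CompletableFuture;
import java.util.regex.Pattern;

import org.apache.hadoop.hbase.client.AsyncAdmin;

public class SnapshotCleanupSketch {
  // Deletes all snapshots whose names match the given expression.
  static CompletableFuture<Void> deleteMatchingSnapshots(AsyncAdmin admin, String regex) {
    // A compiled Pattern replaces the raw String regex accepted by the removed overloads.
    return admin.deleteSnapshots(Pattern.compile(regex));
  }
}
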
@@ -80,7 +80,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {

@Test
public void testListTables() throws Exception {
int numTables = admin.listTables().get().length;
int numTables = admin.listTables().get().size();
final TableName tableName1 = TableName.valueOf(name.getMethodName() + "1");
final TableName tableName2 = TableName.valueOf(name.getMethodName() + "2");
final TableName tableName3 = TableName.valueOf(name.getMethodName() + "3");

@@ -89,13 +89,13 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
TEST_UTIL.createTable(tables[i], FAMILY);
}

TableDescriptor[] tableDescs = admin.listTables().get();
int size = tableDescs.length;
List<TableDescriptor> tableDescs = admin.listTables().get();
int size = tableDescs.size();
assertTrue(size >= tables.length);
for (int i = 0; i < tables.length && i < size; i++) {
boolean found = false;
for (int j = 0; j < tableDescs.length; j++) {
if (tableDescs[j].getTableName().equals(tables[i])) {
for (int j = 0; j < size; j++) {
if (tableDescs.get(j).getTableName().equals(tables[i])) {
found = true;
break;
}

@@ -103,13 +103,13 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
assertTrue("Not found: " + tables[i], found);
}

TableName[] tableNames = admin.listTableNames().get();
size = tableNames.length;
List<TableName> tableNames = admin.listTableNames().get();
size = tableNames.size();
assertTrue(size == (numTables + tables.length));
for (int i = 0; i < tables.length && i < size; i++) {
boolean found = false;
for (int j = 0; j < tableNames.length; j++) {
if (tableNames[j].equals(tables[i])) {
for (int j = 0; j < size; j++) {
if (tableNames.get(j).equals(tables[i])) {
found = true;
break;
}

@@ -121,10 +121,10 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
TEST_UTIL.deleteTable(tables[i]);
}

tableDescs = admin.listTables((Pattern) null, true).get();
assertTrue("Not found system tables", tableDescs.length > 0);
tableNames = admin.listTableNames((Pattern) null, true).get();
assertTrue("Not found system tables", tableNames.length > 0);
tableDescs = admin.listTables(Optional.empty(), true).get();
assertTrue("Not found system tables", tableDescs.size() > 0);
tableNames = admin.listTableNames(Optional.empty(), true).get();
assertTrue("Not found system tables", tableNames.size() > 0);
}

@Test(timeout = 300000)

@@ -143,13 +143,13 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {

@Test(timeout = 300000)
public void testCreateTable() throws Exception {
TableDescriptor[] tables = admin.listTables().get();
int numTables = tables.length;
List<TableDescriptor> tables = admin.listTables().get();
int numTables = tables.size();
final TableName tableName = TableName.valueOf(name.getMethodName());
admin.createTable(new HTableDescriptor(tableName).addFamily(new HColumnDescriptor(FAMILY)))
.join();
tables = admin.listTables().get();
assertEquals(numTables + 1, tables.length);
assertEquals(numTables + 1, tables.size());
assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster().getMaster()
.getTableStateManager().isTableState(tableName, TableState.State.ENABLED));
assertEquals(TableState.State.ENABLED, getStateFromMeta(tableName));

@@ -449,8 +449,8 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
} catch (Exception e) {
}
});
TableDescriptor[] failed = admin.deleteTables(Pattern.compile("testDeleteTables.*")).get();
assertEquals(0, failed.length);
List<TableDescriptor> failed = admin.deleteTables(Pattern.compile("testDeleteTables.*")).get();
assertEquals(0, failed.size());
Arrays.stream(tables).forEach((table) -> {
admin.tableExists(table).thenAccept((exist) -> assertFalse(exist)).join();
});

@@ -572,7 +572,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
ht1.get(get);
ht2.get(get);

this.admin.disableTables("testDisableAndEnableTable.*").join();
this.admin.disableTables(Pattern.compile("testDisableAndEnableTable.*")).join();

// Test that tables are disabled
get = new Get(row);

@@ -589,7 +589,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
assertEquals(TableState.State.DISABLED, getStateFromMeta(tableName2));

assertTrue(ok);
this.admin.enableTables("testDisableAndEnableTable.*").join();
this.admin.enableTables(Pattern.compile("testDisableAndEnableTable.*")).join();

// Test that tables are enabled
try {

@@ -28,6 +28,7 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.logging.Log;

@@ -236,7 +237,7 @@ public class TestHRegionServerBulkLoad {
AdminProtos.AdminService.BlockingInterface server =
conn.getAdmin(getLocation().getServerName());
CompactRegionRequest request = RequestConverter.buildCompactRegionRequest(
getLocation().getRegionInfo().getRegionName(), true, null);
getLocation().getRegionInfo().getRegionName(), true, Optional.empty());
server.compactRegion(null, request);
numCompactions.incrementAndGet();
return null;

@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.logging.Log;

@@ -122,7 +123,7 @@ public class TestHRegionServerBulkLoadWithOldClient extends TestHRegionServerBul
conn.getAdmin(getLocation().getServerName());
CompactRegionRequest request =
RequestConverter.buildCompactRegionRequest(
getLocation().getRegionInfo().getRegionName(), true, null);
getLocation().getRegionInfo().getRegionName(), true, Optional.empty());
server.compactRegion(null, request);
numCompactions.incrementAndGet();
return null;