HBASE-15583 Any HTableDescriptor we give out should be immutable
parent 8973582bc6
commit 053e61541e
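
In short: descriptors handed out by the client API are now wrapped in the new ImmutableHTableDescriptor, so callers can no longer mutate a descriptor they got back from Admin. A minimal sketch of the resulting behavior (illustrative code, not part of this commit; assumes a connected Admin named `admin` and an existing table `t1`):

    HTableDescriptor htd = admin.getTableDescriptor(TableName.valueOf("t1"));
    try {
      htd.setMaxFileSize(256L * 1024 * 1024); // any mutator on the returned copy
    } catch (UnsupportedOperationException e) {
      // After this commit the returned descriptor is an ImmutableHTableDescriptor,
      // so every setXXX/remove call throws instead of silently editing a local copy.
    }
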
@@ -133,7 +133,7 @@ end
 # query the master to see how many regions are on region servers
 if not $tablename.nil?
-  $TableName = HTableDescriptor.new($tablename.to_java_bytes).getTableName()
+  $TableName = TableName.valueOf($tablename.to_java_bytes)
 end
 while true
   if $tablename.nil?

(File diff suppressed because it is too large)
@@ -95,7 +95,7 @@ public interface Admin extends Abortable, Closeable {
   /**
    * List all the userspace tables.
    *
-   * @return - returns an array of HTableDescriptors
+   * @return - returns an array of read-only HTableDescriptors
    * @throws IOException if a remote or network exception occurs
    */
   HTableDescriptor[] listTables() throws IOException;

@@ -104,7 +104,7 @@ public interface Admin extends Abortable, Closeable {
    * List all the userspace tables matching the given pattern.
    *
    * @param pattern The compiled regular expression to match against
-   * @return - returns an array of HTableDescriptors
+   * @return - returns an array of read-only HTableDescriptors
    * @throws IOException if a remote or network exception occurs
    * @see #listTables()
    */

@@ -125,7 +125,7 @@ public interface Admin extends Abortable, Closeable {
    *
    * @param pattern The compiled regular expression to match against
    * @param includeSysTables False to match only against userspace tables
-   * @return - returns an array of HTableDescriptors
+   * @return - returns an array of read-only HTableDescriptors
    * @throws IOException if a remote or network exception occurs
    * @see #listTables()
    */

@@ -137,7 +137,7 @@ public interface Admin extends Abortable, Closeable {
    *
    * @param regex The regular expression to match against
    * @param includeSysTables False to match only against userspace tables
-   * @return - returns an array of HTableDescriptors
+   * @return - returns an array of read-only HTableDescriptors
    * @throws IOException if a remote or network exception occurs
    * @see #listTables(java.util.regex.Pattern, boolean)
    */

@@ -192,7 +192,7 @@ public interface Admin extends Abortable, Closeable {
    * Method for getting the tableDescriptor
    *
    * @param tableName as a {@link TableName}
-   * @return the tableDescriptor
+   * @return the read-only tableDescriptor
    * @throws org.apache.hadoop.hbase.TableNotFoundException
    * @throws IOException if a remote or network exception occurs
    */

@@ -293,7 +293,8 @@ public interface Admin extends Abortable, Closeable {
    * #listTables(java.lang.String)} and {@link #deleteTable(org.apache.hadoop.hbase.TableName)}
    *
    * @param regex The regular expression to match table names against
-   * @return Table descriptors for tables that couldn't be deleted
+   * @return Table descriptors for tables that couldn't be deleted.
+   *         The return htds are read-only
    * @throws IOException
    * @see #deleteTables(java.util.regex.Pattern)
    * @see #deleteTable(org.apache.hadoop.hbase.TableName)

@@ -308,6 +309,7 @@ public interface Admin extends Abortable, Closeable {
    *
    * @param pattern The pattern to match table names against
    * @return Table descriptors for tables that couldn't be deleted
+   *         The return htds are read-only
    * @throws IOException
    */
   HTableDescriptor[] deleteTables(Pattern pattern) throws IOException;

@@ -373,6 +375,8 @@ public interface Admin extends Abortable, Closeable {
    *
    * @param regex The regular expression to match table names against
    * @throws IOException
+   * @return Table descriptors for tables that couldn't be enabled.
+   *         The return HTDs are read-only.
    * @see #enableTables(java.util.regex.Pattern)
    * @see #enableTable(org.apache.hadoop.hbase.TableName)
    */

@@ -386,6 +390,8 @@ public interface Admin extends Abortable, Closeable {
    *
    * @param pattern The pattern to match table names against
    * @throws IOException
+   * @return Table descriptors for tables that couldn't be enabled.
+   *         The return HTDs are read-only.
    */
   HTableDescriptor[] enableTables(Pattern pattern) throws IOException;

@@ -422,6 +428,7 @@ public interface Admin extends Abortable, Closeable {
    *
    * @param regex The regular expression to match table names against
    * @return Table descriptors for tables that couldn't be disabled
+   *         The return htds are read-only
    * @throws IOException
    * @see #disableTables(java.util.regex.Pattern)
    * @see #disableTable(org.apache.hadoop.hbase.TableName)

@@ -436,6 +443,7 @@ public interface Admin extends Abortable, Closeable {
    *
    * @param pattern The pattern to match table names against
    * @return Table descriptors for tables that couldn't be disabled
+   *         The return htds are read-only
    * @throws IOException
    */
   HTableDescriptor[] disableTables(Pattern pattern) throws IOException;

@@ -1166,7 +1174,7 @@ public interface Admin extends Abortable, Closeable {
    * Get list of table descriptors by namespace
    *
    * @param name namespace name
-   * @return A descriptor
+   * @return HTD[] the read-only tableDescriptors
    * @throws IOException
    */
   HTableDescriptor[] listTableDescriptorsByNamespace(final String name)

@@ -1199,7 +1207,7 @@ public interface Admin extends Abortable, Closeable {
    * Get tableDescriptors
    *
    * @param tableNames List of table names
-   * @return HTD[] the tableDescriptor
+   * @return HTD[] the read-only tableDescriptors
    * @throws IOException if a remote or network exception occurs
    */
   HTableDescriptor[] getTableDescriptorsByTableName(List<TableName> tableNames)

@@ -1209,7 +1217,7 @@ public interface Admin extends Abortable, Closeable {
    * Get tableDescriptors
    *
    * @param names List of table names
-   * @return HTD[] the tableDescriptor
+   * @return HTD[] the read-only tableDescriptors
    * @throws IOException if a remote or network exception occurs
    */
   HTableDescriptor[] getTableDescriptors(List<String> names)
@@ -25,7 +25,6 @@ import java.util.regex.Pattern;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;

@@ -57,27 +56,27 @@ public interface AsyncAdmin {

   /**
    * List all the userspace tables.
-   * @return - returns an array of HTableDescriptors wrapped by a {@link CompletableFuture}.
+   * @return - returns an array of TableDescriptors wrapped by a {@link CompletableFuture}.
    * @see #listTables(Pattern, boolean)
    */
-  CompletableFuture<HTableDescriptor[]> listTables();
+  CompletableFuture<TableDescriptor[]> listTables();

   /**
    * List all the tables matching the given pattern.
    * @param regex The regular expression to match against
    * @param includeSysTables False to match only against userspace tables
-   * @return - returns an array of HTableDescriptors wrapped by a {@link CompletableFuture}.
+   * @return - returns an array of TableDescriptors wrapped by a {@link CompletableFuture}.
    * @see #listTables(Pattern, boolean)
    */
-  CompletableFuture<HTableDescriptor[]> listTables(String regex, boolean includeSysTables);
+  CompletableFuture<TableDescriptor[]> listTables(String regex, boolean includeSysTables);

   /**
    * List all the tables matching the given pattern.
    * @param pattern The compiled regular expression to match against
    * @param includeSysTables False to match only against userspace tables
-   * @return - returns an array of HTableDescriptors wrapped by a {@link CompletableFuture}.
+   * @return - returns an array of TableDescriptors wrapped by a {@link CompletableFuture}.
    */
-  CompletableFuture<HTableDescriptor[]> listTables(Pattern pattern, boolean includeSysTables);
+  CompletableFuture<TableDescriptor[]> listTables(Pattern pattern, boolean includeSysTables);

   /**
    * List all of the names of userspace tables.

@@ -107,15 +106,15 @@ public interface AsyncAdmin {
   /**
    * Method for getting the tableDescriptor
    * @param tableName as a {@link TableName}
-   * @return the tableDescriptor wrapped by a {@link CompletableFuture}.
+   * @return the read-only tableDescriptor wrapped by a {@link CompletableFuture}.
    */
-  CompletableFuture<HTableDescriptor> getTableDescriptor(final TableName tableName);
+  CompletableFuture<TableDescriptor> getTableDescriptor(final TableName tableName);

   /**
    * Creates a new table.
    * @param desc table descriptor for table
    */
-  CompletableFuture<Void> createTable(HTableDescriptor desc);
+  CompletableFuture<Void> createTable(TableDescriptor desc);

   /**
    * Creates a new table with the specified number of regions. The start key specified will become

@@ -128,7 +127,7 @@ public interface AsyncAdmin {
    * @param endKey end of key range
    * @param numRegions the total number of regions to create
    */
-  CompletableFuture<Void> createTable(HTableDescriptor desc, byte[] startKey, byte[] endKey,
+  CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey,
       int numRegions);

   /**

@@ -138,7 +137,7 @@ public interface AsyncAdmin {
    * @param desc table descriptor for table
    * @param splitKeys array of split keys for the initial regions of the table
    */
-  CompletableFuture<Void> createTable(final HTableDescriptor desc, byte[][] splitKeys);
+  CompletableFuture<Void> createTable(final TableDescriptor desc, byte[][] splitKeys);

   /**
    * Deletes a table.

@@ -153,9 +152,9 @@ public interface AsyncAdmin {
    * {@link #deleteTable(org.apache.hadoop.hbase.TableName)}
    * @param regex The regular expression to match table names against
    * @return Table descriptors for tables that couldn't be deleted. The return value will be wrapped
-   *         by a {@link CompletableFuture}.
+   *         by a {@link CompletableFuture}. The return HTDs are read-only.
    */
-  CompletableFuture<HTableDescriptor[]> deleteTables(String regex);
+  CompletableFuture<TableDescriptor[]> deleteTables(String regex);

   /**
    * Delete tables matching the passed in pattern and wait on completion. Warning: Use this method

@@ -164,9 +163,9 @@ public interface AsyncAdmin {
    * {@link #deleteTable(org.apache.hadoop.hbase.TableName)}
    * @param pattern The pattern to match table names against
    * @return Table descriptors for tables that couldn't be deleted. The return value will be wrapped
-   *         by a {@link CompletableFuture}.
+   *         by a {@link CompletableFuture}. The return HTDs are read-only.
    */
-  CompletableFuture<HTableDescriptor[]> deleteTables(Pattern pattern);
+  CompletableFuture<TableDescriptor[]> deleteTables(Pattern pattern);

   /**
    * Truncate a table.

@@ -187,9 +186,9 @@ public interface AsyncAdmin {
    * {@link #enableTable(TableName)}
    * @param regex The regular expression to match table names against
    * @return Table descriptors for tables that couldn't be enabled. The return value will be wrapped
-   *         by a {@link CompletableFuture}.
+   *         by a {@link CompletableFuture}. The return HTDs are read-only.
    */
-  CompletableFuture<HTableDescriptor[]> enableTables(String regex);
+  CompletableFuture<TableDescriptor[]> enableTables(String regex);

   /**
    * Enable tables matching the passed in pattern. Warning: Use this method carefully, there is no

@@ -197,9 +196,9 @@ public interface AsyncAdmin {
    * {@link #enableTable(TableName)}
    * @param pattern The pattern to match table names against
    * @return Table descriptors for tables that couldn't be enabled. The return value will be wrapped
-   *         by a {@link CompletableFuture}.
+   *         by a {@link CompletableFuture}. The return HTDs are read-only.
    */
-  CompletableFuture<HTableDescriptor[]> enableTables(Pattern pattern);
+  CompletableFuture<TableDescriptor[]> enableTables(Pattern pattern);

   /**
    * Disable a table. The table has to be in enabled state for it to be disabled.

@@ -213,9 +212,9 @@ public interface AsyncAdmin {
    * {@link #disableTable(TableName)}
    * @param regex The regular expression to match table names against
    * @return Table descriptors for tables that couldn't be disabled. The return value will be wrapped by a
-   *         {@link CompletableFuture}.
+   *         {@link CompletableFuture}. The return HTDs are read-only.
    */
-  CompletableFuture<HTableDescriptor[]> disableTables(String regex);
+  CompletableFuture<TableDescriptor[]> disableTables(String regex);

   /**
    * Disable tables matching the passed in pattern. Warning: Use this method carefully, there is no

@@ -223,9 +222,9 @@ public interface AsyncAdmin {
    * {@link #disableTable(TableName)}
    * @param pattern The pattern to match table names against
    * @return Table descriptors for tables that couldn't be disabled. The return value will be wrapped by a
-   *         {@link CompletableFuture}.
+   *         {@link CompletableFuture}. The return HTDs are read-only.
    */
-  CompletableFuture<HTableDescriptor[]> disableTables(Pattern pattern);
+  CompletableFuture<TableDescriptor[]> disableTables(Pattern pattern);

   /**
    * @param tableName name of table to check
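
For orientation, a small usage sketch of the reworked async signatures above (hypothetical `conn` and `asyncAdmin` handles, not part of this diff): the futures now carry the read-only TableDescriptor type rather than HTableDescriptor.

    AsyncAdmin asyncAdmin = conn.getAdmin(); // conn: an AsyncConnection obtained elsewhere
    asyncAdmin.listTables().thenAccept(tds -> {
      for (TableDescriptor td : tds) {
        System.out.println(td.getTableName()); // getters only; TableDescriptor has no setters
      }
    });
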
@@ -44,7 +44,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
 import org.apache.hadoop.hbase.NotServingRegionException;

@@ -293,10 +292,10 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
     CompletableFuture<Void> operate(TableName table);
   }

-  private CompletableFuture<HTableDescriptor[]> batchTableOperations(Pattern pattern,
+  private CompletableFuture<TableDescriptor[]> batchTableOperations(Pattern pattern,
       TableOperator operator, String operationType) {
-    CompletableFuture<HTableDescriptor[]> future = new CompletableFuture<>();
-    List<HTableDescriptor> failed = new LinkedList<>();
+    CompletableFuture<TableDescriptor[]> future = new CompletableFuture<>();
+    List<TableDescriptor> failed = new LinkedList<>();
     listTables(pattern, false).whenComplete(
       (tables, error) -> {
         if (error != null) {

@@ -311,7 +310,7 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
           }
         })).<CompletableFuture> toArray(size -> new CompletableFuture[size]);
         CompletableFuture.allOf(futures).thenAccept((v) -> {
-          future.complete(failed.toArray(new HTableDescriptor[failed.size()]));
+          future.complete(failed.toArray(new TableDescriptor[failed.size()]));
         });
       });
     return future;

@@ -328,25 +327,25 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
   }

   @Override
-  public CompletableFuture<HTableDescriptor[]> listTables() {
+  public CompletableFuture<TableDescriptor[]> listTables() {
     return listTables((Pattern) null, false);
   }

   @Override
-  public CompletableFuture<HTableDescriptor[]> listTables(String regex, boolean includeSysTables) {
+  public CompletableFuture<TableDescriptor[]> listTables(String regex, boolean includeSysTables) {
     return listTables(Pattern.compile(regex), false);
   }

   @Override
-  public CompletableFuture<HTableDescriptor[]> listTables(Pattern pattern, boolean includeSysTables) {
+  public CompletableFuture<TableDescriptor[]> listTables(Pattern pattern, boolean includeSysTables) {
     return this
-        .<HTableDescriptor[]>newMasterCaller()
+        .<TableDescriptor[]>newMasterCaller()
         .action(
           (controller, stub) -> this
-              .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, HTableDescriptor[]> call(
+              .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, TableDescriptor[]> call(
                 controller, stub, RequestConverter.buildGetTableDescriptorsRequest(pattern,
                   includeSysTables), (s, c, req, done) -> s.getTableDescriptors(c, req, done), (
-                    resp) -> ProtobufUtil.getHTableDescriptorArray(resp))).call();
+                    resp) -> ProtobufUtil.getTableDescriptorArray(resp))).call();
   }

   @Override

@@ -372,8 +371,8 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
   }

   @Override
-  public CompletableFuture<HTableDescriptor> getTableDescriptor(TableName tableName) {
-    CompletableFuture<HTableDescriptor> future = new CompletableFuture<>();
+  public CompletableFuture<TableDescriptor> getTableDescriptor(TableName tableName) {
+    CompletableFuture<TableDescriptor> future = new CompletableFuture<>();
     this.<List<TableSchema>> newMasterCaller()
         .action(
           (controller, stub) -> this

@@ -386,7 +385,7 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
             return;
           }
           if (!tableSchemas.isEmpty()) {
-            future.complete(ProtobufUtil.convertToHTableDesc(tableSchemas.get(0)));
+            future.complete(ProtobufUtil.convertToTableDesc(tableSchemas.get(0)));
           } else {
             future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString()));
           }

@@ -395,12 +394,12 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
   }

   @Override
-  public CompletableFuture<Void> createTable(HTableDescriptor desc) {
+  public CompletableFuture<Void> createTable(TableDescriptor desc) {
     return createTable(desc, null);
   }

   @Override
-  public CompletableFuture<Void> createTable(HTableDescriptor desc, byte[] startKey, byte[] endKey,
+  public CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey,
       int numRegions) {
     try {
       return createTable(desc, getSplitKeys(startKey, endKey, numRegions));

@@ -410,7 +409,7 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
   }

   @Override
-  public CompletableFuture<Void> createTable(HTableDescriptor desc, byte[][] splitKeys) {
+  public CompletableFuture<Void> createTable(TableDescriptor desc, byte[][] splitKeys) {
     if (desc.getTableName() == null) {
       return failedFuture(new IllegalArgumentException("TableName cannot be null"));
     }

@@ -447,12 +446,12 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
   }

   @Override
-  public CompletableFuture<HTableDescriptor[]> deleteTables(String regex) {
+  public CompletableFuture<TableDescriptor[]> deleteTables(String regex) {
     return deleteTables(Pattern.compile(regex));
   }

   @Override
-  public CompletableFuture<HTableDescriptor[]> deleteTables(Pattern pattern) {
+  public CompletableFuture<TableDescriptor[]> deleteTables(Pattern pattern) {
     return batchTableOperations(pattern, (table) -> deleteTable(table), "DELETE");
   }

@@ -473,12 +472,12 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
   }

   @Override
-  public CompletableFuture<HTableDescriptor[]> enableTables(String regex) {
+  public CompletableFuture<TableDescriptor[]> enableTables(String regex) {
     return enableTables(Pattern.compile(regex));
   }

   @Override
-  public CompletableFuture<HTableDescriptor[]> enableTables(Pattern pattern) {
+  public CompletableFuture<TableDescriptor[]> enableTables(Pattern pattern) {
     return batchTableOperations(pattern, (table) -> enableTable(table), "ENABLE");
   }

@@ -491,12 +490,12 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
   }

   @Override
-  public CompletableFuture<HTableDescriptor[]> disableTables(String regex) {
+  public CompletableFuture<TableDescriptor[]> disableTables(String regex) {
     return disableTables(Pattern.compile(regex));
   }

   @Override
-  public CompletableFuture<HTableDescriptor[]> disableTables(Pattern pattern) {
+  public CompletableFuture<TableDescriptor[]> disableTables(Pattern pattern) {
     return batchTableOperations(pattern, (table) -> disableTable(table), "DISABLE");
   }
@@ -441,7 +441,7 @@ public class HBaseAdmin implements Admin {
       }
     }, rpcCallerFactory, operationTimeout, rpcTimeout);
     if (htd != null) {
-      return htd;
+      return new ImmutableHTableDescriptor(htd);
     }
     throw new TableNotFoundException(tableName.getNameAsString());
   }

@@ -532,7 +532,7 @@ public class HBaseAdmin implements Admin {
       super(admin, desc.getTableName(),
           (response != null && response.hasProcId()) ? response.getProcId() : null);
       this.splitKeys = splitKeys;
-      this.desc = desc;
+      this.desc = new ImmutableHTableDescriptor(desc);
     }

     @Override

@@ -2138,8 +2138,7 @@ public class HBaseAdmin implements Admin {
           .build()).getTableSchemaList();
     HTableDescriptor[] res = new HTableDescriptor[list.size()];
     for(int i=0; i < list.size(); i++) {
-
-      res[i] = ProtobufUtil.convertToHTableDesc(list.get(i));
+      res[i] = new ImmutableHTableDescriptor(ProtobufUtil.convertToHTableDesc(list.get(i)));
     }
     return res;
   }
@@ -266,7 +266,7 @@ public class HTable implements Table {
     HTableDescriptor htd = HBaseAdmin.getTableDescriptor(tableName, connection, rpcCallerFactory,
       rpcControllerFactory, operationTimeout, readRpcTimeout);
     if (htd != null) {
-      return new UnmodifyableHTableDescriptor(htd);
+      return new ImmutableHTableDescriptor(htd);
     }
     return null;
   }
@@ -0,0 +1,79 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Read-only table descriptor.
+ */
+@Deprecated // deprecated for hbase 2.0, remove for hbase 3.0. see HTableDescriptor.
+@InterfaceAudience.Public
+public class ImmutableHTableDescriptor extends HTableDescriptor {
+
+  /*
+   * Create an unmodifyable copy of an HTableDescriptor
+   * @param desc
+   */
+  public ImmutableHTableDescriptor(final HTableDescriptor desc) {
+    super(new UnmodifyableTableDescriptor(desc));
+  }
+
+  @Deprecated // deprecated for hbase 2.0, remove for hbase 3.0. see HTableDescriptor.
+  private static class UnmodifyableTableDescriptor extends ModifyableTableDescriptor {
+
+    UnmodifyableTableDescriptor(final TableDescriptor desc) {
+      super(desc);
+    }
+
+    @Override
+    protected ModifyableTableDescriptor setFamily(HColumnDescriptor family) {
+      throw new UnsupportedOperationException("HTableDescriptor is read-only");
+    }
+
+    @Override
+    public HColumnDescriptor removeFamily(final byte[] column) {
+      throw new UnsupportedOperationException("HTableDescriptor is read-only");
+    }
+
+    @Override
+    public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) {
+      throw new UnsupportedOperationException("HTableDescriptor is read-only");
+    }
+
+    @Override
+    public void remove(Bytes key) {
+      throw new UnsupportedOperationException("HTableDescriptor is read-only");
+    }
+
+    @Override
+    public ModifyableTableDescriptor setConfiguration(String key, String value) {
+      throw new UnsupportedOperationException("HTableDescriptor is read-only");
+    }
+
+    @Override
+    public void removeConfiguration(final String key) {
+      throw new UnsupportedOperationException("HTableDescriptor is read-only");
+    }
+  }
+}
@@ -0,0 +1,256 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+/**
+ * TableDescriptor contains the details about an HBase table such as the descriptors of
+ * all the column families, is the table a catalog table, <code> -ROOT- </code> or
+ * <code> hbase:meta </code>, if the table is read only, the maximum size of the memstore,
+ * when the region split should occur, coprocessors associated with it etc...
+ */
+@InterfaceAudience.Public
+public interface TableDescriptor {
+
+  /**
+   * Returns an array all the {@link HColumnDescriptor} of the column families
+   * of the table.
+   *
+   * @return Array of all the HColumnDescriptors of the current table
+   *
+   * @see #getFamilies()
+   */
+  HColumnDescriptor[] getColumnFamilies();
+
+  /**
+   * Returns the count of the column families of the table.
+   *
+   * @return Count of column families of the table
+   */
+  int getColumnFamilyCount();
+
+  /**
+   * Getter for fetching an unmodifiable map.
+   *
+   * @return an unmodifiable map
+   */
+  Map<String, String> getConfiguration();
+
+  /**
+   * Getter for accessing the configuration value by key
+   *
+   * @param key the key whose associated value is to be returned
+   * @return the value to which the specified key is mapped, or {@code null} if
+   * this map contains no mapping for the key
+   */
+  String getConfigurationValue(String key);
+
+  /**
+   * Return the list of attached co-processor represented by their name
+   * className
+   *
+   * @return The list of co-processors classNames
+   */
+  Collection<String> getCoprocessors();
+
+  /**
+   * Returns the durability setting for the table.
+   *
+   * @return durability setting for the table.
+   */
+  Durability getDurability();
+
+  /**
+   * Returns an unmodifiable collection of all the {@link HColumnDescriptor} of
+   * all the column families of the table.
+   *
+   * @return Immutable collection of {@link HColumnDescriptor} of all the column
+   * families.
+   */
+  Collection<HColumnDescriptor> getFamilies();
+
+  /**
+   * Returns all the column family names of the current table. The map of
+   * TableDescriptor contains mapping of family name to HColumnDescriptors.
+   * This returns all the keys of the family map which represents the column
+   * family names of the table.
+   *
+   * @return Immutable sorted set of the keys of the families.
+   */
+  Set<byte[]> getFamiliesKeys();
+
+  /**
+   * Returns the HColumnDescriptor for a specific column family with name as
+   * specified by the parameter column.
+   *
+   * @param column Column family name
+   * @return Column descriptor for the passed family name or the family on
+   * passed in column.
+   */
+  HColumnDescriptor getFamily(final byte[] column);
+
+  /**
+   * This gets the class associated with the flush policy which determines the
+   * stores need to be flushed when flushing a region. The class used by default
+   * is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy.
+   *
+   * @return the class name of the flush policy for this table. If this returns
+   * null, the default flush policy is used.
+   */
+  String getFlushPolicyClassName();
+
+  /**
+   * Returns the maximum size upto which a region can grow to after which a
+   * region split is triggered. The region size is represented by the size of
+   * the biggest store file in that region.
+   *
+   * @return max hregion size for table, -1 if not set.
+   */
+  long getMaxFileSize();
+
+  /**
+   * Returns the size of the memstore after which a flush to filesystem is
+   * triggered.
+   *
+   * @return memory cache flush size for each hregion, -1 if not set.
+   */
+  long getMemStoreFlushSize();
+
+  int getPriority();
+
+  /**
+   * @return Returns the configured replicas per region
+   */
+  int getRegionReplication();
+
+  /**
+   * This gets the class associated with the region split policy which
+   * determines when a region split should occur. The class used by default is
+   * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
+   *
+   * @return the class name of the region split policy for this table. If this
+   * returns null, the default split policy is used.
+   */
+  String getRegionSplitPolicyClassName();
+
+  /**
+   * Get the name of the table
+   *
+   * @return TableName
+   */
+  TableName getTableName();
+
+  @Deprecated
+  String getOwnerString();
+
+  /**
+   * Getter for accessing the metadata associated with the key
+   *
+   * @param key The key.
+   * @return The value.
+   */
+  byte[] getValue(byte[] key);
+
+  /**
+   * @return Getter for fetching an unmodifiable map.
+   */
+  Map<Bytes, Bytes> getValues();
+
+  /**
+   * Check if the table has an attached co-processor represented by the name
+   * className
+   *
+   * @param classNameToMatch - Class name of the co-processor
+   * @return true of the table has a co-processor className
+   */
+  boolean hasCoprocessor(String classNameToMatch);
+
+  /**
+   * Checks to see if this table contains the given column family
+   *
+   * @param familyName Family name or column name.
+   * @return true if the table contains the specified family name
+   */
+  boolean hasFamily(final byte[] familyName);
+
+  /**
+   * @return true if the read-replicas memstore replication is enabled.
+   */
+  boolean hasRegionMemstoreReplication();
+
+  /**
+   * @return true if there are at least one cf whose replication scope is
+   * serial.
+   */
+  boolean hasSerialReplicationScope();
+
+  /**
+   * Check if the compaction enable flag of the table is true. If flag is false
+   * then no minor/major compactions will be done in real.
+   *
+   * @return true if table compaction enabled
+   */
+  boolean isCompactionEnabled();
+
+  /**
+   * Checks if this table is <code> hbase:meta </code> region.
+   *
+   * @return true if this table is <code> hbase:meta </code> region
+   */
+  boolean isMetaRegion();
+
+  /**
+   * Checks if the table is a <code>hbase:meta</code> table
+   *
+   * @return true if table is <code> hbase:meta </code> region.
+   */
+  boolean isMetaTable();
+
+  /**
+   * Check if normalization enable flag of the table is true. If flag is false
+   * then no region normalizer won't attempt to normalize this table.
+   *
+   * @return true if region normalization is enabled for this table
+   */
+  boolean isNormalizationEnabled();
+
+  /**
+   * Check if the readOnly flag of the table is set. If the readOnly flag is set
+   * then the contents of the table can only be read from but not modified.
+   *
+   * @return true if all columns in the table should be read only
+   */
+  boolean isReadOnly();
+
+  /**
+   * Check if the descriptor represents a <code> -ROOT- </code> region.
+   *
+   * @return true if this is a <code> -ROOT- </code> region
+   */
+  boolean isRootRegion();
+
+}
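
Because TableDescriptor exposes only getters, instances are meant to be produced via TableDescriptorBuilder (exercised by the tests later in this commit). A minimal sketch using only builder calls that appear elsewhere in this diff; the table and family names are illustrative:

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .addFamily(new HColumnDescriptor(Bytes.toBytes("cf")))
        .setMaxFileSize(1111L)
        .setReadOnly(true)
        .build();
    assert td.getMaxFileSize() == 1111L; // getters come from the TableDescriptor interface above
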
(File diff suppressed because it is too large)
@@ -1,127 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.client;
-
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Read-only table descriptor.
- */
-@InterfaceAudience.Public
-public class UnmodifyableHTableDescriptor extends HTableDescriptor {
-  /**
-   * Default constructor.
-   * @deprecated As of release 2.0.0. This will be removed in HBase 3.0.0.
-   *             Use {@link #UnmodifyableHTableDescriptor(HTableDescriptor)}.
-   */
-  @Deprecated
-  public UnmodifyableHTableDescriptor() {
-    super();
-  }
-
-  /*
-   * Create an unmodifyable copy of an HTableDescriptor
-   * @param desc
-   */
-  UnmodifyableHTableDescriptor(final HTableDescriptor desc) {
-    super(desc.getTableName(), getUnmodifyableFamilies(desc), desc.getValues());
-  }
-
-
-  /*
-   * @param desc
-   * @return Families as unmodifiable array.
-   */
-  private static HColumnDescriptor[] getUnmodifyableFamilies(
-      final HTableDescriptor desc) {
-    HColumnDescriptor [] f = new HColumnDescriptor[desc.getFamilies().size()];
-    int i = 0;
-    for (HColumnDescriptor c: desc.getFamilies()) {
-      f[i++] = c;
-    }
-    return f;
-  }
-
-  /**
-   * Does NOT add a column family. This object is immutable
-   * @param family HColumnDescriptor of familyto add.
-   */
-  @Override
-  public UnmodifyableHTableDescriptor addFamily(final HColumnDescriptor family) {
-    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-  }
-
-  @Override
-  public UnmodifyableHTableDescriptor modifyFamily(HColumnDescriptor family) {
-    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-  }
-
-  /**
-   * @param column
-   * @return Column descriptor for the passed family name or the family on
-   * passed in column.
-   */
-  @Override
-  public HColumnDescriptor removeFamily(final byte [] column) {
-    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-  }
-
-  /**
-   * @see org.apache.hadoop.hbase.HTableDescriptor#setReadOnly(boolean)
-   */
-  @Override
-  public UnmodifyableHTableDescriptor setReadOnly(boolean readOnly) {
-    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-  }
-
-  /**
-   * @see org.apache.hadoop.hbase.HTableDescriptor#setValue(byte[], byte[])
-   */
-  @Override
-  public UnmodifyableHTableDescriptor setValue(byte[] key, byte[] value) {
-    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-  }
-
-  /**
-   * @see org.apache.hadoop.hbase.HTableDescriptor#setValue(java.lang.String, java.lang.String)
-   */
-  @Override
-  public UnmodifyableHTableDescriptor setValue(String key, String value) {
-    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-  }
-
-  /**
-   * @see org.apache.hadoop.hbase.HTableDescriptor#setMaxFileSize(long)
-   */
-  @Override
-  public UnmodifyableHTableDescriptor setMaxFileSize(long maxFileSize) {
-    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-  }
-
-  /**
-   * @see org.apache.hadoop.hbase.HTableDescriptor#setMemStoreFlushSize(long)
-   */
-  @Override
-  public UnmodifyableHTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
-    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-  }
-}
@@ -35,6 +35,7 @@ import java.util.NavigableSet;
 import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;

@@ -67,6 +68,7 @@ import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;

@@ -76,6 +78,8 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.SnapshotDescription;
 import org.apache.hadoop.hbase.client.SnapshotType;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;

@@ -420,18 +424,33 @@ public final class ProtobufUtil {
    * Get HTableDescriptor[] from GetTableDescriptorsResponse protobuf
    *
    * @param proto the GetTableDescriptorsResponse
-   * @return HTableDescriptor[]
+   * @return a immutable HTableDescriptor array
+   * @deprecated Use {@link #getTableDescriptorArray} after removing the HTableDescriptor
    */
+  @Deprecated
   public static HTableDescriptor[] getHTableDescriptorArray(GetTableDescriptorsResponse proto) {
     if (proto == null) return null;

     HTableDescriptor[] ret = new HTableDescriptor[proto.getTableSchemaCount()];
     for (int i = 0; i < proto.getTableSchemaCount(); ++i) {
-      ret[i] = convertToHTableDesc(proto.getTableSchema(i));
+      ret[i] = new ImmutableHTableDescriptor(convertToHTableDesc(proto.getTableSchema(i)));
     }
     return ret;
   }

+  /**
+   * Get TableDescriptor[] from GetTableDescriptorsResponse protobuf
+   *
+   * @param proto the GetTableDescriptorsResponse
+   * @return TableDescriptor[]
+   */
+  public static TableDescriptor[] getTableDescriptorArray(GetTableDescriptorsResponse proto) {
+    if (proto == null) return new TableDescriptor[0];
+    return proto.getTableSchemaList()
+      .stream()
+      .map(ProtobufUtil::convertToTableDesc)
+      .toArray(size -> new TableDescriptor[size]);
+  }
   /**
    * get the split keys in form "byte [][]" from a CreateTableRequest proto
    *

@@ -2850,7 +2869,7 @@ public final class ProtobufUtil {
    * @param htd the HTableDescriptor
    * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
    */
-  public static TableSchema convertToTableSchema(HTableDescriptor htd) {
+  public static TableSchema convertToTableSchema(TableDescriptor htd) {
     TableSchema.Builder builder = TableSchema.newBuilder();
     builder.setTableName(toProtoTableName(htd.getTableName()));
     for (Map.Entry<Bytes, Bytes> e : htd.getValues().entrySet()) {

@@ -2875,7 +2894,9 @@ public final class ProtobufUtil {
    * Converts a TableSchema to HTableDescriptor
    * @param ts A pb TableSchema instance.
    * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
+   * @deprecated Use {@link #convertToTableDesc} after removing the HTableDescriptor
    */
+  @Deprecated
   public static HTableDescriptor convertToHTableDesc(final TableSchema ts) {
     List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
     HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];

@@ -2896,6 +2917,25 @@ public final class ProtobufUtil {
     return htd;
   }

+  /**
+   * Converts a TableSchema to TableDescriptor
+   * @param ts A pb TableSchema instance.
+   * @return An {@link TableDescriptor} made from the passed in pb <code>ts</code>.
+   */
+  public static TableDescriptor convertToTableDesc(final TableSchema ts) {
+    TableDescriptorBuilder builder
+      = TableDescriptorBuilder.newBuilder(ProtobufUtil.toTableName(ts.getTableName()));
+    ts.getColumnFamiliesList()
+      .stream()
+      .map(ProtobufUtil::convertToHColumnDesc)
+      .forEach(builder::addFamily);
+    ts.getAttributesList()
+      .forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray()));
+    ts.getConfigurationList()
+      .forEach(a -> builder.setConfiguration(a.getName(), a.getValue()));
+    return builder.build();
+  }
+
   /**
    * Creates {@link CompactionState} from
    * {@link org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState}
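
The two new ProtobufUtil helpers are inverses across the pb TableSchema; a round-trip sketch using only the conversions added in this hunk (the table name is illustrative):

    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("rt")).build();
    TableSchema ts = ProtobufUtil.convertToTableSchema(td);     // descriptor -> protobuf
    TableDescriptor back = ProtobufUtil.convertToTableDesc(ts); // protobuf -> descriptor
    assert back.getTableName().equals(td.getTableName());
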
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec;
 import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;

@@ -1244,7 +1245,7 @@ public final class RequestConverter {
    * @return a CreateTableRequest
    */
   public static CreateTableRequest buildCreateTableRequest(
-      final HTableDescriptor hTableDesc,
+      final TableDescriptor hTableDesc,
       final byte [][] splitKeys,
       final long nonceGroup,
       final long nonce) {
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.Consumer;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.BuilderStyleTest;
+import org.apache.hadoop.hbase.util.Bytes;
+import static org.junit.Assert.fail;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+@Category({ClientTests.class, SmallTests.class})
+public class TestImmutableHTableDescriptor {
+  @Rule
+  public TestName name = new TestName();
+  private static final List<Consumer<ImmutableHTableDescriptor>> TEST_FUNCTION = Arrays.asList(
+    htd -> htd.setValue("a", "a"),
+    htd -> htd.setValue(Bytes.toBytes("a"), Bytes.toBytes("a")),
+    htd -> htd.setValue(new Bytes(Bytes.toBytes("a")), new Bytes(Bytes.toBytes("a"))),
+    htd -> htd.setCompactionEnabled(false),
+    htd -> htd.setConfiguration("aaa", "ccc"),
+    htd -> htd.setDurability(Durability.USE_DEFAULT),
+    htd -> htd.setFlushPolicyClassName("class"),
+    htd -> htd.setMaxFileSize(123),
+    htd -> htd.setMemStoreFlushSize(123123123),
+    htd -> htd.setNormalizationEnabled(false),
+    htd -> htd.setPriority(123),
+    htd -> htd.setReadOnly(true),
+    htd -> htd.setRegionMemstoreReplication(true),
+    htd -> htd.setRegionReplication(123),
+    htd -> htd.setRegionSplitPolicyClassName("class"),
+    htd -> htd.addFamily(new HColumnDescriptor(Bytes.toBytes("fm"))),
+    htd -> htd.remove(new Bytes(Bytes.toBytes("aaa"))),
+    htd -> htd.remove("aaa"),
+    htd -> htd.remove(Bytes.toBytes("aaa")),
+    htd -> htd.removeConfiguration("xxx"),
+    htd -> htd.removeFamily(Bytes.toBytes("fm")),
+    htd -> {
+      try {
+        htd.addCoprocessor("xxx");
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+    }
+  );
+
+  @Test
+  public void testImmutable() {
+    ImmutableHTableDescriptor htd = new ImmutableHTableDescriptor(
+      new HTableDescriptor(TableName.valueOf(name.getMethodName())));
+    TEST_FUNCTION.forEach(f -> {
+      try {
+        f.accept(htd);
+        fail("ImmutableHTableDescriptor can't be modified!!!");
+      } catch (UnsupportedOperationException e) {
+      }
+    });
+  }
+
+  @Test
+  public void testClassMethodsAreBuilderStyle() {
+    /* ImmutableHTableDescriptor should have a builder style setup where setXXX/addXXX methods
+     * can be chainable together:
+     * . For example:
+     * ImmutableHTableDescriptor d
+     *   = new ImmutableHTableDescriptor()
+     *       .setFoo(foo)
+     *       .setBar(bar)
+     *       .setBuz(buz)
+     *
+     * This test ensures that all methods starting with "set" returns the declaring object
+     */
+
+    BuilderStyleTest.assertClassesAreBuilderStyle(ImmutableHTableDescriptor.class);
+  }
+}
@ -0,0 +1,376 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hbase.client;
|
||||
|
||||
import org.apache.hadoop.hbase.*;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.exceptions.DeserializationException;
|
||||
import org.apache.hadoop.hbase.testclassification.MiscTests;
|
||||
import org.apache.hadoop.hbase.testclassification.SmallTests;
|
||||
import org.apache.hadoop.hbase.util.BuilderStyleTest;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.junit.Rule;
|
||||
import org.junit.Test;
|
||||
import org.junit.experimental.categories.Category;
|
||||
import org.junit.rules.TestName;
|
||||
|
||||
/**
|
||||
* Test setting values in the descriptor
|
||||
*/
|
||||
@Category({MiscTests.class, SmallTests.class})
|
||||
public class TestTableDescriptorBuilder {
|
||||
private static final Log LOG = LogFactory.getLog(TestTableDescriptorBuilder.class);
|
||||
|
||||
@Rule
|
||||
public TestName name = new TestName();
|
||||
|
||||
@Test (expected=IOException.class)
|
||||
public void testAddCoprocessorTwice() throws IOException {
|
||||
String cpName = "a.b.c.d";
|
||||
TableDescriptor htd
|
||||
= TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
|
||||
.addCoprocessor(cpName)
|
||||
.addCoprocessor(cpName)
|
||||
.build();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAddCoprocessorWithSpecStr() throws IOException {
|
||||
String cpName = "a.b.c.d";
|
||||
TableDescriptorBuilder builder
|
||||
= TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME);
|
||||
|
||||
try {
|
||||
builder.addCoprocessorWithSpec(cpName);
|
||||
fail();
|
||||
} catch (IllegalArgumentException iae) {
|
||||
// Expected as cpName is invalid
|
||||
}
|
||||
|
||||
// Try minimal spec.
|
||||
try {
|
||||
builder.addCoprocessorWithSpec("file:///some/path" + "|" + cpName);
|
||||
fail();
|
||||
} catch (IllegalArgumentException iae) {
|
||||
// Expected to be invalid
|
||||
}
|
||||
|
||||
// Try more spec.
|
||||
String spec = "hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2";
|
||||
try {
|
||||
builder.addCoprocessorWithSpec(spec);
|
||||
} catch (IllegalArgumentException iae) {
|
||||
fail();
|
||||
}
|
||||
|
||||
// Try double add of same coprocessor
|
||||
try {
|
||||
builder.addCoprocessorWithSpec(spec);
|
||||
fail();
|
||||
} catch (IOException ioe) {
|
||||
// Expect that the coprocessor already exists
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPb() throws DeserializationException, IOException {
|
||||
final int v = 123;
|
||||
TableDescriptor htd
|
||||
= TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
|
||||
.setMaxFileSize(v)
|
||||
.setDurability(Durability.ASYNC_WAL)
|
||||
.setReadOnly(true)
|
||||
.setRegionReplication(2)
|
||||
.build();
|
||||
|
||||
byte [] bytes = TableDescriptorBuilder.toByteArray(htd);
|
||||
TableDescriptor deserializedHtd = TableDescriptorBuilder.newBuilder(bytes).build();
|
||||
assertEquals(htd, deserializedHtd);
|
||||
assertEquals(v, deserializedHtd.getMaxFileSize());
|
||||
assertTrue(deserializedHtd.isReadOnly());
|
||||
assertEquals(Durability.ASYNC_WAL, deserializedHtd.getDurability());
|
||||
assertEquals(deserializedHtd.getRegionReplication(), 2);
|
||||
}
|
||||
|
||||
/**
|
||||
* Test cps in the table description
|
||||
* @throws Exception
|
||||
*/
|
||||
@Test
|
||||
public void testGetSetRemoveCP() throws Exception {
|
||||
// simple CP
|
||||
String className = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver";
|
||||
TableDescriptor desc
|
||||
= TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
|
||||
.addCoprocessor(className) // add and check that it is present
|
||||
.build();
|
||||
assertTrue(desc.hasCoprocessor(className));
|
||||
desc = TableDescriptorBuilder.newBuilder(desc)
|
||||
.removeCoprocessor(className) // remove it and check that it is gone
|
||||
.build();
|
||||
assertFalse(desc.hasCoprocessor(className));
|
||||
}

  /**
   * Test listing, adding, and removing coprocessors on the table descriptor.
   * @throws Exception
   */
  @Test
  public void testSetListRemoveCP() throws Exception {
    TableDescriptor desc
      = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
    // Check that no coprocessor is present.
    assertTrue(desc.getCoprocessors().isEmpty());

    // simple CP
    String className1 = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver";
    String className2 = "org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver";
    desc = TableDescriptorBuilder.newBuilder(desc)
          .addCoprocessor(className1) // Add the 1st coprocessor and check if present.
          .build();
    assertTrue(desc.getCoprocessors().size() == 1);
    assertTrue(desc.getCoprocessors().contains(className1));

    desc = TableDescriptorBuilder.newBuilder(desc)
          // Add the 2nd coprocessor and check if present.
          .addCoprocessor(className2)
          .build();
    assertTrue(desc.getCoprocessors().size() == 2);
    assertTrue(desc.getCoprocessors().contains(className2));

    desc = TableDescriptorBuilder.newBuilder(desc)
          // Remove one and check
          .removeCoprocessor(className1)
          .build();
    assertTrue(desc.getCoprocessors().size() == 1);
    assertFalse(desc.getCoprocessors().contains(className1));
    assertTrue(desc.getCoprocessors().contains(className2));

    desc = TableDescriptorBuilder.newBuilder(desc)
          // Remove the last and check
          .removeCoprocessor(className2)
          .build();
    assertTrue(desc.getCoprocessors().isEmpty());
    assertFalse(desc.getCoprocessors().contains(className1));
    assertFalse(desc.getCoprocessors().contains(className2));
  }

  /**
   * Test that we add and remove strings from settings properly.
   * @throws Exception
   */
  @Test
  public void testRemoveString() throws Exception {
    byte[] key = Bytes.toBytes("Some");
    byte[] value = Bytes.toBytes("value");
    TableDescriptor desc
      = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
          .setValue(key, value)
          .build();
    assertTrue(Bytes.equals(value, desc.getValue(key)));
    desc = TableDescriptorBuilder.newBuilder(desc)
          .remove(key)
          .build();
    assertTrue(desc.getValue(key) == null);
  }

  String legalTableNames[] = { "foo", "with-dash_under.dot", "_under_start_ok",
      "with-dash.with_underscore", "02-01-2012.my_table_01-02", "xyz._mytable_", "9_9_0.table_02"
      , "dot1.dot2.table", "new.-mytable", "with-dash.with.dot", "legal..t2", "legal..legal.t2",
      "trailingdots..", "trailing.dots...", "ns:mytable", "ns:_mytable_", "ns:my_table_01-02"};
  String illegalTableNames[] = { ".dot_start_illegal", "-dash_start_illegal", "spaces not ok",
      "-dash-.start_illegal", "new.table with space", "01 .table", "ns:-illegaldash",
      "new:.illegaldot", "new:illegalcolon1:", "new:illegalcolon1:2"};

  @Test
  public void testLegalTableNames() {
    for (String tn : legalTableNames) {
      TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(tn));
    }
  }

  @Test
  public void testIllegalTableNames() {
    for (String tn : illegalTableNames) {
      try {
        TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(tn));
        fail("invalid tablename " + tn + " should have failed");
      } catch (Exception e) {
        // expected
      }
    }
  }

  @Test
  public void testLegalTableNamesRegex() {
    for (String tn : legalTableNames) {
      TableName tName = TableName.valueOf(tn);
      assertTrue("Testing: '" + tn + "'", Pattern.matches(TableName.VALID_USER_TABLE_REGEX,
          tName.getNameAsString()));
    }
  }

  @Test
  public void testIllegalTableNamesRegex() {
    for (String tn : illegalTableNames) {
      LOG.info("Testing: '" + tn + "'");
      assertFalse(Pattern.matches(TableName.VALID_USER_TABLE_REGEX, tn));
    }
  }

  /**
   * Test default value handling for maxFileSize
   */
  @Test
  public void testGetMaxFileSize() {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf(name.getMethodName())).build();
    assertEquals(-1, desc.getMaxFileSize());
    desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf(name.getMethodName()))
        .setMaxFileSize(1111L).build();
    assertEquals(1111L, desc.getMaxFileSize());
  }

  /**
   * Test default value handling for memStoreFlushSize
   */
  @Test
  public void testGetMemStoreFlushSize() {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf(name.getMethodName())).build();
    assertEquals(-1, desc.getMemStoreFlushSize());
    desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf(name.getMethodName()))
        .setMemStoreFlushSize(1111L).build();
    assertEquals(1111L, desc.getMemStoreFlushSize());
  }

  /**
   * Test that we add and remove strings from configuration properly.
   */
  @Test
  public void testAddGetRemoveConfiguration() {
    String key = "Some";
    String value = "value";
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf(name.getMethodName()))
        .setConfiguration(key, value)
        .build();
    assertEquals(value, desc.getConfigurationValue(key));
    desc = TableDescriptorBuilder
        .newBuilder(desc)
        .removeConfiguration(key)
        .build();
    assertEquals(null, desc.getConfigurationValue(key));
  }

  @Test
  public void testClassMethodsAreBuilderStyle() {
    BuilderStyleTest.assertClassesAreBuilderStyle(TableDescriptorBuilder.class);
  }

  @Test
  public void testModifyFamily() {
    byte[] familyName = Bytes.toBytes("cf");
    HColumnDescriptor hcd = new HColumnDescriptor(familyName);
    hcd.setBlocksize(1000);
    hcd.setDFSReplication((short) 3);
    TableDescriptor htd
      = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
          .addFamily(hcd)
          .build();

    assertEquals(1000, htd.getFamily(familyName).getBlocksize());
    assertEquals(3, htd.getFamily(familyName).getDFSReplication());
    hcd = new HColumnDescriptor(familyName);
    hcd.setBlocksize(2000);
    hcd.setDFSReplication((short) 1);
    htd = TableDescriptorBuilder.newBuilder(htd)
          .modifyFamily(hcd)
          .build();
    assertEquals(2000, htd.getFamily(familyName).getBlocksize());
    assertEquals(1, htd.getFamily(familyName).getDFSReplication());
  }

  @Test(expected=IllegalArgumentException.class)
  public void testModifyInexistentFamily() {
    byte[] familyName = Bytes.toBytes("cf");
    HColumnDescriptor hcd = new HColumnDescriptor(familyName);
    TableDescriptor htd = TableDescriptorBuilder
        .newBuilder(TableName.valueOf(name.getMethodName()))
        .modifyFamily(hcd)
        .build();
  }

  @Test(expected=IllegalArgumentException.class)
  public void testAddDuplicateFamilies() {
    byte[] familyName = Bytes.toBytes("cf");
    HColumnDescriptor hcd = new HColumnDescriptor(familyName);
    hcd.setBlocksize(1000);
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .addFamily(hcd)
        .build();
    assertEquals(1000, htd.getFamily(familyName).getBlocksize());
    hcd = new HColumnDescriptor(familyName);
    hcd.setBlocksize(2000);
    // add duplicate column
    TableDescriptorBuilder.newBuilder(htd).addFamily(hcd).build();
  }

  @Test
  public void testPriority() {
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setPriority(42)
        .build();
    assertEquals(42, htd.getPriority());
  }

  @Test
  public void testSerialReplicationScope() {
    HColumnDescriptor hcdWithScope = new HColumnDescriptor(Bytes.toBytes("cf0"));
    hcdWithScope.setScope(HConstants.REPLICATION_SCOPE_SERIAL);
    HColumnDescriptor hcdWithoutScope = new HColumnDescriptor(Bytes.toBytes("cf1"));
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .addFamily(hcdWithoutScope)
        .build();
    assertFalse(htd.hasSerialReplicationScope());

    htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .addFamily(hcdWithScope)
        .build();
    assertTrue(htd.hasSerialReplicationScope());

    htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .addFamily(hcdWithScope)
        .addFamily(hcdWithoutScope)
        .build();
    assertTrue(htd.hasSerialReplicationScope());
  }
}
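
The tests above all exercise the same copy-on-write contract: a TableDescriptor built by TableDescriptorBuilder is immutable, so every "edit" seeds a fresh builder from an existing descriptor and builds a new instance. A minimal sketch distilled from those tests (the table and family names are placeholders, and the packages are assumed to match the imports shown later in this diff):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class ImmutableDescriptorSketch {
  public static void main(String[] args) throws Exception {
    // Build an immutable descriptor; all mutation happens on the builder.
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .addFamily(new HColumnDescriptor(Bytes.toBytes("cf")))
        .setMaxFileSize(1111L)
        .build();

    // "Editing" means seeding a new builder from the existing descriptor;
    // the original instance is left untouched.
    TableDescriptor edited = TableDescriptorBuilder.newBuilder(desc)
        .setReadOnly(true)
        .build();

    // Serialization round-trips through the builder as well, as testPb shows.
    byte[] bytes = TableDescriptorBuilder.toByteArray(edited);
    TableDescriptor roundTripped = TableDescriptorBuilder.newBuilder(bytes).build();
    assert edited.equals(roundTripped);
  }
}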
@@ -1,47 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.client;

import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.BuilderStyleTest;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ClientTests.class, SmallTests.class})
public class TestUnmodifyableHTableDescriptor {

  @Test
  public void testClassMethodsAreBuilderStyle() {
    /* UnmodifyableHTableDescriptor should have a builder style setup where setXXX/addXXX methods
     * can be chained together:
     * . For example:
     * UnmodifyableHTableDescriptor d
     *   = new UnmodifyableHTableDescriptor()
     *       .setFoo(foo)
     *       .setBar(bar)
     *       .setBuz(buz)
     *
     * This test ensures that all methods starting with "set" return the declaring object
     */

    BuilderStyleTest.assertClassesAreBuilderStyle(UnmodifyableHTableDescriptor.class);
  }

}
@@ -111,7 +111,7 @@ public class RowResourceBase {
    if (admin.tableExists(TABLE_NAME)) {
      TEST_UTIL.deleteTable(TABLE_NAME);
    }
    HTableDescriptor htd = new HTableDescriptor(TABLE);
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
    htd.addFamily(new HColumnDescriptor(CFA));
    htd.addFamily(new HColumnDescriptor(CFB));
    admin.createTable(htd);
@@ -32,6 +32,7 @@ import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.testclassification.RestTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;

@@ -116,7 +117,7 @@ public class TestRemoteAdminRetries {
    testTimedOutCall(new CallExecutor() {
      @Override
      public void run() throws Exception {
        remoteAdmin.createTable(new HTableDescriptor(Bytes.toBytes("TestTable")));
        remoteAdmin.createTable(new HTableDescriptor(TableName.valueOf("TestTable")));
      }
    });
    verify(client, times(RETRIES)).put(anyString(), anyString(), any(byte[].class));
@@ -39,7 +39,6 @@ public interface RSGroupInfoManager {
  //Assigned before user tables
  TableName RSGROUP_TABLE_NAME =
      TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup");
  byte[] RSGROUP_TABLE_NAME_BYTES = RSGROUP_TABLE_NAME.toBytes();
  String rsGroupZNode = "rsgroup";
  byte[] META_FAMILY_BYTES = Bytes.toBytes("m");
  byte[] META_QUALIFIER_BYTES = Bytes.toBytes("i");
@@ -117,7 +117,7 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager {
  /** Table descriptor for <code>hbase:rsgroup</code> catalog table */
  private final static HTableDescriptor RSGROUP_TABLE_DESC;
  static {
    RSGROUP_TABLE_DESC = new HTableDescriptor(RSGROUP_TABLE_NAME_BYTES);
    RSGROUP_TABLE_DESC = new HTableDescriptor(RSGROUP_TABLE_NAME);
    RSGROUP_TABLE_DESC.addFamily(new HColumnDescriptor(META_FAMILY_BYTES));
    RSGROUP_TABLE_DESC.setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName());
    try {
@@ -189,7 +189,7 @@ public class TestRSGroups extends TestRSGroupsBase {
    final byte[] tableName = Bytes.toBytes(tablePrefix + "_testCreateAndAssign");
    admin.modifyNamespace(NamespaceDescriptor.create("default")
        .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "default").build());
    final HTableDescriptor desc = new HTableDescriptor(tableName);
    final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    desc.addFamily(new HColumnDescriptor("f"));
    admin.createTable(desc);
    //wait for created table to be assigned
@@ -176,7 +176,7 @@ public class RestoreTool {
      LOG.debug("Found descriptor " + tableDescriptor + " through " + incrBackupId);

      TableName newTableName = newTableNames[i];
      HTableDescriptor newTableDescriptor = admin.getTableDescriptor(newTableName);
      HTableDescriptor newTableDescriptor = new HTableDescriptor(admin.getTableDescriptor(newTableName));
      List<HColumnDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
      List<HColumnDescriptor> existingFamilies =
          Arrays.asList(newTableDescriptor.getColumnFamilies());

@@ -325,7 +325,7 @@ public class RestoreTool {
        LOG.debug("find table descriptor but no archive dir for table " + tableName
            + ", will only create table");
      }
      tableDescriptor.setName(newTableName);
      tableDescriptor = new HTableDescriptor(newTableName, tableDescriptor);
      checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor,
          truncateIfExists);
      return;

@@ -338,7 +338,7 @@ public class RestoreTool {
      if (tableDescriptor == null) {
        tableDescriptor = new HTableDescriptor(newTableName);
      } else {
        tableDescriptor.setName(newTableName);
        tableDescriptor = new HTableDescriptor(newTableName, tableDescriptor);
      }

      // record all region dirs:
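
The recurring replacement in RestoreTool is worth spelling out: the mutable setName(...) call is gone, and renaming a descriptor now goes through HTableDescriptor's copy constructor, which clones the old descriptor's families and settings under the new table name. A minimal sketch of that pattern, using only the constructor seen in the hunks above (the helper name is hypothetical):

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class RenameDescriptorSketch {
  // Rename-by-copy: clone the old descriptor under a new table name
  // instead of mutating it in place.
  static HTableDescriptor rename(HTableDescriptor old, TableName newName) {
    return new HTableDescriptor(newName, old);
  }
}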
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableInfoMissingException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.regionserver.BloomType;

@@ -128,74 +129,69 @@ public class FSTableDescriptors implements TableDescriptors {
  @VisibleForTesting
  public static HTableDescriptor createMetaTableDescriptor(final Configuration conf)
      throws IOException {
    HTableDescriptor metaDescriptor = new HTableDescriptor(
        TableName.META_TABLE_NAME,
        new HColumnDescriptor[] {
            new HColumnDescriptor(HConstants.CATALOG_FAMILY)
                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
    return new HTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
        .addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)
            .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
                HConstants.DEFAULT_HBASE_META_VERSIONS))
                .setInMemory(true)
                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
            .setInMemory(true)
            .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
                HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
                .setBloomFilterType(BloomType.NONE)
                // Enable cache of data blocks in L1 if more than one caching tier deployed:
                // e.g. if using CombinedBlockCache (BucketCache).
                .setCacheDataInL1(true),
            new HColumnDescriptor(HConstants.REPLICATION_BARRIER_FAMILY)
                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
            // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
            .setBloomFilterType(BloomType.NONE)
            // Enable cache of data blocks in L1 if more than one caching tier deployed:
            // e.g. if using CombinedBlockCache (BucketCache).
            .setCacheDataInL1(true))
        .addFamily(new HColumnDescriptor(HConstants.REPLICATION_BARRIER_FAMILY)
            .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
                HConstants.DEFAULT_HBASE_META_VERSIONS))
                .setInMemory(true)
                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
            .setInMemory(true)
            .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
                HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
                .setBloomFilterType(BloomType.NONE)
                // Enable cache of data blocks in L1 if more than one caching tier deployed:
                // e.g. if using CombinedBlockCache (BucketCache).
                .setCacheDataInL1(true),
            new HColumnDescriptor(HConstants.REPLICATION_POSITION_FAMILY)
                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
            // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
            .setBloomFilterType(BloomType.NONE)
            // Enable cache of data blocks in L1 if more than one caching tier deployed:
            // e.g. if using CombinedBlockCache (BucketCache).
            .setCacheDataInL1(true))
        .addFamily(new HColumnDescriptor(HConstants.REPLICATION_POSITION_FAMILY)
            .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
                HConstants.DEFAULT_HBASE_META_VERSIONS))
                .setInMemory(true)
                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
            .setInMemory(true)
            .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
                HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
                .setBloomFilterType(BloomType.NONE)
                // Enable cache of data blocks in L1 if more than one caching tier deployed:
                // e.g. if using CombinedBlockCache (BucketCache).
                .setCacheDataInL1(true),
            new HColumnDescriptor(HConstants.REPLICATION_META_FAMILY)
                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
            // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
            .setBloomFilterType(BloomType.NONE)
            // Enable cache of data blocks in L1 if more than one caching tier deployed:
            // e.g. if using CombinedBlockCache (BucketCache).
            .setCacheDataInL1(true))
        .addFamily(new HColumnDescriptor(HConstants.REPLICATION_META_FAMILY)
            .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
                HConstants.DEFAULT_HBASE_META_VERSIONS))
                .setInMemory(true)
                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
            .setInMemory(true)
            .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
                HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
                .setBloomFilterType(BloomType.NONE)
                // Enable cache of data blocks in L1 if more than one caching tier deployed:
                // e.g. if using CombinedBlockCache (BucketCache).
                .setCacheDataInL1(true),
            new HColumnDescriptor(HConstants.TABLE_FAMILY)
                // Ten is arbitrary number. Keep versions to help debugging.
                .setMaxVersions(10)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
                .setBloomFilterType(BloomType.NONE)
                // Enable cache of data blocks in L1 if more than one caching tier deployed:
                // e.g. if using CombinedBlockCache (BucketCache).
                .setCacheDataInL1(true)
        }) {
    };
    metaDescriptor.addCoprocessor(
        "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
        null, Coprocessor.PRIORITY_SYSTEM, null);
    return metaDescriptor;
            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
            // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
            .setBloomFilterType(BloomType.NONE)
            // Enable cache of data blocks in L1 if more than one caching tier deployed:
            // e.g. if using CombinedBlockCache (BucketCache).
            .setCacheDataInL1(true))
        .addFamily(new HColumnDescriptor(HConstants.TABLE_FAMILY)
            // Ten is arbitrary number. Keep versions to help debugging.
            .setMaxVersions(10)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
            // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
            .setBloomFilterType(BloomType.NONE)
            // Enable cache of data blocks in L1 if more than one caching tier deployed:
            // e.g. if using CombinedBlockCache (BucketCache).
            .setCacheDataInL1(true))
        .addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
            null, Coprocessor.PRIORITY_SYSTEM, null)
        .build());
  }

  @Override
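
The rewritten createMetaTableDescriptor above is one long builder chain over four column families, so its shape is easier to see stripped down. A hypothetical minimal version that keeps a single family plus the coprocessor registration, omitting the real method's versioning and block-size tuning:

import java.io.IOException;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class MetaDescriptorSketch {
  // One family plus the multi-row-mutation coprocessor, wrapped back into
  // an HTableDescriptor for callers that still expect the old type.
  public static HTableDescriptor create() throws IOException {
    return new HTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
        .addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)
            .setInMemory(true)
            .setScope(HConstants.REPLICATION_SCOPE_LOCAL))
        .addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
            null, Coprocessor.PRIORITY_SYSTEM, null)
        .build());
  }
}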
@@ -1726,7 +1726,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
  public static void setReplicas(Admin admin, TableName table, int replicaCount)
      throws IOException, InterruptedException {
    admin.disableTable(table);
    HTableDescriptor desc = admin.getTableDescriptor(table);
    HTableDescriptor desc = new HTableDescriptor(admin.getTableDescriptor(table));
    desc.setRegionReplication(replicaCount);
    admin.modifyTable(desc.getTableName(), desc);
    admin.enableTable(table);
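
The setReplicas change above is the fix repeated across the test modules that follow: Admin.getTableDescriptor now hands back a read-only descriptor, so callers take a mutable HTableDescriptor copy, change it, and submit the copy via modifyTable. A minimal sketch of the pattern, with maxFileSize standing in for whichever attribute is being changed:

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class CopyBeforeModifySketch {
  // Copy the read-only descriptor, mutate the copy, push it back.
  static void setMaxFileSize(Admin admin, TableName table, long maxFileSize) throws IOException {
    HTableDescriptor copy = new HTableDescriptor(admin.getTableDescriptor(table));
    copy.setMaxFileSize(maxFileSize);
    admin.modifyTable(table, copy);
  }
}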
@@ -59,7 +59,7 @@ public class TestFSTableDescriptorForceCreation {
    // Cleanup old tests if any detritus laying around.
    Path rootdir = new Path(UTIL.getDataTestDir(), name);
    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
    HTableDescriptor htd = new HTableDescriptor(name);
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
    fstd.add(htd);
    assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false));
  }
@@ -92,7 +92,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
      TEST_UTIL.createTable(tables[i], FAMILY);
    }

    HTableDescriptor[] tableDescs = admin.listTables().get();
    TableDescriptor[] tableDescs = admin.listTables().get();
    int size = tableDescs.length;
    assertTrue(size >= tables.length);
    for (int i = 0; i < tables.length && i < size; i++) {

@@ -140,13 +140,13 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
    htd.addFamily(fam2);
    htd.addFamily(fam3);
    admin.createTable(htd).join();
    HTableDescriptor confirmedHtd = admin.getTableDescriptor(htd.getTableName()).get();
    assertEquals(htd.compareTo(confirmedHtd), 0);
    TableDescriptor confirmedHtd = admin.getTableDescriptor(htd.getTableName()).get();
    assertEquals(htd.compareTo(new HTableDescriptor(confirmedHtd)), 0);
  }

  @Test(timeout = 300000)
  public void testCreateTable() throws Exception {
    HTableDescriptor[] tables = admin.listTables().get();
    TableDescriptor[] tables = admin.listTables().get();
    int numTables = tables.length;
    final TableName tableName = TableName.valueOf(name.getMethodName());
    admin.createTable(new HTableDescriptor(tableName).addFamily(new HColumnDescriptor(FAMILY)))

@@ -452,7 +452,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
      } catch (Exception e) {
      }
    });
    HTableDescriptor[] failed = admin.deleteTables(Pattern.compile("testDeleteTables.*")).get();
    TableDescriptor[] failed = admin.deleteTables(Pattern.compile("testDeleteTables.*")).get();
    assertEquals(0, failed.length);
    Arrays.stream(tables).forEach((table) -> {
      admin.tableExists(table).thenAccept((exist) -> assertFalse(exist)).join();

@@ -727,7 +727,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
      // Modify column family
      admin.modifyColumnFamily(tableName, cfDescriptor).join();

      HTableDescriptor htd = admin.getTableDescriptor(tableName).get();
      TableDescriptor htd = admin.getTableDescriptor(tableName).get();
      HColumnDescriptor hcfd = htd.getFamily(FAMILY_0);
      assertTrue(hcfd.getBlocksize() == newBlockSize);
    } finally {
@@ -216,7 +216,7 @@ public class TestAsyncTableBatch {
  @Test
  public void testPartialSuccess() throws IOException, InterruptedException, ExecutionException {
    Admin admin = TEST_UTIL.getAdmin();
    HTableDescriptor htd = admin.getTableDescriptor(TABLE_NAME);
    HTableDescriptor htd = new HTableDescriptor(admin.getTableDescriptor(TABLE_NAME));
    htd.addCoprocessor(ErrorInjectObserver.class.getName());
    admin.modifyTable(TABLE_NAME, htd);
    AsyncTableBase table = tableGetter.apply(TABLE_NAME);
@@ -77,7 +77,7 @@ public class TestReplicationAdminWithClusters extends TestReplicationBase {

  @Test(timeout = 300000)
  public void disableNotFullReplication() throws Exception {
    HTableDescriptor table = admin2.getTableDescriptor(tableName);
    HTableDescriptor table = new HTableDescriptor(admin2.getTableDescriptor(tableName));
    HColumnDescriptor f = new HColumnDescriptor("notReplicatedFamily");
    table.addFamily(f);
    admin1.disableTable(tableName);

@@ -125,7 +125,7 @@ public class TestReplicationAdminWithClusters extends TestReplicationBase {

  @Test(timeout = 300000)
  public void testEnableReplicationWhenTableDescriptorIsNotSameInClusters() throws Exception {
    HTableDescriptor table = admin2.getTableDescriptor(tableName);
    HTableDescriptor table = new HTableDescriptor(admin2.getTableDescriptor(tableName));
    HColumnDescriptor f = new HColumnDescriptor("newFamily");
    table.addFamily(f);
    admin2.disableTable(tableName);
@@ -140,7 +140,7 @@ public class TestSimpleRegionNormalizerOnCluster {
      region.flush(true);
    }

    HTableDescriptor htd = admin.getTableDescriptor(TABLENAME);
    HTableDescriptor htd = new HTableDescriptor(admin.getTableDescriptor(TABLENAME));
    htd.setNormalizationEnabled(true);
    admin.modifyTable(TABLENAME, htd);

@@ -217,7 +217,7 @@ public class TestSimpleRegionNormalizerOnCluster {
      region.flush(true);
    }

    HTableDescriptor htd = admin.getTableDescriptor(tableName);
    HTableDescriptor htd = new HTableDescriptor(admin.getTableDescriptor(tableName));
    htd.setNormalizationEnabled(true);
    admin.modifyTable(tableName, htd);
@@ -195,7 +195,7 @@ public class TestEncryptionKeyRotation {
    // Start the cluster back up
    TEST_UTIL.startMiniHBaseCluster(1, 1);
    // Verify the table can still be loaded
    TEST_UTIL.waitTableAvailable(htd.getName(), 5000);
    TEST_UTIL.waitTableAvailable(htd.getTableName(), 5000);
    // Double check that the store file keys can be unwrapped
    storeFilePaths = findStorefilePaths(htd.getTableName());
    assertTrue(storeFilePaths.size() > 0);

@@ -266,7 +266,7 @@ public class TestEncryptionKeyRotation {
    HColumnDescriptor hcd = htd.getFamilies().iterator().next();
    // Create the test table
    TEST_UTIL.getAdmin().createTable(htd);
    TEST_UTIL.waitTableAvailable(htd.getName(), 5000);
    TEST_UTIL.waitTableAvailable(htd.getTableName(), 5000);
    // Create a store file
    Table table = TEST_UTIL.getConnection().getTable(htd.getTableName());
    try {

@@ -101,7 +101,7 @@ public class TestEncryptionRandomKeying {

    // Create the test table
    TEST_UTIL.getAdmin().createTable(htd);
    TEST_UTIL.waitTableAvailable(htd.getName(), 5000);
    TEST_UTIL.waitTableAvailable(htd.getTableName(), 5000);

    // Create a store file
    Table table = TEST_UTIL.getConnection().getTable(htd.getTableName());
@@ -5734,7 +5734,7 @@ public class TestHRegion {
    LOG.info("RegionEventDescriptor from WAL: " + desc);

    assertEquals(RegionEventDescriptor.EventType.REGION_OPEN, desc.getEventType());
    assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getName()));
    assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getTableName().toBytes()));
    assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(),
        hri.getEncodedNameAsBytes()));
    assertTrue(desc.getLogSequenceNumber() > 0);

@@ -5854,7 +5854,7 @@ public class TestHRegion {
    LOG.info("RegionEventDescriptor from WAL: " + desc);

    assertEquals(RegionEventDescriptor.EventType.REGION_OPEN, desc.getEventType());
    assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getName()));
    assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getTableName().toBytes()));
    assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(),
        hri.getEncodedNameAsBytes()));
    assertTrue(desc.getLogSequenceNumber() > 0);

@@ -5940,7 +5940,7 @@ public class TestHRegion {
    LOG.info("RegionEventDescriptor from WAL: " + desc);

    assertEquals(RegionEventDescriptor.EventType.REGION_CLOSE, desc.getEventType());
    assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getName()));
    assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getTableName().toBytes()));
    assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(),
        hri.getEncodedNameAsBytes()));
    assertTrue(desc.getLogSequenceNumber() > 0);
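
The TestHRegion assertions above track one more API change: the raw-bytes accessor HTableDescriptor.getName() is replaced by going through the TableName, i.e. htd.getTableName().toBytes(). A one-method sketch of the new spelling (the class and method names are hypothetical):

import org.apache.hadoop.hbase.HTableDescriptor;

public class TableNameBytesSketch {
  // Table name as raw bytes, via TableName rather than the removed getName().
  static byte[] nameBytes(HTableDescriptor htd) {
    return htd.getTableName().toBytes();
  }
}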
@@ -107,7 +107,7 @@ public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil {
    UTIL.waitUntilAllRegionsAssigned(TEST_TABLE);
    Connection connection = ConnectionFactory.createConnection(conf);
    Table t = connection.getTable(TEST_TABLE);
    HTableDescriptor htd = t.getTableDescriptor();
    HTableDescriptor htd = new HTableDescriptor(t.getTableDescriptor());
    htd.addCoprocessor("net.clayb.hbase.coprocessor.NotWhitelisted",
        new Path(coprocessorPath),
        Coprocessor.PRIORITY_USER, null);

@@ -153,7 +153,7 @@ public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil {
    // coprocessor file
    admin.disableTable(TEST_TABLE);
    Table t = connection.getTable(TEST_TABLE);
    HTableDescriptor htd = t.getTableDescriptor();
    HTableDescriptor htd = new HTableDescriptor(t.getTableDescriptor());
    htd.addCoprocessor("net.clayb.hbase.coprocessor.Whitelisted",
        new Path(coprocessorPath),
        Coprocessor.PRIORITY_USER, null);
@@ -147,7 +147,7 @@ public class MobSnapshotTestingUtils {

    @Override
    public HTableDescriptor createHtd(final String tableName) {
      HTableDescriptor htd = new HTableDescriptor(tableName);
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
      HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
      hcd.setMobEnabled(true);
      hcd.setMobThreshold(0L);

@@ -680,7 +680,7 @@ public final class SnapshotTestingUtils {
    }

    public HTableDescriptor createHtd(final String tableName) {
      HTableDescriptor htd = new HTableDescriptor(tableName);
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
      htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
      return htd;
    }
@@ -261,7 +261,7 @@ public class TestFSTableDescriptors {
    final int count = 10;
    // Write out table infos.
    for (int i = 0; i < count; i++) {
      HTableDescriptor htd = new HTableDescriptor(name + i);
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
      htds.createTableDescriptor(htd);
    }

@@ -294,7 +294,7 @@ public class TestFSTableDescriptors {
    final int count = 4;
    // Write out table infos.
    for (int i = 0; i < count; i++) {
      HTableDescriptor htd = new HTableDescriptor(name + i);
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
      htds.createTableDescriptor(htd);
    }
    // add hbase:meta

@@ -321,7 +321,7 @@ public class TestFSTableDescriptors {
    final int count = 10;
    // Write out table infos via non-cached FSTableDescriptors
    for (int i = 0; i < count; i++) {
      HTableDescriptor htd = new HTableDescriptor(name + i);
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
      nonchtds.createTableDescriptor(htd);
    }
@@ -92,7 +92,7 @@ public class TestHBaseFsckEncryption {
        cfKey));
    htd.addFamily(hcd);
    TEST_UTIL.getAdmin().createTable(htd);
    TEST_UTIL.waitTableAvailable(htd.getName(), 5000);
    TEST_UTIL.waitTableAvailable(htd.getTableName(), 5000);
  }

  @After

@@ -248,7 +248,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
    status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
    assertNotNull(status);

    HTableDescriptor htd = admin.getTableDescriptor(tableName);
    HTableDescriptor htd = new HTableDescriptor(admin.getTableDescriptor(tableName));
    htd.setValue("NOT_DEFAULT", "true");
    admin.disableTable(tableName);
    admin.modifyTable(tableName, htd);
@@ -589,7 +589,7 @@ module Hbase
      table_name = TableName.valueOf(table_name_str)

      # Get table descriptor
      htd = @admin.getTableDescriptor(table_name)
      htd = org.apache.hadoop.hbase.HTableDescriptor.new(@admin.getTableDescriptor(table_name))
      hasTableUpdate = false

      # Process all args

@@ -1206,15 +1206,6 @@ module Hbase
      htd.setNormalizationEnabled(
        JBoolean.valueOf(arg.delete(NORMALIZATION_ENABLED))) if arg[NORMALIZATION_ENABLED]
      htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE]
      # DEFERRED_LOG_FLUSH is deprecated and was replaced by DURABILITY. To keep backward compatible, it still exists.
      # However, it has to be set before DURABILITY so that DURABILITY could overwrite if both args are set
      if arg.include?(DEFERRED_LOG_FLUSH)
        if arg.delete(DEFERRED_LOG_FLUSH).to_s.upcase == "TRUE"
          htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf("ASYNC_WAL"))
        else
          htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf("SYNC_WAL"))
        end
      end
      htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(DURABILITY))) if arg[DURABILITY]
      htd.setPriority(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::PRIORITY))) if arg[org.apache.hadoop.hbase.HTableDescriptor::PRIORITY]
      htd.setFlushPolicyClassName(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::FLUSH_POLICY)) if arg[org.apache.hadoop.hbase.HTableDescriptor::FLUSH_POLICY]
@@ -40,8 +40,8 @@ or a shorter version:

  hbase> alter_async 'ns1:t1', 'delete' => 'f1'

You can also change table-scope attributes like MAX_FILESIZE
MEMSTORE_FLUSHSIZE, READONLY, and DEFERRED_LOG_FLUSH.
You can also change table-scope attributes like MAX_FILESIZE,
MEMSTORE_FLUSHSIZE, and READONLY.

For example, to change the max size of a family to 128MB, do: