HBASE-11826 Split each tableOrRegionName admin methods into two targetted methods (Carter and Enis)
parent 11ba6ac7c9
commit 1a6eea335f

@@ -482,102 +482,102 @@ public interface Admin extends Abortable, Closeable {

List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException;

/**
* Flush a table or an individual region. Synchronous operation.
* Flush a table. Synchronous operation.
*
* @param tableNameOrRegionName table or region to flush
* @param tableName table to flush
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
void flush(final String tableNameOrRegionName) throws IOException, InterruptedException;
void flush(final TableName tableName) throws IOException, InterruptedException;

/**
* Flush a table or an individual region. Synchronous operation.
* Flush an individual region. Synchronous operation.
*
* @param tableNameOrRegionName table or region to flush
* @param regionName region to flush
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
void flush(final byte[] tableNameOrRegionName) throws IOException, InterruptedException;
void flushRegion(final byte[] regionName) throws IOException, InterruptedException;
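
A caller-side sketch of the new pair may help review. It assumes an open Admin handle and a region name obtained elsewhere; the table name is illustrative, not taken from this patch.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

// Sketch only: 'admin' and 'regionName' are assumed inputs.
class FlushUsageSketch {
  static void flushBoth(Admin admin, byte[] regionName)
      throws IOException, InterruptedException {
    admin.flush(TableName.valueOf("usertable")); // whole table, synchronous
    admin.flushRegion(regionName);               // one region, synchronous
  }
}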

/**
* Compact a table or an individual region. Asynchronous operation.
* Compact a table. Asynchronous operation.
*
* @param tableNameOrRegionName table or region to compact
* @param tableName table to compact
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
void compact(final String tableNameOrRegionName) throws IOException, InterruptedException;
void compact(final TableName tableName) throws IOException, InterruptedException;

/**
* Compact a table or an individual region. Asynchronous operation.
* Compact an individual region. Asynchronous operation.
*
* @param tableNameOrRegionName table or region to compact
* @param regionName region to compact
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
void compact(final byte[] tableNameOrRegionName) throws IOException, InterruptedException;
void compactRegion(final byte[] regionName) throws IOException, InterruptedException;

/**
* Compact a column family within a table or region. Asynchronous operation.
* Compact a column family within a table. Asynchronous operation.
*
* @param tableOrRegionName table or region to compact
* @param columnFamily column family within a table or region
* @param tableName table to compact
* @param columnFamily column family within a table
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
void compact(String tableOrRegionName, String columnFamily)
throws IOException, InterruptedException;
void compact(final TableName tableName, final byte[] columnFamily)
throws IOException, InterruptedException;

/**
* Compact a column family within a table or region. Asynchronous operation.
* Compact a column family within a region. Asynchronous operation.
*
* @param tableNameOrRegionName table or region to compact
* @param columnFamily column family within a table or region
* @param regionName region to compact
* @param columnFamily column family within a region
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
void compact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
throws IOException, InterruptedException;
void compactRegion(final byte[] regionName, final byte[] columnFamily)
throws IOException, InterruptedException;
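
The column-family variants split the same way. A hedged sketch of compacting a single family at each scope ("usertable" and "cf" are illustrative names):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: queues compaction of one column family and returns immediately.
class CompactFamilySketch {
  static void compactFamily(Admin admin, byte[] regionName)
      throws IOException, InterruptedException {
    byte[] cf = Bytes.toBytes("cf");
    admin.compact(TableName.valueOf("usertable"), cf); // every region of the table
    admin.compactRegion(regionName, cf);               // one region only
  }
}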

/**
* Major compact a table. Asynchronous operation.
*
* @param tableName table to major compact
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
void majorCompact(TableName tableName) throws IOException, InterruptedException;

/**
* Major compact a table or an individual region. Asynchronous operation.
*
* @param tableNameOrRegionName table or region to major compact
* @param regionName region to major compact
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
void majorCompact(final String tableNameOrRegionName) throws IOException, InterruptedException;
void majorCompactRegion(final byte[] regionName) throws IOException, InterruptedException;

/**
* Major compact a table or an individual region. Asynchronous operation.
* Major compact a column family within a table. Asynchronous operation.
*
* @param tableNameOrRegionName table or region to major compact
* @param tableName table to major compact
* @param columnFamily column family within a table
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
void majorCompact(final byte[] tableNameOrRegionName) throws IOException, InterruptedException;
void majorCompact(TableName tableName, final byte[] columnFamily)
throws IOException, InterruptedException;

/**
* Major compact a column family within a table or region. Asynchronous operation.
* Major compact a column family within a region. Asynchronous operation.
*
* @param tableNameOrRegionName table or region to major compact
* @param columnFamily column family within a table or region
* @param regionName region to major compact
* @param columnFamily column family within a region
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
void majorCompact(final String tableNameOrRegionName, final String columnFamily)
throws IOException, InterruptedException;

/**
* Major compact a column family within a table or region. Asynchronous operation.
*
* @param tableNameOrRegionName table or region to major compact
* @param columnFamily column family within a table or region
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
void majorCompact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
throws IOException, InterruptedException;
void majorCompactRegion(final byte[] regionName, final byte[] columnFamily)
throws IOException, InterruptedException;
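
Major compaction mirrors the minor-compaction split and is likewise asynchronous: the call queues work and returns. A hedged sketch (names assumed):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

// Sketch only: completion must be observed separately, e.g. with
// getCompactionState further down in this interface.
class MajorCompactSketch {
  static void majorCompactBoth(Admin admin, byte[] regionName)
      throws IOException, InterruptedException {
    admin.majorCompact(TableName.valueOf("usertable")); // table scope
    admin.majorCompactRegion(regionName);               // region scope
  }
}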

/**
* Move the region <code>r</code> to <code>dest</code>.

@@ -692,37 +692,44 @@ public interface Admin extends Abortable, Closeable {

final boolean forcible) throws IOException;

/**
* Split a table or an individual region. Asynchronous operation.
* Split a table. Asynchronous operation.
*
* @param tableNameOrRegionName table or region to split
* @param tableName table to split
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
void split(final String tableNameOrRegionName) throws IOException, InterruptedException;
void split(final TableName tableName) throws IOException, InterruptedException;

/**
* Split a table or an individual region. Implicitly finds an optimal split point. Asynchronous
* operation.
* Split an individual region. Asynchronous operation.
*
* @param tableNameOrRegionName table or region to split
* @param regionName region to split
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
void split(final byte[] tableNameOrRegionName) throws IOException, InterruptedException;

void split(final String tableNameOrRegionName, final String splitPoint)
throws IOException, InterruptedException;
void splitRegion(final byte[] regionName) throws IOException, InterruptedException;

/**
* Split a table or an individual region. Asynchronous operation.
* Split a table. Asynchronous operation.
*
* @param tableNameOrRegionName table or region to split
* @param tableName table to split
* @param splitPoint the explicit position to split on
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException interrupt exception occurred
*/
void split(final byte[] tableNameOrRegionName, final byte[] splitPoint)
throws IOException, InterruptedException;
void split(final TableName tableName, final byte[] splitPoint)
throws IOException, InterruptedException;

/**
* Split an individual region. Asynchronous operation.
*
* @param regionName region to split
* @param splitPoint the explicit position to split on
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException interrupt exception occurred
*/
void splitRegion(final byte[] regionName, final byte[] splitPoint)
throws IOException, InterruptedException;
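
With an explicit split point the table-level call only affects the region whose key range contains that point. A hedged sketch ("row5000" is an illustrative key):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: both calls are asynchronous.
class SplitSketch {
  static void splitAtKey(Admin admin, byte[] regionName)
      throws IOException, InterruptedException {
    byte[] splitPoint = Bytes.toBytes("row5000");
    admin.split(TableName.valueOf("usertable"), splitPoint); // splits the enclosing region
    admin.splitRegion(regionName, splitPoint);               // splits this region directly
  }
}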

/**
* Modify an existing table, more IRB friendly version. Asynchronous operation. This means that

@@ -880,28 +887,28 @@ public interface Admin extends Abortable, Closeable {

String[] getMasterCoprocessors();

/**
* Get the current compaction state of a table or region. It could be in a major compaction, a
* minor compaction, both, or none.
* Get the current compaction state of a table. It could be in a major compaction, a minor
* compaction, both, or none.
*
* @param tableNameOrRegionName table or region to major compact
* @param tableName table to examine
* @return the current compaction state
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
AdminProtos.GetRegionInfoResponse.CompactionState getCompactionState(
final String tableNameOrRegionName) throws IOException, InterruptedException;
AdminProtos.GetRegionInfoResponse.CompactionState getCompactionState(final TableName tableName)
throws IOException, InterruptedException;

/**
* Get the current compaction state of a table or region. It could be in a major compaction, a
* minor compaction, both, or none.
* Get the current compaction state of a region. It could be in a major compaction, a minor
* compaction, both, or none.
*
* @param tableNameOrRegionName table or region to major compact
* @param regionName region to examine
* @return the current compaction state
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
AdminProtos.GetRegionInfoResponse.CompactionState getCompactionState(
final byte[] tableNameOrRegionName) throws IOException, InterruptedException;
AdminProtos.GetRegionInfoResponse.CompactionState getCompactionStateForRegion(
final byte[] regionName) throws IOException, InterruptedException;
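
Since flush is the only synchronous operation here, these getters are the natural completion probe for the compaction calls. A hedged polling sketch (the interval is an arbitrary choice):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;

// Sketch only: crude poll until the table reports no running compaction.
class CompactionWaitSketch {
  static void waitForCompaction(Admin admin, TableName table)
      throws IOException, InterruptedException {
    while (admin.getCompactionState(table) != CompactionState.NONE) {
      Thread.sleep(1000);
    }
  }
}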

/**
* Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be

@@ -269,6 +269,7 @@ public class HBaseAdmin implements Admin {

* @return True if table exists already.
* @throws IOException
*/
@Override
public boolean tableExists(final TableName tableName) throws IOException {
return MetaTableAccessor.tableExists(connection, tableName);
}

@@ -1466,47 +1467,56 @@ public class HBaseAdmin implements Admin {

}

/**
* Flush a table or an individual region.
* Synchronous operation.
*
* @param tableNameOrRegionName table or region to flush
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
* {@inheritDoc}
*/
@Override
public void flush(final TableName tableName) throws IOException, InterruptedException {
checkTableExists(tableName);
if (isTableDisabled(tableName)) {
LOG.info("Table is disabled: " + tableName.getNameAsString());
return;
}
execProcedure("flush-table-proc", tableName.getNameAsString(),
new HashMap<String, String>());
}
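
The table path now rides the distributed procedure framework instead of per-region RPCs. Assuming the public execProcedure(String, String, Map) entry point of this era, the body above is equivalent to the following hedged sketch:

import java.io.IOException;
import java.util.HashMap;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

// Sketch only: same procedure signature and instance name as the code above.
class FlushProcedureSketch {
  static void flushViaProcedure(Admin admin, TableName table) throws IOException {
    admin.execProcedure("flush-table-proc", table.getNameAsString(),
        new HashMap<String, String>());
  }
}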

/**
* {@inheritDoc}
*/
@Override
public void flushRegion(final byte[] regionName) throws IOException, InterruptedException {
Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
if (regionServerPair == null) {
throw new IllegalArgumentException("Unknown regionname: " + Bytes.toStringBinary(regionName));
}
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
}
flush(regionServerPair.getSecond(), regionServerPair.getFirst());
}

/**
* @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or {@link #flushRegion
* (byte[])} instead.
*/
@Deprecated
public void flush(final String tableNameOrRegionName)
throws IOException, InterruptedException {
flush(Bytes.toBytes(tableNameOrRegionName));
}

/**
* Flush a table or an individual region.
* Synchronous operation.
*
* @param tableNameOrRegionName table or region to flush
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
* @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or {@link #flushRegion
* (byte[])} instead.
*/
@Override
@Deprecated
public void flush(final byte[] tableNameOrRegionName)
throws IOException, InterruptedException {
Pair<HRegionInfo, ServerName> regionServerPair
= getRegion(tableNameOrRegionName);
if (regionServerPair != null) {
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
} else {
flush(regionServerPair.getSecond(), regionServerPair.getFirst());
}
} else {
final TableName tableName = checkTableExists(
TableName.valueOf(tableNameOrRegionName));
if (isTableDisabled(tableName)) {
LOG.info("Table is disabled: " + tableName.getNameAsString());
return;
}
execProcedure("flush-table-proc", tableName.getNameAsString(),
new HashMap<String, String>());
try {
flushRegion(tableNameOrRegionName);
} catch (IllegalArgumentException e) {
// Unknown region. Try table.
flush(TableName.valueOf(tableNameOrRegionName));
}
}
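
The deprecated byte[] overload resolves names region-first and falls back to the table on IllegalArgumentException. Callers mid-migration can apply the same order themselves; a standalone sketch of the idiom:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

// Sketch of the fallback the deprecated overloads implement: try the bytes as a
// region name, and on IllegalArgumentException treat them as a table name.
class FlushDispatchSketch {
  static void flushByName(Admin admin, byte[] nameBytes)
      throws IOException, InterruptedException {
    try {
      admin.flushRegion(nameBytes);
    } catch (IllegalArgumentException e) {
      admin.flush(TableName.valueOf(nameBytes));
    }
  }
}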

@@ -1523,164 +1533,205 @@ public class HBaseAdmin implements Admin {

}

/**
* Compact a table or an individual region.
* Asynchronous operation.
*
* @param tableNameOrRegionName table or region to compact
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
* {@inheritDoc}
*/
@Override
public void compact(final TableName tableName)
throws IOException, InterruptedException {
compact(tableName, null, false);
}

/**
* {@inheritDoc}
*/
@Override
public void compactRegion(final byte[] regionName)
throws IOException, InterruptedException {
compactRegion(regionName, null, false);
}

/**
* @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
* (byte[])} instead.
*/
@Deprecated
public void compact(final String tableNameOrRegionName)
throws IOException, InterruptedException {
compact(Bytes.toBytes(tableNameOrRegionName));
}

/**
* Compact a table or an individual region.
* Asynchronous operation.
*
* @param tableNameOrRegionName table or region to compact
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
* @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
* (byte[])} instead.
*/
@Override
@Deprecated
public void compact(final byte[] tableNameOrRegionName)
throws IOException, InterruptedException {
compact(tableNameOrRegionName, null, false);
try {
compactRegion(tableNameOrRegionName, null, false);
} catch (IllegalArgumentException e) {
compact(TableName.valueOf(tableNameOrRegionName), null, false);
}
}

/**
* Compact a column family within a table or region.
* Asynchronous operation.
*
* @param tableOrRegionName table or region to compact
* @param columnFamily column family within a table or region
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
* {@inheritDoc}
*/
@Override
public void compact(final TableName tableName, final byte[] columnFamily)
throws IOException, InterruptedException {
compact(tableName, columnFamily, false);
}

/**
* {@inheritDoc}
*/
@Override
public void compactRegion(final byte[] regionName, final byte[] columnFamily)
throws IOException, InterruptedException {
compactRegion(regionName, columnFamily, false);
}

/**
* @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
* (byte[], byte[])} instead.
*/
@Deprecated
public void compact(String tableOrRegionName, String columnFamily)
throws IOException, InterruptedException {
compact(Bytes.toBytes(tableOrRegionName), Bytes.toBytes(columnFamily));
}

/**
* Compact a column family within a table or region.
* Asynchronous operation.
*
* @param tableNameOrRegionName table or region to compact
* @param columnFamily column family within a table or region
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
* @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
* (byte[], byte[])} instead.
*/
@Override
@Deprecated
public void compact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
throws IOException, InterruptedException {
compact(tableNameOrRegionName, columnFamily, false);
try {
compactRegion(tableNameOrRegionName, columnFamily, false);
} catch (IllegalArgumentException e) {
// Bad region, try table
compact(TableName.valueOf(tableNameOrRegionName), columnFamily, false);
}
}

/**
* Major compact a table or an individual region.
* Asynchronous operation.
*
* @param tableNameOrRegionName table or region to major compact
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
* {@inheritDoc}
*/
@Override
public void majorCompact(final TableName tableName)
throws IOException, InterruptedException {
compact(tableName, null, true);
}

/**
* {@inheritDoc}
*/
@Override
public void majorCompactRegion(final byte[] regionName)
throws IOException, InterruptedException {
compactRegion(regionName, null, true);
}

/**
* @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
* #majorCompactRegion(byte[])} instead.
*/
@Deprecated
public void majorCompact(final String tableNameOrRegionName)
throws IOException, InterruptedException {
majorCompact(Bytes.toBytes(tableNameOrRegionName));
}

/**
* Major compact a table or an individual region.
* Asynchronous operation.
*
* @param tableNameOrRegionName table or region to major compact
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
* @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
* #majorCompactRegion(byte[])} instead.
*/
@Override
@Deprecated
public void majorCompact(final byte[] tableNameOrRegionName)
throws IOException, InterruptedException {
compact(tableNameOrRegionName, null, true);
try {
compactRegion(tableNameOrRegionName, null, true);
} catch (IllegalArgumentException e) {
// Invalid region, try table
compact(TableName.valueOf(tableNameOrRegionName), null, true);
}
}

/**
* Major compact a column family within a table or region.
* Asynchronous operation.
*
* @param tableNameOrRegionName table or region to major compact
* @param columnFamily column family within a table or region
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
* {@inheritDoc}
*/
@Override
public void majorCompact(final String tableNameOrRegionName,
final String columnFamily) throws IOException, InterruptedException {
majorCompact(Bytes.toBytes(tableNameOrRegionName),
Bytes.toBytes(columnFamily));
public void majorCompact(final TableName tableName, final byte[] columnFamily)
throws IOException, InterruptedException {
compact(tableName, columnFamily, true);
}

/**
* Major compact a column family within a table or region.
* Asynchronous operation.
*
* @param tableNameOrRegionName table or region to major compact
* @param columnFamily column family within a table or region
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
* {@inheritDoc}
*/
@Override
public void majorCompact(final byte[] tableNameOrRegionName,
final byte[] columnFamily) throws IOException, InterruptedException {
compact(tableNameOrRegionName, columnFamily, true);
public void majorCompactRegion(final byte[] regionName, final byte[] columnFamily)
throws IOException, InterruptedException {
compactRegion(regionName, columnFamily, true);
}

/**
* Compact a table or an individual region.
* @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
* byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
*/
@Deprecated
public void majorCompact(final String tableNameOrRegionName, final String columnFamily)
throws IOException, InterruptedException {
majorCompact(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(columnFamily));
}

/**
* @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
* byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
*/
@Deprecated
public void majorCompact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
throws IOException, InterruptedException {
try {
compactRegion(tableNameOrRegionName, columnFamily, true);
} catch (IllegalArgumentException e) {
// Invalid region, try table
compact(TableName.valueOf(tableNameOrRegionName), columnFamily, true);
}
}

/**
* Compact a table.
* Asynchronous operation.
*
* @param tableNameOrRegionName table or region to compact
* @param tableName table to compact
* @param columnFamily column family within a table or region
* @param major True if we are to do a major compaction.
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
private void compact(final byte[] tableNameOrRegionName,
final byte[] columnFamily,final boolean major)
private void compact(final TableName tableName, final byte[] columnFamily,final boolean major)
throws IOException, InterruptedException {
ZooKeeperWatcher zookeeper = null;
try {
Pair<HRegionInfo, ServerName> regionServerPair
= getRegion(tableNameOrRegionName);
if (regionServerPair != null) {
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
} else {
compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily);
}
} else {
final TableName tableName =
checkTableExists(TableName.valueOf(tableNameOrRegionName));
zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
new ThrowableAbortable());
List<Pair<HRegionInfo, ServerName>> pairs =
MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection,
tableName);
for (Pair<HRegionInfo, ServerName> pair: pairs) {
if (pair.getFirst().isOffline()) continue;
if (pair.getSecond() == null) continue;
try {
compact(pair.getSecond(), pair.getFirst(), major, columnFamily);
} catch (NotServingRegionException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to" + (major ? " major" : "") + " compact " +
pair.getFirst() + ": " +
StringUtils.stringifyException(e));
}
checkTableExists(tableName);
zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
new ThrowableAbortable());
List<Pair<HRegionInfo, ServerName>> pairs =
MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
for (Pair<HRegionInfo, ServerName> pair: pairs) {
if (pair.getFirst().isOffline()) continue;
if (pair.getSecond() == null) continue;
try {
compact(pair.getSecond(), pair.getFirst(), major, columnFamily);
} catch (NotServingRegionException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to" + (major ? " major" : "") + " compact " +
pair.getFirst() + ": " +
StringUtils.stringifyException(e));
}
}
}

@@ -1691,6 +1742,28 @@ public class HBaseAdmin implements Admin {

}
}

/**
* Compact an individual region.
* Asynchronous operation.
*
* @param regionName region to compact
* @param columnFamily column family within a table or region
* @param major True if we are to do a major compaction.
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
private void compactRegion(final byte[] regionName, final byte[] columnFamily,final boolean major)
throws IOException, InterruptedException {
Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
if (regionServerPair == null) {
throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
}
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
}
compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily);
}

private void compact(final ServerName sn, final HRegionInfo hri,
final boolean major, final byte [] family)
throws IOException {

@@ -1726,7 +1799,7 @@ public class HBaseAdmin implements Admin {

try {
MoveRegionRequest request =
RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName);
stub.moveRegion(null,request);
stub.moveRegion(null, request);
} catch (ServiceException se) {
IOException ioe = ProtobufUtil.getRemoteException(se);
if (ioe instanceof HBaseIOException) {

@@ -1785,7 +1858,7 @@ public class HBaseAdmin implements Admin {

public Void call(int callTimeout) throws ServiceException {
UnassignRegionRequest request =
RequestConverter.buildUnassignRegionRequest(toBeUnassigned, force);
master.unassignRegion(null,request);
master.unassignRegion(null, request);
return null;
}
});

@@ -1858,7 +1931,7 @@ public class HBaseAdmin implements Admin {

throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException {
MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
try {
return stub.balance(null,RequestConverter.buildBalanceRequest()).getBalancerRan();
return stub.balance(null, RequestConverter.buildBalanceRequest()).getBalancerRan();
} finally {
stub.close();
}

@@ -1877,7 +1950,7 @@ public class HBaseAdmin implements Admin {

MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
try {
return stub.enableCatalogJanitor(null,
RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue();
RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue();
} finally {
stub.close();
}

@@ -1894,7 +1967,7 @@ public class HBaseAdmin implements Admin {

MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
try {
return stub.runCatalogScan(null,
RequestConverter.buildCatalogScanRequest()).getScanResult();
RequestConverter.buildCatalogScanRequest()).getScanResult();
} finally {
stub.close();
}

@@ -1910,7 +1983,7 @@ public class HBaseAdmin implements Admin {

MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
try {
return stub.isCatalogJanitorEnabled(null,
RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue();
RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue();
} finally {
stub.close();
}

@@ -1953,85 +2026,112 @@ public class HBaseAdmin implements Admin {

}

/**
* Split a table or an individual region.
* Asynchronous operation.
*
* @param tableNameOrRegionName table or region to split
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
* {@inheritDoc}
*/
@Override
public void split(final TableName tableName)
throws IOException, InterruptedException {
split(tableName, null);
}

/**
* {@inheritDoc}
*/
@Override
public void splitRegion(final byte[] regionName)
throws IOException, InterruptedException {
splitRegion(regionName, null);
}

/**
* @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or {@link #splitRegion
* (byte[])} instead.
*/
@Deprecated
public void split(final String tableNameOrRegionName)
throws IOException, InterruptedException {
split(Bytes.toBytes(tableNameOrRegionName));
}

/**
* Split a table or an individual region. Implicitly finds an optimal split
* point. Asynchronous operation.
*
* @param tableNameOrRegionName table or region to split
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
* @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or {@link #splitRegion
* (byte[])} instead.
*/
@Override
@Deprecated
public void split(final byte[] tableNameOrRegionName)
throws IOException, InterruptedException {
split(tableNameOrRegionName, null);
}

/**
* {@inheritDoc}
*/
@Override
public void split(final TableName tableName, final byte [] splitPoint)
throws IOException, InterruptedException {
ZooKeeperWatcher zookeeper = null;
try {
checkTableExists(tableName);
zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
new ThrowableAbortable());
List<Pair<HRegionInfo, ServerName>> pairs =
MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
for (Pair<HRegionInfo, ServerName> pair: pairs) {
// May not be a server for a particular row
if (pair.getSecond() == null) continue;
HRegionInfo r = pair.getFirst();
// check for parents
if (r.isSplitParent()) continue;
// if a split point given, only split that particular region
if (splitPoint != null && !r.containsRow(splitPoint)) continue;
// call out to region server to do split now
split(pair.getSecond(), pair.getFirst(), splitPoint);
}
} finally {
if (zookeeper != null) {
zookeeper.close();
}
}
}
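
Worth noting for review: at table scope the loop skips split parents and, when a split point is supplied, touches only the region whose key range contains it. The per-region rule, isolated as a hedged sketch:

import org.apache.hadoop.hbase.HRegionInfo;

// Sketch of the filter applied by split(TableName, byte[]) above.
class SplitFilterSketch {
  static boolean shouldSplit(HRegionInfo r, byte[] splitPoint) {
    if (r.isSplitParent()) return false;                     // parent of an in-flight split
    return splitPoint == null || r.containsRow(splitPoint);  // only the enclosing region
  }
}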

/**
* {@inheritDoc}
*/
@Override
public void splitRegion(final byte[] regionName, final byte [] splitPoint)
throws IOException, InterruptedException {
Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
if (regionServerPair == null) {
throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
}
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
}
split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
}

/**
* @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
* byte[])} or {@link #splitRegion(byte[], byte[])} instead.
*/
@Deprecated
public void split(final String tableNameOrRegionName,
final String splitPoint) throws IOException, InterruptedException {
split(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(splitPoint));
}

/**
* Split a table or an individual region.
* Asynchronous operation.
*
* @param tableNameOrRegionName table or region to split
* @param splitPoint the explicit position to split on
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException interrupt exception occurred
* @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
* byte[])} or {@link #splitRegion(byte[], byte[])} instead.
*/
@Override
@Deprecated
public void split(final byte[] tableNameOrRegionName,
final byte [] splitPoint) throws IOException, InterruptedException {
ZooKeeperWatcher zookeeper = null;
try {
Pair<HRegionInfo, ServerName> regionServerPair
= getRegion(tableNameOrRegionName);
if (regionServerPair != null) {
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
} else {
split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
}
} else {
final TableName tableName =
checkTableExists(TableName.valueOf(tableNameOrRegionName));
zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
new ThrowableAbortable());
List<Pair<HRegionInfo, ServerName>> pairs =
MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection,
tableName);
for (Pair<HRegionInfo, ServerName> pair: pairs) {
// May not be a server for a particular row
if (pair.getSecond() == null) continue;
HRegionInfo r = pair.getFirst();
// check for parents
if (r.isSplitParent()) continue;
// if a split point given, only split that particular region
if (splitPoint != null && !r.containsRow(splitPoint)) continue;
// call out to region server to do split now
split(pair.getSecond(), pair.getFirst(), splitPoint);
}
}
} finally {
if (zookeeper != null) {
zookeeper.close();
}
}
splitRegion(tableNameOrRegionName, splitPoint);
} catch (IllegalArgumentException e) {
// Bad region, try table
split(TableName.valueOf(tableNameOrRegionName), splitPoint);
}
}

@@ -2083,24 +2183,24 @@ public class HBaseAdmin implements Admin {

}

/**
* @param tableNameOrRegionName Name of a table or name of a region.
* @return a pair of HRegionInfo and ServerName if <code>tableNameOrRegionName</code> is
* @param regionName Name of a region.
* @return a pair of HRegionInfo and ServerName if <code>regionName</code> is
* a verified region name (we call {@link
* MetaTableAccessor#getRegion(HConnection, byte[])}
* else null.
* Throw an exception if <code>tableNameOrRegionName</code> is null.
* Throw IllegalArgumentException if <code>regionName</code> is null.
* @throws IOException
*/
Pair<HRegionInfo, ServerName> getRegion(final byte[] tableNameOrRegionName) throws IOException {
if (tableNameOrRegionName == null) {
Pair<HRegionInfo, ServerName> getRegion(final byte[] regionName) throws IOException {
if (regionName == null) {
throw new IllegalArgumentException("Pass a table name or region name");
}
Pair<HRegionInfo, ServerName> pair =
MetaTableAccessor.getRegion(connection, tableNameOrRegionName);
MetaTableAccessor.getRegion(connection, regionName);
if (pair == null) {
final AtomicReference<Pair<HRegionInfo, ServerName>> result =
new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
final String encodedName = Bytes.toString(tableNameOrRegionName);
final String encodedName = Bytes.toString(regionName);
MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
@Override
public boolean processRow(Result data) throws IOException {

@@ -2187,7 +2287,7 @@ public class HBaseAdmin implements Admin {

executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call(int callTimeout) throws ServiceException {
master.stopMaster(null,StopMasterRequest.newBuilder().build());
master.stopMaster(null, StopMasterRequest.newBuilder().build());
return null;
}
});

@@ -2226,7 +2326,7 @@ public class HBaseAdmin implements Admin {

@Override
public ClusterStatus call(int callTimeout) throws ServiceException {
GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest();
return ClusterStatus.convert(master.getClusterStatus(null,req).getClusterStatus());
return ClusterStatus.convert(master.getClusterStatus(null, req).getClusterStatus());
}
});
}

@@ -2257,8 +2357,9 @@ public class HBaseAdmin implements Admin {

public Void call(int callTimeout) throws Exception {
master.createNamespace(null,
CreateNamespaceRequest.newBuilder()
.setNamespaceDescriptor(ProtobufUtil
.toProtoNamespaceDescriptor(descriptor)).build());
.setNamespaceDescriptor(ProtobufUtil
.toProtoNamespaceDescriptor(descriptor)).build()
);
return null;
}
});

@@ -2517,8 +2618,8 @@ public class HBaseAdmin implements Admin {

* @throws IOException if a remote or network exception occurs
* @throws FailedLogCloseException
*/
@Override
public synchronized byte[][] rollHLogWriter(String serverName)
@Override
public synchronized byte[][] rollHLogWriter(String serverName)
throws IOException, FailedLogCloseException {
ServerName sn = ServerName.valueOf(serverName);
AdminService.BlockingInterface admin = this.connection.getAdmin(sn);

@@ -2548,96 +2649,60 @@ public synchronized byte[][] rollHLogWriter(String serverName)

}

/**
* Get the current compaction state of a table or region.
* It could be in a major compaction, a minor compaction, both, or none.
*
* @param tableNameOrRegionName table or region to major compact
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
* @return the current compaction state
* {@inheritDoc}
*/
@Override
public CompactionState getCompactionState(final String tableNameOrRegionName)
throws IOException, InterruptedException {
return getCompactionState(Bytes.toBytes(tableNameOrRegionName));
}

/**
* Get the current compaction state of a table or region.
* It could be in a major compaction, a minor compaction, both, or none.
*
* @param tableNameOrRegionName table or region to major compact
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
* @return the current compaction state
*/
@Override
public CompactionState getCompactionState(final byte[] tableNameOrRegionName)
throws IOException, InterruptedException {
public CompactionState getCompactionState(final TableName tableName)
throws IOException, InterruptedException {
CompactionState state = CompactionState.NONE;
ZooKeeperWatcher zookeeper =
new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
new ThrowableAbortable());
try {
Pair<HRegionInfo, ServerName> regionServerPair = getRegion(tableNameOrRegionName);
if (regionServerPair != null) {
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
} else {
ServerName sn = regionServerPair.getSecond();
checkTableExists(tableName);
List<Pair<HRegionInfo, ServerName>> pairs =
MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
for (Pair<HRegionInfo, ServerName> pair: pairs) {
if (pair.getFirst().isOffline()) continue;
if (pair.getSecond() == null) continue;
try {
ServerName sn = pair.getSecond();
AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
regionServerPair.getFirst().getRegionName(), true);
pair.getFirst().getRegionName(), true);
GetRegionInfoResponse response = admin.getRegionInfo(null, request);
return response.getCompactionState();
}
} else {
final TableName tableName =
checkTableExists(TableName.valueOf(tableNameOrRegionName));
List<Pair<HRegionInfo, ServerName>> pairs =
MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
for (Pair<HRegionInfo, ServerName> pair: pairs) {
if (pair.getFirst().isOffline()) continue;
if (pair.getSecond() == null) continue;
try {
ServerName sn = pair.getSecond();
AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
pair.getFirst().getRegionName(), true);
GetRegionInfoResponse response = admin.getRegionInfo(null, request);
switch (response.getCompactionState()) {
case MAJOR_AND_MINOR:
switch (response.getCompactionState()) {
case MAJOR_AND_MINOR:
return CompactionState.MAJOR_AND_MINOR;
case MAJOR:
if (state == CompactionState.MINOR) {
return CompactionState.MAJOR_AND_MINOR;
case MAJOR:
if (state == CompactionState.MINOR) {
return CompactionState.MAJOR_AND_MINOR;
}
state = CompactionState.MAJOR;
break;
case MINOR:
if (state == CompactionState.MAJOR) {
return CompactionState.MAJOR_AND_MINOR;
}
state = CompactionState.MINOR;
break;
case NONE:
default: // nothing, continue
}
} catch (NotServingRegionException e) {
state = CompactionState.MAJOR;
break;
case MINOR:
if (state == CompactionState.MAJOR) {
return CompactionState.MAJOR_AND_MINOR;
}
state = CompactionState.MINOR;
break;
case NONE:
default: // nothing, continue
}
} catch (NotServingRegionException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to get compaction state of " +
pair.getFirst() + ": " +
StringUtils.stringifyException(e));
}
} catch (RemoteException e) {
if (e.getMessage().indexOf(NotServingRegionException.class.getName()) >= 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to get compaction state of " +
pair.getFirst() + ": " +
StringUtils.stringifyException(e));
}
} catch (RemoteException e) {
if (e.getMessage().indexOf(NotServingRegionException.class.getName()) >= 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to get compaction state of " + pair.getFirst() + ": "
+ StringUtils.stringifyException(e));
}
} else {
throw e;
LOG.debug("Trying to get compaction state of " + pair.getFirst() + ": "
+ StringUtils.stringifyException(e));
}
} else {
throw e;
}
}
}

@@ -2649,6 +2714,56 @@ public synchronized byte[][] rollHLogWriter(String serverName)

return state;
}
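
The table-level state is folded across regions with a small merge rule: MAJOR_AND_MINOR anywhere wins outright, and seeing MAJOR in one region and MINOR in another also yields MAJOR_AND_MINOR. The same rule as a hedged standalone sketch:

import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;

// Sketch of the accumulator the loop above maintains per region.
class CompactionStateMergeSketch {
  static CompactionState merge(CompactionState acc, CompactionState next) {
    if (acc == next) return acc;
    if (acc == CompactionState.NONE) return next;
    if (next == CompactionState.NONE) return acc;
    return CompactionState.MAJOR_AND_MINOR; // MAJOR + MINOR, or either + MAJOR_AND_MINOR
  }
}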

/**
* {@inheritDoc}
*/
@Override
public CompactionState getCompactionStateForRegion(final byte[] regionName)
throws IOException, InterruptedException {
try {
Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
if (regionServerPair == null) {
throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
}
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
}
ServerName sn = regionServerPair.getSecond();
AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
regionServerPair.getFirst().getRegionName(), true);
GetRegionInfoResponse response = admin.getRegionInfo(null, request);
return response.getCompactionState();
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
}
}

/**
* @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
* #getCompactionStateForRegion(byte[])} instead.
*/
@Deprecated
public CompactionState getCompactionState(final String tableNameOrRegionName)
throws IOException, InterruptedException {
return getCompactionState(Bytes.toBytes(tableNameOrRegionName));
}

/**
* @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
* #getCompactionStateForRegion(byte[])} instead.
*/
@Deprecated
public CompactionState getCompactionState(final byte[] tableNameOrRegionName)
throws IOException, InterruptedException {
try {
return getCompactionStateForRegion(tableNameOrRegionName);
} catch (IllegalArgumentException e) {
// Invalid region, try table
return getCompactionState(TableName.valueOf(tableNameOrRegionName));
}
}

/**
* Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
* taken. If the table is disabled, an offline snapshot is taken.

@@ -2726,7 +2841,7 @@ public synchronized byte[][] rollHLogWriter(String serverName)

final byte[] tableName) throws IOException,
SnapshotCreationException, IllegalArgumentException {
snapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName),
SnapshotDescription.Type.FLUSH);
SnapshotDescription.Type.FLUSH);
}

/**

@@ -3398,7 +3513,8 @@ public synchronized byte[][] rollHLogWriter(String serverName)

public Void call(int callTimeout) throws ServiceException {
master.deleteSnapshot(null,
DeleteSnapshotRequest.newBuilder().
setSnapshot(SnapshotDescription.newBuilder().setName(snapshotName).build()).build());
setSnapshot(SnapshotDescription.newBuilder().setName(snapshotName).build()).build()
);
return null;
}
});

@@ -189,7 +189,7 @@ public abstract class IntegrationTestBase extends AbstractHBaseTool {

public abstract int runTestFromCommandLine() throws Exception;

public abstract String getTablename();
public abstract TableName getTablename();

protected abstract Set<String> getColumnFamilies();
}

@@ -115,9 +115,10 @@ public class IntegrationTestIngest extends IntegrationTestBase {

}

@Override
public String getTablename() {
public TableName getTablename() {
String clazz = this.getClass().getSimpleName();
return conf.get(String.format("%s.%s", clazz, LoadTestTool.OPT_TABLE_NAME), clazz);
return TableName.valueOf(
conf.get(String.format("%s.%s", clazz, LoadTestTool.OPT_TABLE_NAME), clazz));
}
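
The table under test is still chosen by a per-class configuration key, now parsed into a TableName. A hedged sketch of overriding it, assuming LoadTestTool.OPT_TABLE_NAME is the "tn" flag name used with "-tn" elsewhere in this patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;

// Sketch only: key shape is "<SimpleClassName>.<OPT_TABLE_NAME>", with the
// class name itself as the default table name.
class TableNameOverrideSketch {
  static TableName resolve() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("IntegrationTestIngest.tn", "my_ingest_table"); // assumed key
    return TableName.valueOf(
        conf.get("IntegrationTestIngest.tn", "IntegrationTestIngest"));
  }
}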

@Override

@@ -126,8 +127,8 @@ public class IntegrationTestIngest extends IntegrationTestBase {

}

private void deleteTableIfNecessary() throws IOException {
if (util.getHBaseAdmin().tableExists(TableName.valueOf(getTablename()))) {
util.deleteTable(Bytes.toBytes(getTablename()));
if (util.getHBaseAdmin().tableExists(getTablename())) {
util.deleteTable(getTablename());
}
}

@@ -178,7 +179,7 @@ public class IntegrationTestIngest extends IntegrationTestBase {

protected String[] getArgsForLoadTestToolInitTable() {
List<String> args = new ArrayList<String>();
args.add("-tn");
args.add(getTablename());
args.add(getTablename().getNameAsString());
// pass all remaining args from conf with keys <test class name>.<load test tool arg>
String clazz = this.getClass().getSimpleName();
for (String arg : LOAD_TEST_TOOL_INIT_ARGS) {

@@ -196,7 +197,7 @@ public class IntegrationTestIngest extends IntegrationTestBase {

long numKeys) {
List<String> args = new ArrayList<String>();
args.add("-tn");
args.add(getTablename());
args.add(getTablename().getNameAsString());
args.add(mode);
args.add(modeSpecificArg);
args.add("-start_key");

@@ -22,7 +22,6 @@ import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileReaderV3;

@@ -30,7 +29,6 @@ import org.apache.hadoop.hbase.io.hfile.HFileWriterV3;

import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogReader;
import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogWriter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;

@@ -81,18 +79,18 @@ public class IntegrationTestIngestWithEncryption extends IntegrationTestIngest {

// encryption features enabled.
final Admin admin = util.getHBaseAdmin();
HTableDescriptor tableDescriptor =
new HTableDescriptor(admin.getTableDescriptor(TableName.valueOf(getTablename())));
new HTableDescriptor(admin.getTableDescriptor(getTablename()));
for (HColumnDescriptor columnDescriptor: tableDescriptor.getColumnFamilies()) {
columnDescriptor.setEncryptionType("AES");
LOG.info("Updating CF schema for " + getTablename() + "." +
columnDescriptor.getNameAsString());
admin.disableTable(TableName.valueOf(getTablename()));
admin.modifyColumn(TableName.valueOf(getTablename()), columnDescriptor);
admin.enableTable(TableName.valueOf(getTablename()));
admin.disableTable(getTablename());
admin.modifyColumn(getTablename(), columnDescriptor);
admin.enableTable(getTablename());
util.waitFor(30000, 1000, true, new Predicate<IOException>() {
@Override
public boolean evaluate() throws IOException {
return admin.isTableAvailable(TableName.valueOf(getTablename()));
return admin.isTableAvailable(getTablename());
}
});
}
|
||||
|
|
|
@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
|
|||
import org.apache.hadoop.hbase.chaos.policies.PeriodicRandomActionPolicy;
|
||||
import org.apache.hadoop.hbase.chaos.policies.Policy;
|
||||
import org.apache.hadoop.hbase.client.Admin;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.ipc.RpcClient;
|
||||
import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
|
||||
import org.apache.hadoop.mapreduce.Counters;
|
||||
|
@ -166,7 +165,7 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {
|
|||
public void setUpMonkey() throws Exception {
|
||||
Policy p = new PeriodicRandomActionPolicy(sleepTime,
|
||||
new RestartRsHoldingTableAction(sleepTime, tableName.getNameAsString()),
|
||||
new MoveRandomRegionOfTableAction(tableName.getNameAsString()));
|
||||
new MoveRandomRegionOfTableAction(tableName));
|
||||
this.monkey = new PolicyBasedChaosMonkey(util, p);
|
||||
// don't start monkey right away
|
||||
}
|
||||
|
@ -213,8 +212,8 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {
|
|||
}
|
||||
|
||||
@Override
|
||||
public String getTablename() {
|
||||
return tableName.getNameAsString();
|
||||
public TableName getTablename() {
|
||||
return tableName;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -25,8 +25,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
|
|||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.client.Admin;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
/**
|
||||
* Action the adds a column family to a table.
|
||||
|
@ -35,8 +33,8 @@ public class AddColumnAction extends Action {
|
|||
private final TableName tableName;
|
||||
private Admin admin;
|
||||
|
||||
public AddColumnAction(String tableName) {
|
||||
this.tableName = TableName.valueOf(tableName);
|
||||
public AddColumnAction(TableName tableName) {
|
||||
this.tableName = tableName;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -25,9 +25,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

/**
* Action that tries to adjust the bloom filter setting on all the columns of a

@@ -37,13 +35,13 @@ public class ChangeBloomFilterAction extends Action {
private final long sleepTime;
private final TableName tableName;

public ChangeBloomFilterAction(String tableName) {
public ChangeBloomFilterAction(TableName tableName) {
this(-1, tableName);
}

public ChangeBloomFilterAction(int sleepTime, String tableName) {
public ChangeBloomFilterAction(int sleepTime, TableName tableName) {
this.sleepTime = sleepTime;
this.tableName = TableName.valueOf(tableName);
this.tableName = tableName;
}

@Override
@@ -25,23 +25,19 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.util.Bytes;

/**
* Action that changes the compression algorithm on a column family from a list of tables.
*/
public class ChangeCompressionAction extends Action {
private final TableName tableName;
private final String tableNameString;

private Admin admin;
private Random random;

public ChangeCompressionAction(String tableName) {
tableNameString = tableName;
this.tableName = TableName.valueOf(tableName);
public ChangeCompressionAction(TableName tableName) {
this.tableName = tableName;
this.random = new Random();
}

@@ -69,7 +65,7 @@ public class ChangeCompressionAction extends Action {
Algorithm algo = possibleAlgos[random.nextInt(possibleAlgos.length)];

LOG.debug("Performing action: Changing compression algorithms on "
+ tableNameString + " to " + algo);
+ tableName.getNameAsString() + " to " + algo);
for (HColumnDescriptor descriptor : columnDescriptors) {
if (random.nextBoolean()) {
descriptor.setCompactionCompressionType(algo);
@@ -25,9 +25,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;

/**
* Action that changes the encoding on a column family from a list of tables.

@@ -38,8 +36,8 @@ public class ChangeEncodingAction extends Action {
private Admin admin;
private Random random;

public ChangeEncodingAction(String tableName) {
this.tableName = TableName.valueOf(tableName);
public ChangeEncodingAction(TableName tableName) {
this.tableName = tableName;
this.random = new Random();
}
@@ -25,8 +25,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

/**
* Action that changes the number of versions on a column family from a list of tables.

@@ -35,14 +33,12 @@ import org.apache.hadoop.hbase.util.Bytes;
*/
public class ChangeVersionsAction extends Action {
private final TableName tableName;
private final String tableNameString;

private Admin admin;
private Random random;

public ChangeVersionsAction(String tableName) {
tableNameString = tableName;
this.tableName = TableName.valueOf(tableName);
public ChangeVersionsAction(TableName tableName) {
this.tableName = tableName;
this.random = new Random();
}

@@ -66,7 +62,7 @@ public class ChangeVersionsAction extends Action {
descriptor.setMaxVersions(versions);
descriptor.setMinVersions(versions);
}
LOG.debug("Performing action: Changing versions on " + tableNameString);
LOG.debug("Performing action: Changing versions on " + tableName.getNameAsString());
admin.modifyTable(tableName, tableDescriptor);
}
}
@@ -26,8 +26,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

/**
* Action that queues a compaction of a random region from the table.

@@ -38,15 +36,15 @@ public class CompactRandomRegionOfTableAction extends Action {
private final TableName tableName;

public CompactRandomRegionOfTableAction(
String tableName, float majorRatio) {
TableName tableName, float majorRatio) {
this(-1, tableName, majorRatio);
}

public CompactRandomRegionOfTableAction(
int sleepTime, String tableName, float majorRatio) {
int sleepTime, TableName tableName, float majorRatio) {
this.majorRatio = (int) (100 * majorRatio);
this.sleepTime = sleepTime;
this.tableName = TableName.valueOf(tableName);
this.tableName = tableName;
}

@Override

@@ -69,10 +67,10 @@ public class CompactRandomRegionOfTableAction extends Action {
try {
if (major) {
LOG.debug("Major compacting region " + region.getRegionNameAsString());
admin.majorCompact(region.getRegionName());
admin.majorCompactRegion(region.getRegionName());
} else {
LOG.debug("Compacting region " + region.getRegionNameAsString());
admin.compact(region.getRegionName());
admin.compactRegion(region.getRegionName());
}
} catch (Exception ex) {
LOG.warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
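The compaction overloads split the same way. A hedged sketch, assuming an Admin handle and illustrative table and region names, of how table-wide and region-targeted compactions are requested after this change; majorCompactRegion appears only in the hunks above, so its exact signature here is an assumption:

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class CompactionCallsSketch {
  // Table-wide vs. region-targeted compaction after the rename; both are
  // asynchronous. The names passed in are illustrative.
  static void requestCompactions(Admin admin, TableName table, byte[] regionName)
      throws IOException, InterruptedException {
    admin.compact(table);                  // whole table
    admin.compactRegion(regionName);       // one region, by region name bytes
    admin.majorCompactRegion(regionName);  // major-compact that region
  }
}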
@@ -20,29 +20,26 @@ package org.apache.hadoop.hbase.chaos.actions;

import org.apache.commons.lang.math.RandomUtils;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

/**
* Action that queues a table compaction.
*/
public class CompactTableAction extends Action {
private final byte[] tableNameBytes;
private final TableName tableName;
private final int majorRatio;
private final long sleepTime;
private final String tableName;

public CompactTableAction(String tableName, float majorRatio) {
public CompactTableAction(TableName tableName, float majorRatio) {
this(-1, tableName, majorRatio);
}

public CompactTableAction(
int sleepTime, String tableName, float majorRatio) {
this.tableNameBytes = Bytes.toBytes(tableName);
int sleepTime, TableName tableName, float majorRatio) {
this.tableName = tableName;
this.majorRatio = (int) (100 * majorRatio);
this.sleepTime = sleepTime;
this.tableName = tableName;
}

@Override

@@ -54,9 +51,9 @@ public class CompactTableAction extends Action {
LOG.info("Performing action: Compact table " + tableName + ", major=" + major);
try {
if (major) {
admin.majorCompact(tableNameBytes);
admin.majorCompact(tableName);
} else {
admin.compact(tableNameBytes);
admin.compact(tableName);
}
} catch (Exception ex) {
LOG.warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
@@ -25,8 +25,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

/**
* Action that tries to flush a random region of a table.

@@ -35,13 +33,13 @@ public class FlushRandomRegionOfTableAction extends Action {
private final long sleepTime;
private final TableName tableName;

public FlushRandomRegionOfTableAction(String tableName) {
public FlushRandomRegionOfTableAction(TableName tableName) {
this (-1, tableName);
}

public FlushRandomRegionOfTableAction(int sleepTime, String tableName) {
public FlushRandomRegionOfTableAction(int sleepTime, TableName tableName) {
this.sleepTime = sleepTime;
this.tableName = TableName.valueOf(tableName);
this.tableName = tableName;
}

@Override

@@ -60,7 +58,7 @@ public class FlushRandomRegionOfTableAction extends Action {
regions.toArray(new HRegionInfo[regions.size()]));
LOG.debug("Flushing region " + region.getRegionNameAsString());
try {
admin.flush(region.getRegionName());
admin.flushRegion(region.getRegionName());
} catch (Exception ex) {
LOG.warn("Flush failed, might be caused by other chaos: " + ex.getMessage());
}
@@ -21,8 +21,6 @@ package org.apache.hadoop.hbase.chaos.actions;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

/**
* Action that tries to flush a table.

@@ -31,13 +29,13 @@ public class FlushTableAction extends Action {
private final long sleepTime;
private final TableName tableName;

public FlushTableAction(String tableName) {
public FlushTableAction(TableName tableName) {
this(-1, tableName);
}

public FlushTableAction(int sleepTime, String tableName) {
public FlushTableAction(int sleepTime, TableName tableName) {
this.sleepTime = sleepTime;
this.tableName = TableName.valueOf(tableName);
this.tableName = tableName;
}

@Override

@@ -47,7 +45,7 @@ public class FlushTableAction extends Action {

LOG.info("Performing action: Flush table " + tableName);
try {
admin.flush(tableName.toBytes());
admin.flush(tableName);
} catch (Exception ex) {
LOG.warn("Flush failed, might be caused by other chaos: " + ex.getMessage());
}
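A minimal sketch of the flush calls after the split, assuming an Admin handle; the names passed in are illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class FlushCallsSketch {
  // flush() keeps only the TableName form; region flushes move to
  // flushRegion(byte[]). Both are synchronous. Names are illustrative.
  static void requestFlushes(Admin admin, TableName table, byte[] regionName)
      throws IOException, InterruptedException {
    admin.flush(table);             // was admin.flush(tableName.toBytes())
    admin.flushRegion(regionName);  // was admin.flush(region.getRegionName())
  }
}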
@@ -25,8 +25,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

/**
* Action to merge regions of a table.

@@ -35,12 +33,12 @@ public class MergeRandomAdjacentRegionsOfTableAction extends Action {
private final TableName tableName;
private final long sleepTime;

public MergeRandomAdjacentRegionsOfTableAction(String tableName) {
public MergeRandomAdjacentRegionsOfTableAction(TableName tableName) {
this(-1, tableName);
}

public MergeRandomAdjacentRegionsOfTableAction(int sleepTime, String tableName) {
this.tableName = TableName.valueOf(tableName);
public MergeRandomAdjacentRegionsOfTableAction(int sleepTime, TableName tableName) {
this.tableName = tableName;
this.sleepTime = sleepTime;
}
@@ -25,8 +25,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

/**
* Action that tries to move a random region of a table.

@@ -35,13 +33,13 @@ public class MoveRandomRegionOfTableAction extends Action {
private final long sleepTime;
private final TableName tableName;

public MoveRandomRegionOfTableAction(String tableName) {
public MoveRandomRegionOfTableAction(TableName tableName) {
this(-1, tableName);
}

public MoveRandomRegionOfTableAction(long sleepTime, String tableName) {
public MoveRandomRegionOfTableAction(long sleepTime, TableName tableName) {
this.sleepTime = sleepTime;
this.tableName = TableName.valueOf(tableName);
this.tableName = tableName;
}

@Override
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.factories.MonkeyConstants;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

/**

@@ -39,13 +38,13 @@ public class MoveRegionsOfTableAction extends Action {
private final TableName tableName;
private final long maxTime;

public MoveRegionsOfTableAction(String tableName) {
public MoveRegionsOfTableAction(TableName tableName) {
this(-1, MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, tableName);
}

public MoveRegionsOfTableAction(long sleepTime, long maxSleepTime, String tableName) {
public MoveRegionsOfTableAction(long sleepTime, long maxSleepTime, TableName tableName) {
this.sleepTime = sleepTime;
this.tableName = TableName.valueOf(tableName);
this.tableName = tableName;
this.maxTime = maxSleepTime;
}
@@ -26,8 +26,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

/**
* Action that removes a column family.

@@ -35,13 +33,11 @@ import org.apache.hadoop.hbase.util.Bytes;
public class RemoveColumnAction extends Action {
private final TableName tableName;
private final Set<String> protectedColumns;
private final String tableNameString;
private Admin admin;
private Random random;

public RemoveColumnAction(String tableName, Set<String> protectedColumns) {
tableNameString = tableName;
this.tableName = TableName.valueOf(tableName);
public RemoveColumnAction(TableName tableName, Set<String> protectedColumns) {
this.tableName = tableName;
this.protectedColumns = protectedColumns;
random = new Random();
}

@@ -67,7 +63,7 @@ public class RemoveColumnAction extends Action {
index = random.nextInt(columnDescriptors.length);
}
LOG.debug("Performing action: Removing " + columnDescriptors[index].getName() + " from "
+ tableNameString);
+ tableName.getNameAsString());
tableDescriptor.removeFamily(columnDescriptors[index].getName());

admin.modifyTable(tableName, tableDescriptor);
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.chaos.actions;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;

/**
* Action that tries to take a snapshot of a table.

@@ -30,12 +29,12 @@ public class SnapshotTableAction extends Action {
private final TableName tableName;
private final long sleepTime;

public SnapshotTableAction(String tableName) {
public SnapshotTableAction(TableName tableName) {
this(-1, tableName);
}

public SnapshotTableAction(int sleepTime, String tableName) {
this.tableName = TableName.valueOf(tableName);
public SnapshotTableAction(int sleepTime, TableName tableName) {
this.tableName = tableName;
this.sleepTime = sleepTime;
}
@@ -25,8 +25,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

/**
* Action that tries to split a random region of a table.

@@ -35,13 +33,13 @@ public class SplitRandomRegionOfTableAction extends Action {
private final long sleepTime;
private final TableName tableName;

public SplitRandomRegionOfTableAction(String tableName) {
public SplitRandomRegionOfTableAction(TableName tableName) {
this(-1, tableName);
}

public SplitRandomRegionOfTableAction(int sleepTime, String tableName) {
public SplitRandomRegionOfTableAction(int sleepTime, TableName tableName) {
this.sleepTime = sleepTime;
this.tableName = TableName.valueOf(tableName);
this.tableName = tableName;
}

@Override

@@ -60,7 +58,7 @@ public class SplitRandomRegionOfTableAction extends Action {
regions.toArray(new HRegionInfo[regions.size()]));
LOG.debug("Splitting region " + region.getRegionNameAsString());
try {
admin.split(region.getRegionName());
admin.splitRegion(region.getRegionName());
} catch (Exception ex) {
LOG.warn("Split failed, might be caused by other chaos: " + ex.getMessage());
}
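Splits follow the same pattern: splitRegion(byte[]) targets one region while a TableName overload stays table-level, as the TestAdmin and TestAssignmentListener hunks further below also exercise. A hedged sketch with illustrative names and split point:

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitCallsSketch {
  // Table-level splits keep a TableName overload; single-region splits move
  // to splitRegion(byte[]). The split point below is illustrative.
  static void requestSplits(Admin admin, TableName table, byte[] regionName)
      throws IOException, InterruptedException {
    admin.split(table, Bytes.toBytes("row-3"));  // split the table at a row key
    admin.splitRegion(regionName);               // split one region, by name
  }
}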
@@ -23,6 +23,7 @@ import java.util.Properties;
import java.util.Set;

import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.ChaosMonkey;

import com.google.common.collect.ImmutableMap;

@@ -32,12 +33,12 @@ import com.google.common.collect.ImmutableMap;
*/
public abstract class MonkeyFactory {

protected String tableName;
protected TableName tableName;
protected Set<String> columnFamilies;
protected IntegrationTestingUtility util;
protected Properties properties = new Properties();

public MonkeyFactory setTableName(String tableName) {
public MonkeyFactory setTableName(TableName tableName) {
this.tableName = tableName;
return this;
}
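With the chaos action constructors and MonkeyFactory both taking TableName, wiring up actions no longer round-trips through Strings. A small sketch using constructors that appear in this diff; the table name is illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.actions.CompactTableAction;
import org.apache.hadoop.hbase.chaos.actions.FlushTableAction;
import org.apache.hadoop.hbase.chaos.actions.MoveRandomRegionOfTableAction;

public class MonkeyWiringSketch {
  // Constructors now take TableName directly; the table name is illustrative.
  static void buildActions() {
    TableName table = TableName.valueOf("mytable");
    new FlushTableAction(table);
    new CompactTableAction(table, 0.5f);
    new MoveRandomRegionOfTableAction(table);
  }
}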
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;

@@ -56,7 +55,6 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.RegionSplitter;

@@ -81,7 +79,6 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

/**
* Test Bulk Load and MR on a distributed cluster.

@@ -151,8 +148,8 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
public static class SlowMeCoproScanOperations extends BaseRegionObserver {
static final AtomicLong sleepTime = new AtomicLong(2000);
Random r = new Random();
AtomicLong countOfNext = new AtomicLong(0);
AtomicLong countOfOpen = new AtomicLong(0);
public SlowMeCoproScanOperations() {}
@Override
public RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> e,

@@ -185,7 +182,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
} catch (InterruptedException e1) {
LOG.error(e1);
}
}
}
}
}
}

@@ -196,7 +193,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
if (replicaCount == NUM_REPLICA_COUNT_DEFAULT) return;

TableName t = TableName.valueOf(getTablename());
TableName t = getTablename();
Admin admin = util.getHBaseAdmin();
HTableDescriptor desc = admin.getTableDescriptor(t);
desc.addCoprocessor(SlowMeCoproScanOperations.class.getName());

@@ -227,12 +224,12 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
}

private void setupTable() throws IOException, InterruptedException {
if (util.getHBaseAdmin().tableExists(TableName.valueOf(getTablename()))) {
if (util.getHBaseAdmin().tableExists(getTablename())) {
util.deleteTable(getTablename());
}

util.createTable(
Bytes.toBytes(getTablename()),
getTablename().getName(),
new byte[][]{CHAIN_FAM, SORT_FAM, DATA_FAM},
getSplits(16)
);

@@ -240,7 +237,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
if (replicaCount == NUM_REPLICA_COUNT_DEFAULT) return;

TableName t = TableName.valueOf(getTablename());
TableName t = getTablename();
HBaseTestingUtility.setReplicas(util.getHBaseAdmin(), t, replicaCount);
}

@@ -663,7 +660,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
}

TableMapReduceUtil.initTableMapperJob(
Bytes.toBytes(getTablename()),
getTablename().getName(),
scan,
LinkedListCheckingMapper.class,
LinkKey.class,

@@ -731,8 +728,8 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
}

@Override
public String getTablename() {
return getConf().get(TABLE_NAME_KEY, TABLE_NAME);
public TableName getTablename() {
return TableName.valueOf(getConf().get(TABLE_NAME_KEY, TABLE_NAME));
}

@Override
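Subclasses of the integration test base now implement getTablename() returning a TableName built from their configuration, as the hunk above and several below show. A hedged sketch of the override pattern; the key, default, and class here are illustrative, not the test's actual constants:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;

public class GetTablenameOverrideSketch {
  // Illustrative key and default; real subclasses use their own constants.
  private static final String TABLE_NAME_KEY = "test.table.name";
  private static final String DEFAULT_TABLE_NAME = "SomeIntegrationTest";

  private Configuration conf = new Configuration();

  public TableName getTablename() {
    return TableName.valueOf(conf.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME));
  }
}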
@@ -52,14 +52,14 @@ import org.junit.experimental.categories.Category;
*
* Then the test creates a snapshot from this table, and overrides the values in the original
* table with values 'after_snapshot_value'. The test then runs a mapreduce job over the snapshot
* with a scan start row 'bbb' and stop row 'yyy'. The data is saved in a single reduce output
* file, and
* inspected later to verify that the MR job has seen all the values from the snapshot.
*
* <p> These parameters can be used to configure the job:
* <br>"IntegrationTestTableSnapshotInputFormat.table" => the name of the table
* <br>"IntegrationTestTableSnapshotInputFormat.snapshot" => the name of the snapshot
* <br>"IntegrationTestTableSnapshotInputFormat.numRegions" => number of regions in the table
* to be created (default, 32).
* <br>"IntegrationTestTableSnapshotInputFormat.tableDir" => temporary directory to restore the
* snapshot files

@@ -74,9 +74,9 @@ public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase
private static final String TABLE_NAME_KEY = "IntegrationTestTableSnapshotInputFormat.table";
private static final String DEFAULT_TABLE_NAME = "IntegrationTestTableSnapshotInputFormat";

private static final String SNAPSHOT_NAME_KEY =
"IntegrationTestTableSnapshotInputFormat.snapshot";
private static final String NUM_REGIONS_KEY =
"IntegrationTestTableSnapshotInputFormat.numRegions";

private static final String MR_IMPLEMENTATION_KEY =

@@ -175,7 +175,7 @@ public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase
}

@Override // CM is not intended to be run with this test
public String getTablename() {
public TableName getTablename() {
return null;
}
@@ -190,7 +190,7 @@ public class IntegrationTestMTTR {

// Set up the action that will move the regions of our table.
moveRegionAction = new MoveRegionsOfTableAction(sleepTime,
MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, tableName.getNameAsString());
MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, tableName);

// Kill the master
restartMasterAction = new RestartActiveMasterAction(1000);

@@ -346,6 +346,7 @@ public class IntegrationTestMTTR {
}
}

@Override
public String toString() {
Objects.ToStringHelper helper = Objects.toStringHelper(this)
.add("numResults", stats.getN())
@@ -1153,9 +1153,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
}

@Override
public String getTablename() {
public TableName getTablename() {
Configuration c = getConf();
return c.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME);
return TableName.valueOf(c.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME));
}

@Override
@@ -120,6 +120,7 @@ public class IntegrationTestLoadAndVerify extends IntegrationTestBase {
REFERENCES_CHECKED
}

@Override
public void setUpCluster() throws Exception {
util = getTestingUtil(getConf());
util.initializeCluster(3);

@@ -421,6 +422,7 @@ public void cleanUpCluster() throws Exception {
toRun = args[0];
}

@Override
public int runTestFromCommandLine() throws Exception {
IntegrationTestingUtility.setUseDistributedCluster(getConf());
boolean doLoad = false;

@@ -442,8 +444,8 @@ public void cleanUpCluster() throws Exception {
}

// create HTableDescriptor for specified table
String table = getTablename();
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
TableName table = getTablename();
HTableDescriptor htd = new HTableDescriptor(table);
htd.addFamily(new HColumnDescriptor(TEST_FAMILY));

HBaseAdmin admin = new HBaseAdmin(getConf());

@@ -461,8 +463,8 @@ public void cleanUpCluster() throws Exception {
}

@Override
public String getTablename() {
return getConf().get(TABLE_NAME_KEY, TEST_NAME);
public TableName getTablename() {
return TableName.valueOf(getConf().get(TABLE_NAME_KEY, TEST_NAME));
}

@Override
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.chaos.factories.MonkeyFactory;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;

@@ -167,8 +166,8 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
Threads.sleep(refreshTime);
} else {
LOG.info("Reopening the table");
admin.disableTable(TableName.valueOf(getTablename()));
admin.enableTable(TableName.valueOf(getTablename()));
admin.disableTable(getTablename());
admin.enableTable(getTablename());
}

// We should only start the ChaosMonkey after the readers are started and have cached
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.IntegrationTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;

@@ -65,19 +64,19 @@ import org.junit.experimental.categories.Category;
* that) added with visibility expressions. In load step, 200 map tasks are launched, which in turn
* write loadmapper.num_to_write (default 100K) rows to an hbase table. Rows are written in blocks,
* for a total of 100 blocks.
*
* Verify step scans the table as both users with Authorizations. This step asserts that user can
* see only those rows (and so cells) with visibility for which they have label auth.
*
* This class can be run as a unit test, as an integration test, or from the command line.
*
* Originally taken from Apache Bigtop.
* Issue user names as comma separated list.
*./hbase IntegrationTestWithCellVisibilityLoadAndVerify -u usera,userb
*/
@Category(IntegrationTests.class)
public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationTestLoadAndVerify {
private static final String ERROR_STR =
"Two user names are to be specified seperated by a ',' like 'usera,userb'";
private static final char NOT = '!';
private static final char OR = '|';

@@ -257,10 +256,12 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
return job;
}

@Override
protected void setMapperClass(Job job) {
job.setMapperClass(LoadWithCellVisibilityMapper.class);
}

@Override
protected void doVerify(final Configuration conf, final HTableDescriptor htd) throws Exception {
System.out.println(String.format("Verifying for auths %s, %s, %s, %s", CONFIDENTIAL, TOPSECRET,
SECRET, PRIVATE));

@@ -343,6 +344,7 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
job.getConfiguration().setInt(TableRecordReaderImpl.LOG_PER_ROW_COUNT, (int) lpr);
}

@Override
public void usage() {
System.err.println(this.getClass().getSimpleName() + " -u usera,userb [-Doptions]");
System.err.println(" Loads a table with cell visibilities and verifies with Authorizations");

@@ -359,12 +361,12 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
+ "Number hbase scanner caching rows to read (default 50)");
}

@Override
public int runTestFromCommandLine() throws Exception {
IntegrationTestingUtility.setUseDistributedCluster(getConf());
int numPresplits = getConf().getInt("loadmapper.numPresplits", 5);
// create HTableDescriptor for specified table
String table = getTablename();
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
HTableDescriptor htd = new HTableDescriptor(getTablename());
htd.addFamily(new HColumnDescriptor(TEST_FAMILY));

HBaseAdmin admin = new HBaseAdmin(getConf());
@@ -19,8 +19,6 @@
package org.apache.hadoop.hbase.trace;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.IntegrationTestingUtility;

@@ -28,7 +26,6 @@ import org.apache.hadoop.hbase.IntegrationTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;

@@ -38,10 +35,8 @@ import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.ToolRunner;
import org.htrace.Sampler;
import org.htrace.Span;
import org.htrace.Trace;
import org.htrace.TraceScope;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@@ -255,13 +250,13 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
ht.put(p);
}
if ((x % 1000) == 0) {
admin.flush(tableName.toBytes());
admin.flush(tableName);
}
} finally {
traceScope.close();
}
}
admin.flush(tableName.toBytes());
admin.flush(tableName);
return rowKeys;
}
@@ -35,7 +35,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.master.HMaster;

@@ -255,7 +254,7 @@ public class TestNamespace {
p.add(Bytes.toBytes("my_cf"),Bytes.toBytes("my_col"),Bytes.toBytes("value1"));
table.put(p);
//flush and read from disk to make sure directory changes are working
admin.flush(desc.getTableName().getName());
admin.flush(desc.getTableName());
Get g = new Get(Bytes.toBytes("row1"));
assertTrue(table.exists(g));
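Descriptor-based call sites migrate the same way: hand Admin the TableName held by the HTableDescriptor instead of its raw name bytes, as this hunk and the encryption tests below do. A minimal sketch, assuming an Admin handle and a descriptor; the helper is illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Admin;

public class DescriptorFlushSketch {
  // Hand Admin the descriptor's TableName rather than its raw name bytes.
  static void flushByDescriptor(Admin admin, HTableDescriptor desc)
      throws IOException, InterruptedException {
    admin.flush(desc.getTableName());  // was admin.flush(desc.getTableName().getName())
  }
}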
@@ -119,7 +119,7 @@ public class TestAdmin {

@Test (timeout=300000)
public void testSplitFlushCompactUnknownTable() throws InterruptedException {
final String unknowntable = "fubar";
final TableName unknowntable = TableName.valueOf("fubar");
Exception exception = null;
try {
this.admin.compact(unknowntable);

@@ -1023,10 +1023,11 @@ public class TestAdmin {
scanner.next();

// Split the table
this.admin.split(tableName.getName(), splitPoint);
this.admin.split(tableName, splitPoint);

final AtomicInteger count = new AtomicInteger(0);
Thread t = new Thread("CheckForSplit") {
@Override
public void run() {
for (int i = 0; i < 45; i++) {
try {

@@ -1636,7 +1637,7 @@ public class TestAdmin {
// make sure log.hflush() calls syncFs() to open a pipeline
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
// lower the namenode & datanode heartbeat so the namenode
// quickly detects datanode failures
TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
// the namenode might still try to choose the recently-dead datanode
@@ -238,7 +238,7 @@ public class TestFromClientSide {
*/
@Test
public void testPurgeFutureDeletes() throws Exception {
final byte[] TABLENAME = Bytes.toBytes("testPurgeFutureDeletes");
final TableName TABLENAME = TableName.valueOf("testPurgeFutureDeletes");
final byte[] ROW = Bytes.toBytes("row");
final byte[] FAMILY = Bytes.toBytes("family");
final byte[] COLUMN = Bytes.toBytes("column");
@@ -256,7 +256,7 @@ public class TestReplicaWithCluster {
final HTable table = new HTable(HTU.getConfiguration(), hdt.getTableName());
table.put(p);

HTU.getHBaseAdmin().flush(table.getTableName());
HTU.getHBaseAdmin().flush(table.getName());
LOG.info("Put & flush done on the first cluster. Now doing a get on the same cluster.");

Waiter.waitFor(HTU.getConfiguration(), 1000, new Waiter.Predicate<Exception>() {
@@ -18,7 +18,6 @@

package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;

@@ -303,7 +302,7 @@ public class TestSnapshotCloneIndependence {
originalRegionCount, cloneTableRegionCount);

// Split a region on the parent table
admin.split(originalTableHRegions.get(0).getRegionName());
admin.splitRegion(originalTableHRegions.get(0).getRegionName());
waitOnSplit(original, originalRegionCount);

// Verify that the cloned table region is not split
@@ -105,7 +105,7 @@ public class TestTableSnapshotScanner {
util.loadTable(table, FAMILIES, value);

// cause flush to create new files in the region
admin.flush(tableName.toString());
admin.flush(tableName);
table.close();
}
@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;

@@ -240,7 +239,7 @@ public class TestRegionObserverInterface {

@Test
public void testCheckAndPutHooks() throws IOException {
TableName tableName =
TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndPutHooks");
HTable table = util.createTable(tableName, new byte[][] {A, B, C});
try {

@@ -251,14 +250,14 @@ public class TestRegionObserverInterface {
p = new Put(Bytes.toBytes(0));
p.add(A, A, A);
verifyMethodResult(SimpleRegionObserver.class,
new String[] {"hadPreCheckAndPut",
"hadPreCheckAndPutAfterRowLock", "hadPostCheckAndPut"},
tableName,
new Boolean[] {false, false, false}
);
table.checkAndPut(Bytes.toBytes(0), A, A, A, p);
verifyMethodResult(SimpleRegionObserver.class,
new String[] {"hadPreCheckAndPut",
"hadPreCheckAndPutAfterRowLock", "hadPostCheckAndPut"},
tableName,
new Boolean[] {true, true, true}

@@ -271,7 +270,7 @@ public class TestRegionObserverInterface {

@Test
public void testCheckAndDeleteHooks() throws IOException {
TableName tableName =
TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndDeleteHooks");
HTable table = util.createTable(tableName, new byte[][] {A, B, C});
try {

@@ -282,14 +281,14 @@ public class TestRegionObserverInterface {
Delete d = new Delete(Bytes.toBytes(0));
table.delete(d);
verifyMethodResult(SimpleRegionObserver.class,
new String[] {"hadPreCheckAndDelete",
"hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"},
tableName,
new Boolean[] {false, false, false}
);
table.checkAndDelete(Bytes.toBytes(0), A, A, A, d);
verifyMethodResult(SimpleRegionObserver.class,
new String[] {"hadPreCheckAndDelete",
"hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"},
tableName,
new Boolean[] {true, true, true}

@@ -516,7 +515,7 @@ public class TestRegionObserverInterface {

// force a compaction
long ts = System.currentTimeMillis();
admin.flush(compactTable.toBytes());
admin.flush(compactTable);
// wait for flush
for (int i=0; i<10; i++) {
if (compactor.lastFlush >= ts) {

@@ -528,7 +527,7 @@ public class TestRegionObserverInterface {
LOG.debug("Flush complete");

ts = compactor.lastFlush;
admin.majorCompact(compactTable.toBytes());
admin.majorCompact(compactTable);
// wait for compaction
for (int i=0; i<30; i++) {
if (compactor.lastCompaction >= ts) {
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Put;

@@ -214,14 +213,14 @@ public class TestRegionObserverScannerOpenHook {
*/
public static class CompactionCompletionNotifyingRegion extends HRegion {
private static volatile CountDownLatch compactionStateChangeLatch = null;

@SuppressWarnings("deprecation")
public CompactionCompletionNotifyingRegion(Path tableDir, HLog log,
FileSystem fs, Configuration confParam, HRegionInfo info,
HTableDescriptor htd, RegionServerServices rsServices) {
super(tableDir, log, fs, confParam, info, htd, rsServices);
}

public CountDownLatch getCompactionStateChangeLatch() {
if (compactionStateChangeLatch == null) compactionStateChangeLatch = new CountDownLatch(1);
return compactionStateChangeLatch;

@@ -231,9 +230,9 @@ public class TestRegionObserverScannerOpenHook {
boolean ret = super.compact(compaction, store);
if (ret) compactionStateChangeLatch.countDown();
return ret;
}
}
}

/**
* Unfortunately, the easiest way to test this is to spin up a mini-cluster since we want to do
* the usual compaction mechanism on the region, rather than going through the backdoor to the

@@ -270,16 +269,16 @@ public class TestRegionObserverScannerOpenHook {
List<HRegion> regions = rs.getOnlineRegions(desc.getTableName());
assertEquals("More than 1 region serving test table with 1 row", 1, regions.size());
HRegion region = regions.get(0);
admin.flush(region.getRegionName());
admin.flushRegion(region.getRegionName());
CountDownLatch latch = ((CompactionCompletionNotifyingRegion)region)
.getCompactionStateChangeLatch();

// put another row and flush that too
put = new Put(Bytes.toBytes("anotherrow"));
put.add(A, A, A);
table.put(put);
table.flushCommits();
admin.flush(region.getRegionName());
admin.flushRegion(region.getRegionName());

// run a compaction, which normally should get rid of the data
// wait for the compaction checker to complete
@@ -109,7 +109,7 @@ public class TestPrefixTree {
put.add(fam, qual2, Bytes.toBytes("c2-value-3"));
table.put(put);
table.flushCommits();
hBaseAdmin.flush(tableName.getNameAsString());
hBaseAdmin.flush(tableName);
String[] rows = new String[3];
rows[0] = row1;
rows[1] = row2;
@@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

@@ -167,7 +166,7 @@ public abstract class TableSnapshotInputFormatTestBase {
util.loadTable(table, FAMILIES, value);

// cause flush to create new files in the region
admin.flush(tableName.toString());
admin.flush(tableName);
table.close();
}
|
@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
|
|||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.ServerName;
|
||||
import org.apache.hadoop.hbase.client.Admin;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.client.Put;
|
||||
import org.apache.hadoop.hbase.regionserver.HRegion;
|
||||
|
@ -44,7 +43,6 @@ import org.junit.AfterClass;
|
|||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
||||
import com.google.common.base.Joiner;
|
||||
import org.junit.experimental.categories.Category;
|
||||
|
||||
@Category(MediumTests.class)
|
||||
|
@ -70,12 +68,14 @@ public class TestAssignmentListener {
|
|||
public DummyAssignmentListener() {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void regionOpened(final HRegionInfo regionInfo, final ServerName serverName) {
|
||||
LOG.info("Assignment open region=" + regionInfo + " server=" + serverName);
|
||||
openCount.incrementAndGet();
|
||||
modified.incrementAndGet();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void regionClosed(final HRegionInfo regionInfo) {
|
||||
LOG.info("Assignment close region=" + regionInfo);
|
||||
closeCount.incrementAndGet();
|
||||
|
@ -103,12 +103,14 @@ public class TestAssignmentListener {
|
|||
public DummyServerListener() {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void serverAdded(final ServerName serverName) {
|
||||
LOG.info("Server added " + serverName);
|
||||
addedCount.incrementAndGet();
|
||||
modified.incrementAndGet();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void serverRemoved(final ServerName serverName) {
|
||||
LOG.info("Server removed " + serverName);
|
||||
removedCount.incrementAndGet();
|
||||
|
@ -216,7 +218,7 @@ public class TestAssignmentListener {
|
|||
// Split the table in two
|
||||
LOG.info("Split Table");
|
||||
listener.reset();
|
||||
admin.split(TABLE_NAME_STR, "row-3");
|
||||
admin.split(TABLE_NAME, Bytes.toBytes("row-3"));
|
||||
listener.awaitModifications(3);
|
||||
assertEquals(2, listener.getLoadCount()); // daughters added
|
||||
assertEquals(1, listener.getCloseCount()); // parent removed
|
||||
|
@ -226,7 +228,7 @@ public class TestAssignmentListener {
|
|||
int mergeable = 0;
|
||||
while (mergeable < 2) {
|
||||
Thread.sleep(100);
|
||||
admin.majorCompact(TABLE_NAME_STR);
|
||||
admin.majorCompact(TABLE_NAME);
|
||||
mergeable = 0;
|
||||
for (JVMClusterUtil.RegionServerThread regionThread: miniCluster.getRegionServerThreads()) {
|
||||
for (HRegion region: regionThread.getRegionServer().getOnlineRegions(TABLE_NAME)) {
|
||||
|
|
|
@@ -1225,17 +1225,17 @@ public class TestDistributedLogSplitting {
// use last 5 bytes because HBaseTestingUtility.createMultiRegions use 5 bytes key
row = Arrays.copyOfRange(row, 3, 8);
long value = 0;
byte[] tableName = Bytes.toBytes("table");
TableName tableName = TableName.valueOf("table");
byte[] family = Bytes.toBytes("family");
byte[] qualifier = Bytes.toBytes("c1");
long timeStamp = System.currentTimeMillis();
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
HTableDescriptor htd = new HTableDescriptor();
htd.addFamily(new HColumnDescriptor(family));
for (int i = 0; i < NUM_LOG_LINES; i += 1) {
WALEdit e = new WALEdit();
value++;
e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value)));
hrs.getWAL().append(curRegionInfo, TableName.valueOf(tableName), e,
hrs.getWAL().append(curRegionInfo, tableName, e,
System.currentTimeMillis(), htd, sequenceId);
}
hrs.getWAL().sync();

@@ -1317,17 +1317,17 @@ public class TestDistributedLogSplitting {
// use last 5 bytes because HBaseTestingUtility.createMultiRegions use 5 bytes key
row = Arrays.copyOfRange(row, 3, 8);
long value = 0;
final byte[] tableName = Bytes.toBytes("table");
final TableName tableName = TableName.valueOf("table");
byte[] family = Bytes.toBytes("family");
byte[] qualifier = Bytes.toBytes("c1");
long timeStamp = System.currentTimeMillis();
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor(family));
for (int i = 0; i < NUM_LOG_LINES; i += 1) {
WALEdit e = new WALEdit();
value++;
e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value)));
hrs.getWAL().append(curRegionInfo, TableName.valueOf(tableName), e,
hrs.getWAL().append(curRegionInfo, tableName, e,
System.currentTimeMillis(), htd, sequenceId);
}
hrs.getWAL().sync();
@@ -97,7 +97,7 @@ public class TestMaster {

// Now trigger a split and stop when the split is in progress
LOG.info("Splitting table");
TEST_UTIL.getHBaseAdmin().split(TABLENAME.getName());
TEST_UTIL.getHBaseAdmin().split(TABLENAME);
LOG.info("Waiting for split result to be about to open");
RegionStates regionStates = m.assignmentManager.getRegionStates();
while (regionStates.getRegionsOfTable(TABLENAME).size() <= 1) {
@@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

@@ -251,7 +250,7 @@ public class TestTableLockManager {
//ensure that znode for the table node has been deleted
final ZooKeeperWatcher zkWatcher = TEST_UTIL.getZooKeeperWatcher();
final String znode = ZKUtil.joinZNode(zkWatcher.tableLockZNode, TABLE_NAME.getNameAsString());

TEST_UTIL.waitFor(5000, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {

@@ -373,9 +372,9 @@ public class TestTableLockManager {
HRegion region = TEST_UTIL.getSplittableRegion(tableName, -1);
if (region != null) {
byte[] regionName = region.getRegionName();
admin.flush(regionName);
admin.compact(regionName);
admin.split(regionName);
admin.flushRegion(regionName);
admin.compactRegion(regionName);
admin.splitRegion(regionName);
} else {
LOG.warn("Could not find suitable region for the table. Possibly the " +
"region got closed and the attempts got over before " +
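The hunk above exercises all three region-name overloads back to back; a hedged sketch of the same trio gathered into one helper, assuming an Admin handle and an illustrative region name:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Admin;

public class RegionOpsSketch {
  // The renamed byte[] overloads make the region-targeted intent explicit.
  static void touchRegion(Admin admin, byte[] regionName)
      throws IOException, InterruptedException {
    admin.flushRegion(regionName);    // was admin.flush(regionName)
    admin.compactRegion(regionName);  // was admin.compact(regionName)
    admin.splitRegion(regionName);    // was admin.split(regionName)
  }
}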
@@ -117,7 +117,7 @@ public class TestEncryptionKeyRotation {
Thread.sleep(5000); // Need a predicate for online schema change

// And major compact
TEST_UTIL.getHBaseAdmin().majorCompact(htd.getName());
TEST_UTIL.getHBaseAdmin().majorCompact(htd.getTableName());
TEST_UTIL.waitFor(30000, 1000, true, new Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {

@@ -213,7 +213,7 @@ public class TestEncryptionKeyRotation {
} finally {
table.close();
}
TEST_UTIL.getHBaseAdmin().flush(htd.getName());
TEST_UTIL.getHBaseAdmin().flush(htd.getTableName());
}

private static byte[] extractHFileKey(Path path) throws Exception {
@@ -108,7 +108,7 @@ public class TestEncryptionRandomKeying {
} finally {
table.close();
}
TEST_UTIL.getHBaseAdmin().flush(htd.getName());
TEST_UTIL.getHBaseAdmin().flush(htd.getTableName());
}

@AfterClass
@@ -229,6 +229,7 @@ public class TestEndToEndSplitTransaction {
rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
}

@Override
public void run() {
try {
Random random = new Random();

@@ -263,7 +264,7 @@ public class TestEndToEndSplitTransaction {

log("Initiating region split for:" + region.getRegionNameAsString());
try {
admin.split(region.getRegionName(), splitPoint);
admin.splitRegion(region.getRegionName(), splitPoint);
//wait until the split is complete
blockUntilRegionSplit(conf, 50000, region.getRegionName(), true);

@@ -396,7 +397,7 @@ public class TestEndToEndSplitTransaction {
public static void flushAndBlockUntilDone(Admin admin, HRegionServer rs, byte[] regionName)
throws IOException, InterruptedException {
log("flushing region: " + Bytes.toStringBinary(regionName));
admin.flush(regionName);
admin.flushRegion(regionName);
log("blocking until flush is complete: " + Bytes.toStringBinary(regionName));
Threads.sleepWithoutInterrupt(500);
while (rs.cacheFlusher.getFlushQueueSize() > 0) {

@@ -407,7 +408,7 @@ public class TestEndToEndSplitTransaction {
public static void compactAndBlockUntilDone(Admin admin, HRegionServer rs, byte[] regionName)
throws IOException, InterruptedException {
log("Compacting region: " + Bytes.toStringBinary(regionName));
admin.majorCompact(regionName);
admin.majorCompactRegion(regionName);
log("blocking until compaction is complete: " + Bytes.toStringBinary(regionName));
Threads.sleepWithoutInterrupt(500);
while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
@@ -200,7 +200,7 @@ public class TestRegionMergeTransactionOnCluster {
assertTrue(fs.exists(regionAdir));
assertTrue(fs.exists(regionBdir));

admin.compact(mergedRegionInfo.getRegionName());
admin.compactRegion(mergedRegionInfo.getRegionName());
// wait until merged region doesn't have reference file
long timeout = System.currentTimeMillis() + waitTime;
HRegionFileSystem hrfs = new HRegionFileSystem(
@@ -208,7 +208,7 @@ public class TestRegionServerMetrics {

@Test
public void testStoreCount() throws Exception {
byte[] tableName = Bytes.toBytes("testStoreCount");
TableName tableName = TableName.valueOf("testStoreCount");
byte[] cf = Bytes.toBytes("d");
byte[] row = Bytes.toBytes("rk");
byte[] qualifier = Bytes.toBytes("qual");
@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;

@ -123,8 +122,8 @@ public class TestTags {
      put.add(fam, qual, HConstants.LATEST_TIMESTAMP, value);
      put.setAttribute("visibility", Bytes.toBytes("myTag"));
      table.put(put);
      admin.flush(tableName.getName());
      List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
      admin.flush(tableName);
      List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
      for (HRegion region : regions) {
        Store store = region.getStore(fam);
        while (!(store.getStorefilesCount() > 0)) {

@ -137,8 +136,8 @@ public class TestTags {
      put1.add(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
      // put1.setAttribute("visibility", Bytes.toBytes("myTag3"));
      table.put(put1);
      admin.flush(tableName.getName());
      regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
      admin.flush(tableName);
      regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
      for (HRegion region : regions) {
        Store store = region.getStore(fam);
        while (!(store.getStorefilesCount() > 1)) {

@ -152,7 +151,7 @@ public class TestTags {
      put2.setAttribute("visibility", Bytes.toBytes("myTag3"));
      table.put(put2);

      admin.flush(tableName.getName());
      admin.flush(tableName);
      regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
      for (HRegion region : regions) {
        Store store = region.getStore(fam);

@ -161,8 +160,8 @@ public class TestTags {
        }
      }
      result(fam, row, qual, row2, table, value, value2, row1, value1);
      admin.compact(tableName.getName());
      while (admin.getCompactionState(tableName.getName()) != CompactionState.NONE) {
      admin.compact(tableName);
      while (admin.getCompactionState(tableName) != CompactionState.NONE) {
        Thread.sleep(10);
      }
      result(fam, row, qual, row2, table, value, value2, row1, value1);

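TestTags shows the asynchronous compact(TableName) paired with a getCompactionState poll, which is the tightest way to wait for a requested compaction to drain. The same loop, extracted (CompactionState imported as in the test):

    // Sketch only: request a table compaction and poll until it completes.
    static void compactAndWait(Admin admin, TableName tn) throws Exception {
      admin.compact(tn); // asynchronous; was: compact(String)
      while (admin.getCompactionState(tn) != CompactionState.NONE) {
        Thread.sleep(10);
      }
    }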
@ -201,7 +200,7 @@ public class TestTags {
      byte[] value = Bytes.toBytes("value");
      put.add(fam, qual, HConstants.LATEST_TIMESTAMP, value);
      table.put(put);
      admin.flush(tableName.getName());
      admin.flush(tableName);
      List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
      for (HRegion region : regions) {
        Store store = region.getStore(fam);

@ -214,7 +213,7 @@ public class TestTags {
      byte[] value1 = Bytes.toBytes("1000dfsdf");
      put1.add(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
      table.put(put1);
      admin.flush(tableName.getName());
      admin.flush(tableName);
      regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
      for (HRegion region : regions) {
        Store store = region.getStore(fam);

@ -228,8 +227,8 @@ public class TestTags {
      put2.add(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
      table.put(put2);

      admin.flush(tableName.getName());
      regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
      admin.flush(tableName);
      regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
      for (HRegion region : regions) {
        Store store = region.getStore(fam);
        while (!(store.getStorefilesCount() > 2)) {

@ -250,8 +249,8 @@ public class TestTags {
        if (scanner != null)
          scanner.close();
      }
      admin.compact(tableName.getName());
      while (admin.getCompactionState(tableName.getName()) != CompactionState.NONE) {
      admin.compact(tableName);
      while (admin.getCompactionState(tableName) != CompactionState.NONE) {
        Thread.sleep(10);
      }
      s = new Scan(row);

@ -310,7 +309,7 @@ public class TestTags {
      byte[] value1 = Bytes.toBytes("1000dfsdf");
      put1.add(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
      table.put(put1);
      admin.flush(tableName.getName());
      admin.flush(tableName);
      List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
      for (HRegion region : regions) {
        Store store = region.getStore(fam);

@ -323,8 +322,8 @@ public class TestTags {
      value1 = Bytes.toBytes("1000dfsdf");
      put1.add(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
      table.put(put1);
      admin.flush(tableName.getName());
      regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
      admin.flush(tableName);
      regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
      for (HRegion region : regions) {
        Store store = region.getStore(fam);
        while (!(store.getStorefilesCount() > 1)) {

@ -340,8 +339,8 @@ public class TestTags {
      put2.add(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
      put.setAttribute("visibility", Bytes.toBytes("ram"));
      table.put(put2);
      admin.flush(tableName.getName());
      regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
      admin.flush(tableName);
      regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
      for (HRegion region : regions) {
        Store store = region.getStore(fam);
        while (!(store.getStorefilesCount() > 2)) {

@ -372,7 +371,7 @@ public class TestTags {
        }
        TestCoprocessorForTags.checkTagPresence = false;
      }
      while (admin.getCompactionState(tableName.getName()) != CompactionState.NONE) {
      while (admin.getCompactionState(tableName) != CompactionState.NONE) {
        Thread.sleep(10);
      }
      TestCoprocessorForTags.checkTagPresence = true;

@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;

@ -324,7 +323,7 @@ public class TestLogRolling {
    TEST_UTIL.ensureSomeRegionServersAvailable(2);
    assertTrue("This test requires HLog file replication set to 2.",
      fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) == 2);
    LOG.info("Replication=" +
    LOG.info("Replication=" +
      fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));

    this.server = cluster.getRegionServer(0);

@ -363,9 +362,9 @@ public class TestLogRolling {
    }

    assertTrue("DataNodes " + dfsCluster.getDataNodes().size() +
      " default replication " +
      " default replication " +
      fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()),
      dfsCluster.getDataNodes().size() >=
      dfsCluster.getDataNodes().size() >=
      fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) + 1);

    writeData(table, 2);

@ -378,7 +377,7 @@ public class TestLogRolling {
    assertTrue("The log shouldn't have rolled yet",
      oldFilenum == ((FSHLog) log).getFilenum());
    final DatanodeInfo[] pipeline = getPipeline(log);
    assertTrue(pipeline.length ==
    assertTrue(pipeline.length ==
      fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));

    // kill a datanode in the pipeline to force a log roll on the next sync()

@ -414,7 +413,7 @@ public class TestLogRolling {
    batchWriteAndWait(table, 13, true, 10000);
    assertTrue("New log file should have the default replication instead of " +
      ((FSHLog) log).getLogReplication(),
      ((FSHLog) log).getLogReplication() ==
      ((FSHLog) log).getLogReplication() ==
      fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));
    assertTrue("LowReplication Roller should've been enabled",
      log.isLowReplicationRollEnabled());

@ -430,7 +429,7 @@ public class TestLogRolling {
    LOG.info("Starting testLogRollOnPipelineRestart");
    assertTrue("This test requires HLog file replication.",
      fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) > 1);
    LOG.info("Replication=" +
    LOG.info("Replication=" +
      fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));
    // When the hbase:meta table can be opened, the region servers are running
    HTable t = new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);

@ -610,12 +609,12 @@ public class TestLogRolling {
    Store s = region.getStore(HConstants.CATALOG_FAMILY);

    //have to flush namespace to ensure it doesn't affect wall tests
    admin.flush(TableName.NAMESPACE_TABLE_NAME.getName());
    admin.flush(TableName.NAMESPACE_TABLE_NAME);

    // Put some stuff into table2, to make sure we have some files to compact.
    for (int i = 1; i <= 2; ++i) {
      doPut(table2, i);
      admin.flush(table2.getTableName());
      admin.flush(table2.getName());
    }
    doPut(table2, 3); // don't flush yet, or compaction might trigger before we roll WAL
    assertEquals("Should have no WAL after initial writes", 0, fshLog.getNumRolledLogFiles());
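System tables can now be flushed through their TableName constants, and user tables through the TableName their HTable handle exposes, as the lines above show. A sketch of both (assumptions as in the first sketch; note HTable.getName() returns a TableName in this API, where the old getTableName() returned byte[]):

    // Sketch only: flush hbase:namespace by constant and a user table by handle.
    static void flushBoth(Admin admin, HTable table)
        throws IOException, InterruptedException {
      admin.flush(TableName.NAMESPACE_TABLE_NAME); // was: ...NAMESPACE_TABLE_NAME.getName()
      admin.flush(table.getName());                // was: admin.flush(table.getTableName())
    }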
@ -624,7 +623,7 @@ public class TestLogRolling {
    // Roll the log and compact table2, to have compaction record in the 2nd WAL.
    fshLog.rollWriter();
    assertEquals("Should have WAL; one table is not flushed", 1, fshLog.getNumRolledLogFiles());
    admin.flush(table2.getTableName());
    admin.flush(table2.getName());
    region.compactStores();
    // Wait for compaction in case if flush triggered it before us.
    Assert.assertNotNull(s);

@ -639,7 +638,7 @@ public class TestLogRolling {
    assertEquals("Should have WAL; one table is not flushed", 1, fshLog.getNumRolledLogFiles());

    // Flush table to make latest WAL obsolete; write another record, and roll again.
    admin.flush(table.getTableName());
    admin.flush(table.getName());
    doPut(table, 1);
    fshLog.rollWriter(); // Now 2nd WAL is deleted and 3rd is added.
    assertEquals("Should have 1 WALs at the end", 1, fshLog.getNumRolledLogFiles());

@ -32,7 +32,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Durability;

@ -63,7 +62,7 @@ public class TestTableResource {
  private static Map<HRegionInfo, ServerName> regionMap;

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final HBaseRESTTestingUtility REST_TEST_UTIL =
  private static final HBaseRESTTestingUtility REST_TEST_UTIL =
    new HBaseRESTTestingUtility();
  private static Client client;
  private static JAXBContext context;

@ -72,7 +71,7 @@ public class TestTableResource {
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster(3);
    REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
    client = new Client(new Cluster().add("localhost",
    client = new Client(new Cluster().add("localhost",
      REST_TEST_UTIL.getServletPort()));
    context = JAXBContext.newInstance(
      TableModel.class,

@ -107,7 +106,7 @@ public class TestTableResource {
    Map<HRegionInfo, ServerName> m = table.getRegionLocations();
    assertEquals(m.size(), 1);
    // tell the master to split the table
    admin.split(TABLE.toBytes());
    admin.split(TABLE);
    // give some time for the split to happen

    long timeout = System.currentTimeMillis() + (15 * 1000);
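split(TableName) asks the master to split every region of the table at server-chosen points, which is all the REST test needs; a sketch:

    // Sketch only: table-level split; the master picks the split points.
    static void splitTable(Admin admin, TableName tn)
        throws IOException, InterruptedException {
      admin.split(tn); // was: admin.split(TABLE.toBytes())
    }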
@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.security.User;

@ -133,7 +132,7 @@ public class TestTablePermissions {
  }

  /**
   * Writes a set of permissions as {@link org.apache.hadoop.io.Writable} instances
   * Writes a set of permissions as {@link org.apache.hadoop.io.Writable} instances
   * and returns the resulting byte array. Used to verify we can read stuff written
   * with Writable.
   */

@ -157,7 +156,7 @@ public class TestTablePermissions {
   * @param conf
   * @throws IOException
   */
  public static void writePermissions(DataOutput out,
  public static void writePermissions(DataOutput out,
      ListMultimap<String,? extends Permission> perms, Configuration conf)
      throws IOException {
    Set<String> keys = perms.keySet();

@ -294,7 +293,7 @@ public class TestTablePermissions {
    table.put(new Put(Bytes.toBytes("row2"))
        .add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v2")));
    Admin admin = UTIL.getHBaseAdmin();
    admin.split(TEST_TABLE.getName());
    admin.split(TEST_TABLE);

    // wait for split
    Thread.sleep(10000);

@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;

@ -115,6 +114,7 @@ public class TestVisibilityLabelsWithDeletes {
        SECRET);
    try {
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          HTable table = null;
          try {

@ -133,7 +133,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL));
      ResultScanner scanner = table.getScanner(s);

@ -160,6 +160,7 @@ public class TestVisibilityLabelsWithDeletes {
        + TOPSECRET);
    try {
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -175,7 +176,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL));
      ResultScanner scanner = table.getScanner(s);

@ -202,6 +203,7 @@ public class TestVisibilityLabelsWithDeletes {
        + TOPSECRET, SECRET);
    try {
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          HTable table = null;
          try {

@ -220,7 +222,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL));
      ResultScanner scanner = table.getScanner(s);

@ -247,6 +249,7 @@ public class TestVisibilityLabelsWithDeletes {
        + TOPSECRET, SECRET);
    try {
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          HTable table = null;
          try {

@ -265,7 +268,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL));
      ResultScanner scanner = table.getScanner(s);

@ -290,8 +293,9 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPuts(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -308,7 +312,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -351,8 +355,9 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPuts(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -407,8 +412,9 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPuts(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -467,8 +473,9 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPuts(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -534,8 +541,9 @@ public class TestVisibilityLabelsWithDeletes {
      put.add(fam, qual, value);
      put.setCellVisibility(new CellVisibility(SECRET));
      table.put(put);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -562,6 +570,7 @@ public class TestVisibilityLabelsWithDeletes {
      put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
      table.put(put);
      actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -616,8 +625,9 @@ public class TestVisibilityLabelsWithDeletes {
      put.add(fam, qual, value);
      put.setCellVisibility(new CellVisibility(SECRET));
      table.put(put);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -644,6 +654,7 @@ public class TestVisibilityLabelsWithDeletes {
      put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
      table.put(put);
      actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -700,6 +711,7 @@ public class TestVisibilityLabelsWithDeletes {
      table.put(put);
      table.flushCommits();
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -755,7 +767,7 @@ public class TestVisibilityLabelsWithDeletes {
      put.setCellVisibility(new CellVisibility(SECRET));
      table.put(put);
      table.flushCommits();
      //TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      //TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(CONFIDENTIAL, SECRET));

@ -763,6 +775,7 @@ public class TestVisibilityLabelsWithDeletes {
      Result[] next = scanner.next(3);
      assertEquals(next.length, 1);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -806,8 +819,9 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPuts(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -865,8 +879,9 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPuts(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -958,7 +973,7 @@ public class TestVisibilityLabelsWithDeletes {
      put.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|("
          + TOPSECRET + "&" + SECRET+")"));
      table.put(put);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      put = new Put(Bytes.toBytes("row2"));
      put.add(fam, qual, 127l, value);
      put.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET

@ -1027,7 +1042,7 @@ public class TestVisibilityLabelsWithDeletes {
      put = new Put(Bytes.toBytes("row1"));
      put.add(fam, qual, 127l, value);
      table.put(put);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      put = new Put(Bytes.toBytes("row2"));
      put.add(fam, qual, 127l, value);
      table.put(put);

@ -1043,8 +1058,9 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPuts(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1061,7 +1077,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -1113,8 +1129,9 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPuts(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1130,7 +1147,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -1177,12 +1194,13 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPuts(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Put put = new Put(Bytes.toBytes("row1"));
      put.add(fam, qual, 128l, value);
      put.setCellVisibility(new CellVisibility(TOPSECRET));
      table.put(put);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1198,7 +1216,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -1242,7 +1260,7 @@ public class TestVisibilityLabelsWithDeletes {
      put.setCellVisibility(new CellVisibility(SECRET));
      table.put(put);
      table.flushCommits();
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -1269,8 +1287,9 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPuts(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1285,13 +1304,13 @@ public class TestVisibilityLabelsWithDeletes {
      }
      };
      SUPERUSER.runAs(actiona);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Put put = new Put(Bytes.toBytes("row3"));
      put.add(fam, qual, 127l, value);
      put.setCellVisibility(new CellVisibility(CONFIDENTIAL + "&" + PRIVATE));
      table.put(put);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().majorCompact(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      TEST_UTIL.getHBaseAdmin().majorCompact(tableName);
      // Sleep to ensure compaction happens. Need to do it in a better way
      Thread.sleep(5000);
      Scan s = new Scan();
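The flush-then-majorCompact sequence just above is the test's way of forcing delete markers and visibility cells through a storefile rewrite; the fixed sleep is the test's own wait. Condensed into one helper (same assumptions as the earlier sketches):

    // Sketch only: flush, then major-compact, then the test's crude wait.
    static void flushThenMajorCompact(Admin admin, TableName tn) throws Exception {
      admin.flush(tn);
      admin.majorCompact(tn); // was: majorCompact(String)
      Thread.sleep(5000);     // as in the test; polling getCompactionState is tighter
    }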
@ -1340,8 +1359,9 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPuts(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1357,7 +1377,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -1394,8 +1414,9 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPutsWithDiffCols(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1411,7 +1432,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -1470,8 +1491,9 @@ public class TestVisibilityLabelsWithDeletes {
      put.add(fam, qual1, 126l, value);
      put.setCellVisibility(new CellVisibility(SECRET));
      table.put(put);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1523,8 +1545,9 @@ public class TestVisibilityLabelsWithDeletes {
      put.add(fam, qual1, 126l, value);
      put.setCellVisibility(new CellVisibility(SECRET));
      table.put(put);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1563,8 +1586,9 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPutsWithoutVisibility(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1579,7 +1603,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -1608,6 +1632,7 @@ public class TestVisibilityLabelsWithDeletes {
    try {
      table = doPutsWithoutVisibility(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1622,7 +1647,7 @@ public class TestVisibilityLabelsWithDeletes {
      }
      };
      SUPERUSER.runAs(actiona);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -1674,8 +1699,9 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPuts(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1692,7 +1718,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -1734,8 +1760,9 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPuts(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1752,13 +1779,13 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Put put = new Put(Bytes.toBytes("row3"));
      put.add(fam, qual, 127l, value);
      put.setCellVisibility(new CellVisibility(CONFIDENTIAL + "&" + PRIVATE));
      table.put(put);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().compact(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      TEST_UTIL.getHBaseAdmin().compact(tableName);
      Thread.sleep(5000);
      // Sleep to ensure compaction happens. Need to do it in a better way
      Scan s = new Scan();
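The same sequence appears here with a minor compaction; for reference, the old and new spellings side by side (a sketch only, assumptions as before):

    // Sketch only: minor compaction after flush; old calls kept as comments.
    static void flushThenCompact(Admin admin, TableName tn) throws Exception {
      // old: admin.flush(tn.getNameAsString());
      // old: admin.compact(tn.getNameAsString());
      admin.flush(tn);
      admin.compact(tn);
      Thread.sleep(5000); // mirrors the test's wait
    }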
@ -1794,6 +1821,7 @@ public class TestVisibilityLabelsWithDeletes {
      // Do not flush here.
      table = doPuts(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1845,6 +1873,7 @@ public class TestVisibilityLabelsWithDeletes {

      // Issue 2nd delete
      actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1894,6 +1923,7 @@ public class TestVisibilityLabelsWithDeletes {
  public void testMultipleDeleteFamilyVersionWithDiffLabels() throws Exception {
    PrivilegedExceptionAction<VisibilityLabelsResponse> action =
        new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
          @Override
          public VisibilityLabelsResponse run() throws Exception {
            try {
              return VisibilityClient.setAuths(conf, new String[] { CONFIDENTIAL, PRIVATE, SECRET },

@ -1908,6 +1938,7 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = doPuts(tableName);
    try {
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1927,7 +1958,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -1964,6 +1995,7 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = doPuts(tableName);
    try {
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -1984,7 +2016,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -2009,6 +2041,7 @@ public class TestVisibilityLabelsWithDeletes {
      assertEquals(current.getTimestamp(), 123l);
      // Issue 2nd delete
      actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2051,6 +2084,7 @@ public class TestVisibilityLabelsWithDeletes {
  public void testSpecificDeletesFollowedByDeleteFamily1() throws Exception {
    PrivilegedExceptionAction<VisibilityLabelsResponse> action =
        new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
          @Override
          public VisibilityLabelsResponse run() throws Exception {
            try {
              return VisibilityClient.setAuths(conf, new String[] { CONFIDENTIAL, PRIVATE, SECRET },

@ -2065,6 +2099,7 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = doPuts(tableName);
    try {
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2086,7 +2121,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -2111,6 +2146,7 @@ public class TestVisibilityLabelsWithDeletes {
      assertEquals(current.getTimestamp(), 123l);
      // Issue 2nd delete
      actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2159,6 +2195,7 @@ public class TestVisibilityLabelsWithDeletes {
      // Do not flush here.
      table = doPuts(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2209,6 +2246,7 @@ public class TestVisibilityLabelsWithDeletes {

      // Issue 2nd delete
      actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2268,6 +2306,7 @@ public class TestVisibilityLabelsWithDeletes {
      // Do not flush here.
      table = doPuts(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2319,6 +2358,7 @@ public class TestVisibilityLabelsWithDeletes {

      // Issue 2nd delete
      actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2381,6 +2421,7 @@ public class TestVisibilityLabelsWithDeletes {
      // Do not flush here.
      table = doPuts(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2437,6 +2478,7 @@ public class TestVisibilityLabelsWithDeletes {

      // Issue 2nd delete
      actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2501,6 +2543,7 @@ public class TestVisibilityLabelsWithDeletes {
      // Do not flush here.
      table = doPuts(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2551,6 +2594,7 @@ public class TestVisibilityLabelsWithDeletes {

      // Issue 2nd delete
      actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2599,6 +2643,7 @@ public class TestVisibilityLabelsWithDeletes {
  private void setAuths() throws IOException, InterruptedException {
    PrivilegedExceptionAction<VisibilityLabelsResponse> action =
        new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
          @Override
          public VisibilityLabelsResponse run() throws Exception {
            try {
              return VisibilityClient.setAuths(conf, new String[] { CONFIDENTIAL, PRIVATE, SECRET,

@ -2620,6 +2665,7 @@ public class TestVisibilityLabelsWithDeletes {
      // Do not flush here.
      table = doPuts(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2671,6 +2717,7 @@ public class TestVisibilityLabelsWithDeletes {

      // Issue 2nd delete
      actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2727,8 +2774,9 @@ public class TestVisibilityLabelsWithDeletes {
    HTable table = null;
    try {
      table = doPuts(tableName);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2743,7 +2791,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -2752,6 +2800,7 @@ public class TestVisibilityLabelsWithDeletes {
      assertTrue(next.length == 2);
      scanAll(next);
      actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2766,7 +2815,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -2776,6 +2825,7 @@ public class TestVisibilityLabelsWithDeletes {
      scanAll(next);

      actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2790,7 +2840,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -2800,6 +2850,7 @@ public class TestVisibilityLabelsWithDeletes {
      scanAll(next);

      actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2814,7 +2865,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -2824,6 +2875,7 @@ public class TestVisibilityLabelsWithDeletes {
      scanAll(next);

      actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2838,7 +2890,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -2848,6 +2900,7 @@ public class TestVisibilityLabelsWithDeletes {
      scanAll(next);

      actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2862,7 +2915,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -2932,8 +2985,9 @@ public class TestVisibilityLabelsWithDeletes {
      put.add(fam, qual, 124l, value);
      put.setCellVisibility(new CellVisibility(CONFIDENTIAL + "|" + PRIVATE));
      table.put(put);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            HTable table = new HTable(conf, TEST_NAME.getMethodName());

@ -2949,7 +3003,7 @@ public class TestVisibilityLabelsWithDeletes {
      };
      SUPERUSER.runAs(actiona);

      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      Scan s = new Scan();
      s.setMaxVersions(5);
      s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));

@ -3004,7 +3058,7 @@ public class TestVisibilityLabelsWithDeletes {
      put.setCellVisibility(new CellVisibility(labelExp));
      puts.add(put);
      table.put(put);
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      i++;
    }
    return table;

@ -3013,6 +3067,7 @@ public class TestVisibilityLabelsWithDeletes {
  public static void addLabels() throws Exception {
    PrivilegedExceptionAction<VisibilityLabelsResponse> action =
        new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
          @Override
          public VisibilityLabelsResponse run() throws Exception {
            String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE };
            try {

@ -87,8 +87,9 @@ public class TestVisibilityWithCheckAuths {
  }

  public static void addLabels() throws Exception {
    PrivilegedExceptionAction<VisibilityLabelsResponse> action =
    PrivilegedExceptionAction<VisibilityLabelsResponse> action =
        new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
          @Override
          public VisibilityLabelsResponse run() throws Exception {
            String[] labels = { TOPSECRET };
            try {

@ -104,8 +105,9 @@ public class TestVisibilityWithCheckAuths {

  @Test
  public void testVerifyAccessDeniedForInvalidUserAuths() throws Exception {
    PrivilegedExceptionAction<VisibilityLabelsResponse> action =
    PrivilegedExceptionAction<VisibilityLabelsResponse> action =
        new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
          @Override
          public VisibilityLabelsResponse run() throws Exception {
            try {
              return VisibilityClient.setAuths(conf, new String[] { TOPSECRET },

@ -125,8 +127,9 @@ public class TestVisibilityWithCheckAuths {
    hBaseAdmin.createTable(desc);
    HTable table = null;
    try {
      TEST_UTIL.getHBaseAdmin().flush(tableName.getNameAsString());
      TEST_UTIL.getHBaseAdmin().flush(tableName);
      PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          HTable table = null;
          try {

@ -152,8 +155,9 @@ public class TestVisibilityWithCheckAuths {

  @Test
  public void testLabelsWithAppend() throws Throwable {
    PrivilegedExceptionAction<VisibilityLabelsResponse> action =
    PrivilegedExceptionAction<VisibilityLabelsResponse> action =
        new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
          @Override
          public VisibilityLabelsResponse run() throws Exception {
            try {
              return VisibilityClient.setAuths(conf, new String[] { TOPSECRET },

@ -171,6 +175,7 @@ public class TestVisibilityWithCheckAuths {
    final byte[] row1 = Bytes.toBytes("row1");
    final byte[] val = Bytes.toBytes("a");
    PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        HTable table = null;
        try {

@ -187,6 +192,7 @@ public class TestVisibilityWithCheckAuths {
    };
    USER.runAs(actiona);
    actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        HTable table = null;
        try {

@ -202,6 +208,7 @@ public class TestVisibilityWithCheckAuths {
    };
    USER.runAs(actiona);
    actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        HTable table = null;
        try {

@ -37,7 +37,6 @@ import java.util.HashSet;
|
|||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.NavigableMap;
|
||||
import java.util.Set;
|
||||
|
@ -64,7 +63,6 @@ import org.apache.hadoop.hbase.HRegionLocation;
|
|||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.LargeTests;
|
||||
import org.apache.hadoop.hbase.MiniHBaseCluster;
|
||||
import org.apache.hadoop.hbase.RegionLocations;
|
||||
import org.apache.hadoop.hbase.ServerName;
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.MetaTableAccessor;
|
||||
|
@ -77,17 +75,14 @@ import org.apache.hadoop.hbase.client.HConnection;
|
|||
import org.apache.hadoop.hbase.client.HConnectionManager;
|
||||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.client.MetaScanner;
|
||||
import org.apache.hadoop.hbase.client.Mutation;
|
||||
import org.apache.hadoop.hbase.client.Put;
|
||||
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
|
||||
import org.apache.hadoop.hbase.client.Result;
|
||||
import org.apache.hadoop.hbase.client.ResultScanner;
|
||||
import org.apache.hadoop.hbase.client.RowMutations;
|
||||
import org.apache.hadoop.hbase.client.Scan;
|
||||
import org.apache.hadoop.hbase.io.hfile.TestHFile;
|
||||
import org.apache.hadoop.hbase.master.AssignmentManager;
|
||||
import org.apache.hadoop.hbase.master.HMaster;
|
||||
import org.apache.hadoop.hbase.master.RegionState;
|
||||
import org.apache.hadoop.hbase.master.RegionStates;
|
||||
import org.apache.hadoop.hbase.master.TableLockManager;
|
||||
import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
|
||||
|
@ -589,7 +584,7 @@ public class TestHBaseFsck {
|
|||
TableName.valueOf("testHbckWithRegionReplica");
|
||||
try {
|
||||
setupTableWithRegionReplica(table, 2);
|
||||
TEST_UTIL.getHBaseAdmin().flush(table.getName());
|
||||
TEST_UTIL.getHBaseAdmin().flush(table);
|
||||
assertNoErrors(doFsck(conf, false));
|
||||
} finally {
|
||||
deleteTable(table);
|
||||
|
@ -602,7 +597,7 @@ public class TestHBaseFsck {
|
|||
TableName.valueOf("testHbckWithFewerReplica");
|
||||
try {
|
||||
setupTableWithRegionReplica(table, 2);
|
||||
TEST_UTIL.getHBaseAdmin().flush(table.getName());
|
||||
TEST_UTIL.getHBaseAdmin().flush(table);
|
||||
assertNoErrors(doFsck(conf, false));
|
||||
assertEquals(ROWKEYS.length, countRows());
|
||||
deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
|
||||
|
@ -626,7 +621,7 @@ public class TestHBaseFsck {
|
|||
TableName.valueOf("testHbckWithExcessReplica");
|
||||
try {
|
||||
setupTableWithRegionReplica(table, 2);
|
||||
TEST_UTIL.getHBaseAdmin().flush(table.getName());
|
||||
TEST_UTIL.getHBaseAdmin().flush(table);
|
||||
assertNoErrors(doFsck(conf, false));
|
||||
assertEquals(ROWKEYS.length, countRows());
|
||||
// the next few lines inject a location in meta for a replica, and then
|
||||
|
@@ -1183,7 +1178,7 @@ public class TestHBaseFsck {
       assertEquals(ROWKEYS.length, countRows());

       // make sure data in regions, if in hlog only there is no data loss
-      TEST_UTIL.getHBaseAdmin().flush(table.getName());
+      TEST_UTIL.getHBaseAdmin().flush(table);

       // Mess it up by leaving a hole in the hdfs data
       deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
@@ -1230,7 +1225,7 @@ public class TestHBaseFsck {
         }
       }
       // make sure data in regions
-      TEST_UTIL.getHBaseAdmin().flush(table.getName());
+      TEST_UTIL.getHBaseAdmin().flush(table);

       // Mess it up by leaving a hole in the hdfs data
       deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
@@ -1289,7 +1284,7 @@ public class TestHBaseFsck {
       assertEquals(ROWKEYS.length, countRows());

       // make sure data in regions, if in hlog only there is no data loss
-      TEST_UTIL.getHBaseAdmin().flush(table.getName());
+      TEST_UTIL.getHBaseAdmin().flush(table);

       // Mess it up by deleting hdfs dirs
       deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes(""),
@@ -1423,14 +1418,14 @@ public class TestHBaseFsck {
     try {
       setupTable(table1);
       // make sure data in regions, if in hlog only there is no data loss
-      TEST_UTIL.getHBaseAdmin().flush(table1.getName());
+      TEST_UTIL.getHBaseAdmin().flush(table1);
       // Mess them up by leaving a hole in the hdfs data
       deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
         Bytes.toBytes("C"), false, false, true); // don't rm meta

       setupTable(table2);
       // make sure data in regions, if in hlog only there is no data loss
-      TEST_UTIL.getHBaseAdmin().flush(table2.getName());
+      TEST_UTIL.getHBaseAdmin().flush(table2);
       // Mess them up by leaving a hole in the hdfs data
       deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
         Bytes.toBytes("C"), false, false, true); // don't rm meta
@@ -1470,7 +1465,7 @@ public class TestHBaseFsck {
       assertEquals(ROWKEYS.length, countRows());

       // make sure data in regions, if in hlog only there is no data loss
-      TEST_UTIL.getHBaseAdmin().flush(table.getName());
+      TEST_UTIL.getHBaseAdmin().flush(table);
       HRegionLocation location = tbl.getRegionLocation("B");

       // Delete one region from meta, but not hdfs, unassign it.
@@ -1492,7 +1487,7 @@ public class TestHBaseFsck {

       MetaTableAccessor.addRegionToMeta(meta, hri, a, b);
       meta.flushCommits();
-      TEST_UTIL.getHBaseAdmin().flush(TableName.META_TABLE_NAME.getName());
+      TEST_UTIL.getHBaseAdmin().flush(TableName.META_TABLE_NAME);

       HBaseFsck hbck = doFsck(conf, false);
       assertErrors(hbck, new ERROR_CODE[] {
@@ -1522,7 +1517,7 @@ public class TestHBaseFsck {
         HConstants.SPLITA_QUALIFIER).isEmpty());
       assertTrue(result.getColumnCells(HConstants.CATALOG_FAMILY,
         HConstants.SPLITB_QUALIFIER).isEmpty());
-      TEST_UTIL.getHBaseAdmin().flush(TableName.META_TABLE_NAME.getName());
+      TEST_UTIL.getHBaseAdmin().flush(TableName.META_TABLE_NAME);

       // fix other issues
       doFsck(conf, true);
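
The two hbase:meta hunks above apply the same migration to the catalog table. A short sketch, assuming an Admin handle is in scope (the wrapper class is illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

class MetaFlushSketch {
  static void flushMeta(Admin admin) throws IOException, InterruptedException {
    // No more META_TABLE_NAME.getName() byte[] round-trip: the TableName
    // constant is passed to the typed flush directly.
    admin.flush(TableName.META_TABLE_NAME);
  }
}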
@@ -1550,7 +1545,7 @@ public class TestHBaseFsck {
       assertEquals(ROWKEYS.length, countRows());

       // make sure data in regions, if in hlog only there is no data loss
-      TEST_UTIL.getHBaseAdmin().flush(table.getName());
+      TEST_UTIL.getHBaseAdmin().flush(table);
       HRegionLocation location = tbl.getRegionLocation("B");

       meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName());
@@ -1559,7 +1554,7 @@ public class TestHBaseFsck {
       // do a regular split
       Admin admin = TEST_UTIL.getHBaseAdmin();
       byte[] regionName = location.getRegionInfo().getRegionName();
-      admin.split(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
+      admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
       TestEndToEndSplitTransaction.blockUntilRegionSplit(
         TEST_UTIL.getConfiguration(), 60000, regionName, true);

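
The split call sites change shape as well: a region-scoped request now goes through splitRegion, so a region name can no longer be passed through the table-oriented overload. A sketch of the new call, assuming the Admin handle and region bytes are obtained as in the test above:

import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

class SplitRegionSketch {
  static void splitAt(Admin admin, byte[] regionName) throws Exception {
    // First argument is the region name (not a table name); the second is
    // the explicit split point, "BM" in the test above.
    admin.splitRegion(regionName, Bytes.toBytes("BM"));
  }
}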
@@ -1600,7 +1595,7 @@ public class TestHBaseFsck {
       assertEquals(ROWKEYS.length, countRows());

       // make sure data in regions, if in hlog only there is no data loss
-      TEST_UTIL.getHBaseAdmin().flush(table.getName());
+      TEST_UTIL.getHBaseAdmin().flush(table);
       HRegionLocation location = tbl.getRegionLocation("B");

       meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName());
@@ -1609,7 +1604,7 @@ public class TestHBaseFsck {
       // do a regular split
       Admin admin = TEST_UTIL.getHBaseAdmin();
       byte[] regionName = location.getRegionInfo().getRegionName();
-      admin.split(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
+      admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
       TestEndToEndSplitTransaction.blockUntilRegionSplit(
         TEST_UTIL.getConfiguration(), 60000, regionName, true);

@@ -1695,7 +1690,7 @@ public class TestHBaseFsck {
         TableName.valueOf("testSingleRegionDeployedNotInHdfs");
     try {
       setupTable(table);
-      TEST_UTIL.getHBaseAdmin().flush(table.getName());
+      TEST_UTIL.getHBaseAdmin().flush(table);

       // Mess it up by deleting region dir
       deleteRegion(conf, tbl.getTableDescriptor(),
@@ -1943,7 +1938,7 @@ public class TestHBaseFsck {
     try {
       setupTable(table);
       assertEquals(ROWKEYS.length, countRows());
-      TEST_UTIL.getHBaseAdmin().flush(table.getName()); // flush is async.
+      TEST_UTIL.getHBaseAdmin().flush(table); // flush is async.

       FileSystem fs = FileSystem.get(conf);
       Path hfile = getFlushedHFile(fs, table);
@@ -1982,7 +1977,7 @@ public class TestHBaseFsck {
     try {
       setupTable(table);
       assertEquals(ROWKEYS.length, countRows());
-      TEST_UTIL.getHBaseAdmin().flush(table.getName()); // flush is async.
+      TEST_UTIL.getHBaseAdmin().flush(table); // flush is async.

       // Mess it up by leaving a hole in the assignment, meta, and hdfs data
       TEST_UTIL.getHBaseAdmin().disableTable(table);
@@ -2458,7 +2453,7 @@ public class TestHBaseFsck {
       assertEquals(ROWKEYS.length, countRows());

       // make sure data in regions, if in hlog only there is no data loss
-      TEST_UTIL.getHBaseAdmin().flush(table.getName());
+      TEST_UTIL.getHBaseAdmin().flush(table);
       HRegionInfo region1 = tbl.getRegionLocation("A").getRegionInfo();
       HRegionInfo region2 = tbl.getRegionLocation("B").getRegionInfo();


@@ -116,7 +116,7 @@ public class TestHBaseFsckEncryption {
       table.close();
     }
     // Flush it
-    TEST_UTIL.getHBaseAdmin().flush(htd.getName());
+    TEST_UTIL.getHBaseAdmin().flush(htd.getTableName());

    // Verify we have encrypted store files on disk
    final List<Path> paths = findStorefilePaths(htd.getName());
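
The TestHBaseFsckEncryption hunk highlights the descriptor side of the change: HTableDescriptor carries both the raw byte[] name and the typed TableName, and the typed Admin methods want the latter. A sketch under that assumption (wrapper class and method are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Admin;

class DescriptorFlushSketch {
  static void flushFor(Admin admin, HTableDescriptor htd)
      throws IOException, InterruptedException {
    // htd.getName() still returns the raw byte[] and remains suitable for
    // byte-level work such as locating store file paths; the flush call now
    // takes the typed name from htd.getTableName().
    admin.flush(htd.getTableName());
  }
}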