HBASE-21872 Use a call that defaults to UTF-8 charset for string to byte encoding

Fixed commit message

Signed-off-by: Sean Busbey <busbey@apache.org>

commit ae0198084c (parent d0e4912f67)
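The motivation behind the change: String.getBytes() with no argument encodes
using the JVM's platform default charset, so the same string can produce
different bytes on differently configured hosts, while Bytes.toBytes(String)
always encodes UTF-8. A minimal sketch of the pitfall (plain JDK; the class
name CharsetPitfall is illustrative, and StandardCharsets.UTF_8 stands in for
the charset the HBase Bytes utility encodes with):

    import java.nio.charset.Charset;
    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public class CharsetPitfall {
      public static void main(String[] args) {
        String s = "séssion"; // non-ASCII content is where charsets diverge

        // Platform-dependent: varies with the JVM's default charset
        // (file.encoding / OS locale).
        byte[] platformBytes = s.getBytes();

        // Deterministic: always UTF-8, which is what Bytes.toBytes(String)
        // provides.
        byte[] utf8Bytes = s.getBytes(StandardCharsets.UTF_8);

        System.out.println("default charset: " + Charset.defaultCharset());
        System.out.println("bytes identical: " + Arrays.equals(platformBytes, utf8Bytes));
      }
    }

On a UTF-8 locale the two arrays match; under e.g. ISO-8859-1 they differ,
which is exactly the environment-dependent behavior the patch removes.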
@@ -141,12 +141,12 @@ public final class BackupSystemTable implements Closeable {
 /**
  * Stores backup sessions (contexts)
  */
-final static byte[] SESSIONS_FAMILY = "session".getBytes();
+final static byte[] SESSIONS_FAMILY = Bytes.toBytes("session");
 /**
  * Stores other meta
  */
-final static byte[] META_FAMILY = "meta".getBytes();
+final static byte[] META_FAMILY = Bytes.toBytes("meta");
-final static byte[] BULK_LOAD_FAMILY = "bulk".getBytes();
+final static byte[] BULK_LOAD_FAMILY = Bytes.toBytes("bulk");
 /**
  * Connection to HBase cluster, shared among all instances
  */
@@ -154,20 +154,20 @@ public final class BackupSystemTable implements Closeable {

 private final static String BACKUP_INFO_PREFIX = "session:";
 private final static String START_CODE_ROW = "startcode:";
-private final static byte[] ACTIVE_SESSION_ROW = "activesession:".getBytes();
+private final static byte[] ACTIVE_SESSION_ROW = Bytes.toBytes("activesession:");
-private final static byte[] ACTIVE_SESSION_COL = "c".getBytes();
+private final static byte[] ACTIVE_SESSION_COL = Bytes.toBytes("c");

-private final static byte[] ACTIVE_SESSION_YES = "yes".getBytes();
+private final static byte[] ACTIVE_SESSION_YES = Bytes.toBytes("yes");
-private final static byte[] ACTIVE_SESSION_NO = "no".getBytes();
+private final static byte[] ACTIVE_SESSION_NO = Bytes.toBytes("no");

 private final static String INCR_BACKUP_SET = "incrbackupset:";
 private final static String TABLE_RS_LOG_MAP_PREFIX = "trslm:";
 private final static String RS_LOG_TS_PREFIX = "rslogts:";

 private final static String BULK_LOAD_PREFIX = "bulk:";
-private final static byte[] BULK_LOAD_PREFIX_BYTES = BULK_LOAD_PREFIX.getBytes();
+private final static byte[] BULK_LOAD_PREFIX_BYTES = Bytes.toBytes(BULK_LOAD_PREFIX);
-private final static byte[] DELETE_OP_ROW = "delete_op_row".getBytes();
+private final static byte[] DELETE_OP_ROW = Bytes.toBytes("delete_op_row");
-private final static byte[] MERGE_OP_ROW = "merge_op_row".getBytes();
+private final static byte[] MERGE_OP_ROW = Bytes.toBytes("merge_op_row");

 final static byte[] TBL_COL = Bytes.toBytes("tbl");
 final static byte[] FAM_COL = Bytes.toBytes("fam");
@@ -1615,7 +1615,7 @@ public final class BackupSystemTable implements Closeable {
 Bytes.toString(region), BLK_LD_DELIM, filename));
 put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, table.getName());
 put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, entry.getKey());
-put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, file.getBytes());
+put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, Bytes.toBytes(file));
 put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_COMMIT);
 puts.add(put);
 LOG.debug(
@@ -1695,7 +1695,7 @@ public final class BackupSystemTable implements Closeable {
 Bytes.toString(region), BLK_LD_DELIM, filename));
 put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, table.getName());
 put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, family);
-put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, file.getBytes());
+put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, Bytes.toBytes(file));
 put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_PREPARE);
 puts.add(put);
 LOG.debug("writing raw bulk path " + file + " for " + table + " " + Bytes.toString(region));
@@ -1902,7 +1902,7 @@ public final class BackupSystemTable implements Closeable {
 Put put = new Put(rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM + ts + BLK_LD_DELIM + idx));
 put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, tn.getName());
 put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, fam);
-put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, p.getBytes());
+put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, Bytes.toBytes(p));
 return put;
 }

@@ -2006,7 +2006,7 @@ public final class BackupSystemTable implements Closeable {
 }

 private byte[] convertToByteArray(String[] tables) {
-return StringUtils.join(tables, ",").getBytes();
+return Bytes.toBytes(StringUtils.join(tables, ","));
 }

 /**
@@ -2037,6 +2037,6 @@ public final class BackupSystemTable implements Closeable {
 for (String ss : other) {
 sb.append(ss);
 }
-return sb.toString().getBytes();
+return Bytes.toBytes(sb.toString());
 }
 }
@@ -155,11 +155,11 @@ public class IncrementalTableBackupClient extends TableBackupClient {
 String fam = famEntry.getKey();
 Path famDir = new Path(regionDir, fam);
 List<Path> files;
-if (!mapForSrc[srcIdx].containsKey(fam.getBytes())) {
+if (!mapForSrc[srcIdx].containsKey(Bytes.toBytes(fam))) {
 files = new ArrayList<>();
-mapForSrc[srcIdx].put(fam.getBytes(), files);
+mapForSrc[srcIdx].put(Bytes.toBytes(fam), files);
 } else {
-files = mapForSrc[srcIdx].get(fam.getBytes());
+files = mapForSrc[srcIdx].get(Bytes.toBytes(fam));
 }
 Path archiveDir = HFileArchiveUtil.getStoreArchivePath(conf, srcTable, regionName, fam);
 String tblName = srcTable.getQualifierAsString();
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
 import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinationManager;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessChecker;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -130,7 +131,7 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
 byte[] data = new byte[0];
 if (conf.size() > 0) {
 // Get backup root path
-data = conf.get(0).getValue().getBytes();
+data = Bytes.toBytes(conf.get(0).getValue());
 }
 Procedure proc = coordinator.startProcedure(monitor, desc.getInstance(), data, servers);
 if (proc == null) {
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -63,7 +64,7 @@ public class TestBackupDeleteRestore extends TestBackupBase {
 HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
 // delete row
 try (Table table = TEST_UTIL.getConnection().getTable(table1)) {
-Delete delete = new Delete("row0".getBytes());
+Delete delete = new Delete(Bytes.toBytes("row0"));
 table.delete(delete);
 hba.flush(table1);
 }
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -132,7 +133,7 @@ public class TestBackupHFileCleaner {
 sTableList.add(tableName);
 Map<byte[], List<Path>>[] maps = new Map[1];
 maps[0] = new HashMap<>();
-maps[0].put(famName.getBytes(), list);
+maps[0].put(Bytes.toBytes(famName), list);
 sysTbl.writeBulkLoadedFiles(sTableList, maps, "1");
 }

@@ -87,12 +87,12 @@ public class RESTDemoClient {
 try (RemoteHTable remoteTable = new RemoteHTable(restClient, conf, "example")) {
 // Write data to the table
 String rowKey = "row1";
-Put p = new Put(rowKey.getBytes());
+Put p = new Put(Bytes.toBytes(rowKey));
-p.addColumn("family1".getBytes(), "qualifier1".getBytes(), "value1".getBytes());
+p.addColumn(Bytes.toBytes("family1"), Bytes.toBytes("qualifier1"), Bytes.toBytes("value1"));
 remoteTable.put(p);

 // Get the data from the table
-Get g = new Get(rowKey.getBytes());
+Get g = new Get(Bytes.toBytes(rowKey));
 Result result = remoteTable.get(g);

 Preconditions.checkArgument(result != null,
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.thrift2.generated.TGet;
 import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
 import org.apache.hadoop.hbase.thrift2.generated.TPut;
 import org.apache.hadoop.hbase.thrift2.generated.TResult;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.apache.thrift.protocol.TProtocol;
 import org.apache.thrift.transport.TFramedTransport;
@@ -116,15 +117,15 @@ public class DemoClient {
 // open the transport
 transport.open();

-ByteBuffer table = ByteBuffer.wrap("example".getBytes());
+ByteBuffer table = ByteBuffer.wrap(Bytes.toBytes("example"));

 TPut put = new TPut();
-put.setRow("row1".getBytes());
+put.setRow(Bytes.toBytes("row1"));

 TColumnValue columnValue = new TColumnValue();
-columnValue.setFamily("family1".getBytes());
+columnValue.setFamily(Bytes.toBytes("family1"));
-columnValue.setQualifier("qualifier1".getBytes());
+columnValue.setQualifier(Bytes.toBytes("qualifier1"));
-columnValue.setValue("value1".getBytes());
+columnValue.setValue(Bytes.toBytes("value1"));
 List<TColumnValue> columnValues = new ArrayList<>(1);
 columnValues.add(columnValue);
 put.setColumnValues(columnValues);
@@ -132,7 +133,7 @@ public class DemoClient {
 client.put(table, put);

 TGet get = new TGet();
-get.setRow("row1".getBytes());
+get.setRow(Bytes.toBytes("row1"));

 TResult result = client.get(table, get);

@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.http;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;

+import org.apache.hadoop.hbase.util.Bytes;
+
 import org.apache.yetus.audience.InterfaceAudience;

 /**
@@ -27,11 +29,11 @@ import org.apache.yetus.audience.InterfaceAudience;
 */
 @InterfaceAudience.Private
 public final class HtmlQuoting {
-private static final byte[] ampBytes = "&amp;".getBytes();
+private static final byte[] ampBytes = Bytes.toBytes("&amp;");
-private static final byte[] aposBytes = "&apos;".getBytes();
+private static final byte[] aposBytes = Bytes.toBytes("&apos;");
-private static final byte[] gtBytes = "&gt;".getBytes();
+private static final byte[] gtBytes = Bytes.toBytes("&gt;");
-private static final byte[] ltBytes = "&lt;".getBytes();
+private static final byte[] ltBytes = Bytes.toBytes("&lt;");
-private static final byte[] quotBytes = "&quot;".getBytes();
+private static final byte[] quotBytes = Bytes.toBytes("&quot;");

 /**
 * Does the given string need to be quoted?
@@ -69,7 +71,7 @@ public final class HtmlQuoting {
 if (str == null) {
 return false;
 }
-byte[] bytes = str.getBytes();
+byte[] bytes = Bytes.toBytes(str);
 return needsQuoting(bytes, 0 , bytes.length);
 }

@@ -104,7 +106,7 @@ public final class HtmlQuoting {
 if (item == null) {
 return null;
 }
-byte[] bytes = item.getBytes();
+byte[] bytes = Bytes.toBytes(item);
 if (needsQuoting(bytes, 0, bytes.length)) {
 ByteArrayOutputStream buffer = new ByteArrayOutputStream();
 try {
@@ -112,7 +112,7 @@ public class IntegrationTestLazyCfLoading {

 @Override
 public byte[] getDeterministicUniqueKey(long keyBase) {
-return LoadTestKVGenerator.md5PrefixedKey(keyBase).getBytes();
+return Bytes.toBytes(LoadTestKVGenerator.md5PrefixedKey(keyBase));
 }

 @Override
@@ -545,7 +545,7 @@ public class Import extends Configured implements Tool {
 if(srcAndDest.length != 2) {
 continue;
 }
-cfRenameMap.put(srcAndDest[0].getBytes(), srcAndDest[1].getBytes());
+cfRenameMap.put(Bytes.toBytes(srcAndDest[0]), Bytes.toBytes(srcAndDest[1]));
 }
 }
 return cfRenameMap;
@@ -194,11 +194,11 @@ public class ImportTsv extends Configured implements Tool {
 }
 String[] parts = str.split(":", 2);
 if (parts.length == 1) {
-families[i] = str.getBytes();
+families[i] = Bytes.toBytes(str);
 qualifiers[i] = HConstants.EMPTY_BYTE_ARRAY;
 } else {
-families[i] = parts[0].getBytes();
+families[i] = Bytes.toBytes(parts[0]);
-qualifiers[i] = parts[1].getBytes();
+qualifiers[i] = Bytes.toBytes(parts[1]);
 }
 }
 }
@@ -471,7 +471,7 @@ public class ImportTsv extends Configured implements Tool {
 String actualSeparator = conf.get(SEPARATOR_CONF_KEY);
 if (actualSeparator != null) {
 conf.set(SEPARATOR_CONF_KEY,
-Bytes.toString(Base64.getEncoder().encode(actualSeparator.getBytes())));
+Bytes.toString(Base64.getEncoder().encode(Bytes.toBytes(actualSeparator))));
 }

 // See if a non-default Mapper was set
@@ -31,6 +31,7 @@ import java.io.IOException;
 import java.io.InputStreamReader;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
+import java.nio.charset.StandardCharsets;
 import java.util.LinkedList;
 import java.util.NoSuchElementException;
 import java.util.Queue;
@@ -104,7 +105,8 @@ public class TestPerformanceEvaluation {
 try {
 dis.readFully(content);
 BufferedReader br =
-new BufferedReader(new InputStreamReader(new ByteArrayInputStream(content)));
+new BufferedReader(new InputStreamReader(
+new ByteArrayInputStream(content), StandardCharsets.UTF_8));
 int count = 0;
 while (br.readLine() != null) {
 count++;
@@ -72,9 +72,12 @@ public class TestGroupingTableMap {

 byte[] row = {};
 List<Cell> keyValues = ImmutableList.<Cell>of(
-new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("1111")),
-new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("2222")),
-new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), Bytes.toBytes("3333")));
+new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"),
+Bytes.toBytes("1111")),
+new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"),
+Bytes.toBytes("2222")),
+new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"),
+Bytes.toBytes("3333")));
 when(result.listCells()).thenReturn(keyValues);
 OutputCollector<ImmutableBytesWritable, Result> outputCollectorMock =
 mock(OutputCollector.class);
@@ -102,9 +105,12 @@ public class TestGroupingTableMap {

 byte[] row = {};
 List<Cell> keyValues = ImmutableList.<Cell>of(
-new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("1111")),
-new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), Bytes.toBytes("2222")),
-new KeyValue(row, "familyC".getBytes(), "qualifierC".getBytes(), Bytes.toBytes("3333")));
+new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"),
+Bytes.toBytes("1111")),
+new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"),
+Bytes.toBytes("2222")),
+new KeyValue(row, Bytes.toBytes("familyC"), Bytes.toBytes("qualifierC"),
+Bytes.toBytes("3333")));
 when(result.listCells()).thenReturn(keyValues);
 OutputCollector<ImmutableBytesWritable, Result> outputCollectorMock =
 mock(OutputCollector.class);
@@ -137,8 +143,10 @@ public class TestGroupingTableMap {
 final byte[] secondPartKeyValue = Bytes.toBytes("35245142671437");
 byte[] row = {};
 List<Cell> cells = ImmutableList.<Cell>of(
-new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), firstPartKeyValue),
-new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), secondPartKeyValue));
+new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"),
+firstPartKeyValue),
+new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"),
+secondPartKeyValue));
 when(result.listCells()).thenReturn(cells);

 final AtomicBoolean outputCollected = new AtomicBoolean();
@@ -106,8 +106,8 @@ public class TestSplitTable {
 @SuppressWarnings("deprecation")
 public void testToString() {
 TableSplit split =
-new TableSplit(TableName.valueOf(name.getMethodName()), "row-start".getBytes(), "row-end".getBytes(),
-"location");
+new TableSplit(TableName.valueOf(name.getMethodName()), Bytes.toBytes("row-start"),
+Bytes.toBytes("row-end"), "location");
 String str =
 "HBase table split(table name: " + name.getMethodName() + ", start row: row-start, "
 + "end row: row-end, region location: location)";
@@ -124,14 +124,14 @@ public class TestTableInputFormat {
 */
 public static Table createTable(byte[] tableName, byte[][] families) throws IOException {
 Table table = UTIL.createTable(TableName.valueOf(tableName), families);
-Put p = new Put("aaa".getBytes());
+Put p = new Put(Bytes.toBytes("aaa"));
 for (byte[] family : families) {
-p.addColumn(family, null, "value aaa".getBytes());
+p.addColumn(family, null, Bytes.toBytes("value aaa"));
 }
 table.put(p);
-p = new Put("bbb".getBytes());
+p = new Put(Bytes.toBytes("bbb"));
 for (byte[] family : families) {
-p.addColumn(family, null, "value bbb".getBytes());
+p.addColumn(family, null, Bytes.toBytes("value bbb"));
 }
 table.put(p);
 return table;
@@ -165,8 +165,8 @@ public class TestTableInputFormat {
 static void runTestMapred(Table table) throws IOException {
 org.apache.hadoop.hbase.mapred.TableRecordReader trr =
 new org.apache.hadoop.hbase.mapred.TableRecordReader();
-trr.setStartRow("aaa".getBytes());
+trr.setStartRow(Bytes.toBytes("aaa"));
-trr.setEndRow("zzz".getBytes());
+trr.setEndRow(Bytes.toBytes("zzz"));
 trr.setHTable(table);
 trr.setInputColumns(columns);

@@ -176,11 +176,11 @@ public class TestTableInputFormat {

 boolean more = trr.next(key, r);
 assertTrue(more);
-checkResult(r, key, "aaa".getBytes(), "value aaa".getBytes());
+checkResult(r, key, Bytes.toBytes("aaa"), Bytes.toBytes("value aaa"));

 more = trr.next(key, r);
 assertTrue(more);
-checkResult(r, key, "bbb".getBytes(), "value bbb".getBytes());
+checkResult(r, key, Bytes.toBytes("bbb"), Bytes.toBytes("value bbb"));

 // no more data
 more = trr.next(key, r);
@@ -204,7 +204,7 @@ public class TestTableInputFormat {
 if (cnt++ < failCnt) {
 // create mock ResultScanner that always fails.
 Scan scan = mock(Scan.class);
-doReturn("bogus".getBytes()).when(scan).getStartRow(); // avoid npe
+doReturn(Bytes.toBytes("bogus")).when(scan).getStartRow(); // avoid npe
 ResultScanner scanner = mock(ResultScanner.class);
 // simulate TimeoutException / IOException
 doThrow(new IOException("Injected exception")).when(scanner).next();
@@ -239,7 +239,7 @@ public class TestTableInputFormat {
 if (cnt++ < failCnt) {
 // create mock ResultScanner that always fails.
 Scan scan = mock(Scan.class);
-doReturn("bogus".getBytes()).when(scan).getStartRow(); // avoid npe
+doReturn(Bytes.toBytes("bogus")).when(scan).getStartRow(); // avoid npe
 ResultScanner scanner = mock(ResultScanner.class);

 invocation.callRealMethod(); // simulate NotServingRegionException
@@ -266,7 +266,7 @@ public class TestTableInputFormat {
 */
 @Test
 public void testTableRecordReader() throws IOException {
-Table table = createTable("table1".getBytes());
+Table table = createTable(Bytes.toBytes("table1"));
 runTestMapred(table);
 }

@@ -277,7 +277,7 @@ public class TestTableInputFormat {
 */
 @Test
 public void testTableRecordReaderScannerFail() throws IOException {
-Table htable = createIOEScannerTable("table2".getBytes(), 1);
+Table htable = createIOEScannerTable(Bytes.toBytes("table2"), 1);
 runTestMapred(htable);
 }

@@ -288,7 +288,7 @@ public class TestTableInputFormat {
 */
 @Test(expected = IOException.class)
 public void testTableRecordReaderScannerFailTwice() throws IOException {
-Table htable = createIOEScannerTable("table3".getBytes(), 2);
+Table htable = createIOEScannerTable(Bytes.toBytes("table3"), 2);
 runTestMapred(htable);
 }

@@ -299,7 +299,7 @@ public class TestTableInputFormat {
 */
 @Test
 public void testTableRecordReaderScannerTimeout() throws IOException {
-Table htable = createDNRIOEScannerTable("table4".getBytes(), 1);
+Table htable = createDNRIOEScannerTable(Bytes.toBytes("table4"), 1);
 runTestMapred(htable);
 }

@@ -310,7 +310,7 @@ public class TestTableInputFormat {
 */
 @Test(expected = org.apache.hadoop.hbase.NotServingRegionException.class)
 public void testTableRecordReaderScannerTimeoutTwice() throws IOException {
-Table htable = createDNRIOEScannerTable("table5".getBytes(), 2);
+Table htable = createDNRIOEScannerTable(Bytes.toBytes("table5"), 2);
 runTestMapred(htable);
 }

@@ -301,7 +301,7 @@ public class TestCopyTable {
 assertNotNull(t2.get(new Get(ROW1)).getRow());
 Result res = t2.get(new Get(ROW1));
 byte[] b1 = res.getValue(FAMILY_B, QUALIFIER);
-assertEquals("Data13", new String(b1));
+assertEquals("Data13", Bytes.toString(b1));
 assertNotNull(t2.get(new Get(ROW2)).getRow());
 res = t2.get(new Get(ROW2));
 b1 = res.getValue(FAMILY_A, QUALIFIER);
@@ -415,7 +415,7 @@ public class TestImportExport {

 //Add second version of QUAL
 p = new Put(ROW1);
-p.addColumn(FAMILYA, QUAL, now + 5, "s".getBytes());
+p.addColumn(FAMILYA, QUAL, now + 5, Bytes.toBytes("s"));
 exportT.put(p);

 //Add second Delete family marker
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -66,11 +67,11 @@ public class TestRegionSizeCalculator {

 RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin);

-assertEquals(123 * megabyte, calculator.getRegionSize("region1".getBytes()));
+assertEquals(123 * megabyte, calculator.getRegionSize(Bytes.toBytes("region1")));
-assertEquals(54321 * megabyte, calculator.getRegionSize("region2".getBytes()));
+assertEquals(54321 * megabyte, calculator.getRegionSize(Bytes.toBytes("region2")));
-assertEquals(1232 * megabyte, calculator.getRegionSize("region3".getBytes()));
+assertEquals(1232 * megabyte, calculator.getRegionSize(Bytes.toBytes("region3")));
 // if regionCalculator does not know about a region, it should return 0
-assertEquals(0 * megabyte, calculator.getRegionSize("otherTableRegion".getBytes()));
+assertEquals(0 * megabyte, calculator.getRegionSize(Bytes.toBytes("otherTableRegion")));

 assertEquals(3, calculator.getRegionSizeMap().size());
 }
@@ -91,7 +92,8 @@ public class TestRegionSizeCalculator {

 RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin);

-assertEquals(((long) Integer.MAX_VALUE) * megabyte, calculator.getRegionSize("largeRegion".getBytes()));
+assertEquals(((long) Integer.MAX_VALUE) * megabyte,
+calculator.getRegionSize(Bytes.toBytes("largeRegion")));
 }

 /** When calculator is disabled, it should return 0 for each request.*/
@@ -106,12 +108,12 @@ public class TestRegionSizeCalculator {

 //first request on enabled calculator
 RegionSizeCalculator calculator = new RegionSizeCalculator(table, admin);
-assertEquals(999 * megabyte, calculator.getRegionSize(regionName.getBytes()));
+assertEquals(999 * megabyte, calculator.getRegionSize(Bytes.toBytes(regionName)));

 //then disabled calculator.
 configuration.setBoolean(RegionSizeCalculator.ENABLE_REGIONSIZECALCULATOR, false);
 RegionSizeCalculator disabledCalculator = new RegionSizeCalculator(table, admin);
-assertEquals(0 * megabyte, disabledCalculator.getRegionSize(regionName.getBytes()));
+assertEquals(0 * megabyte, disabledCalculator.getRegionSize(Bytes.toBytes(regionName)));

 assertEquals(0, disabledCalculator.getRegionSizeMap().size());
 }
@@ -127,7 +129,7 @@ public class TestRegionSizeCalculator {

 for (String regionName : regionNames) {
 HRegionInfo info = Mockito.mock(HRegionInfo.class);
-when(info.getRegionName()).thenReturn(regionName.getBytes());
+when(info.getRegionName()).thenReturn(Bytes.toBytes(regionName));
 regionLocations.add(new HRegionLocation(info, sn));
 }

@@ -156,7 +158,7 @@ public class TestRegionSizeCalculator {
 * */
 private RegionMetrics mockRegion(String regionName, int fileSizeMb) {
 RegionMetrics region = Mockito.mock(RegionMetrics.class);
-when(region.getRegionName()).thenReturn(regionName.getBytes());
+when(region.getRegionName()).thenReturn(Bytes.toBytes(regionName));
 when(region.getNameAsString()).thenReturn(regionName);
 when(region.getStoreFileSize()).thenReturn(new Size(fileSizeMb, Size.Unit.MEGABYTE));
 return region;
@@ -121,14 +121,14 @@ public class TestTableInputFormat {
 */
 public static Table createTable(byte[] tableName, byte[][] families) throws IOException {
 Table table = UTIL.createTable(TableName.valueOf(tableName), families);
-Put p = new Put("aaa".getBytes());
+Put p = new Put(Bytes.toBytes("aaa"));
 for (byte[] family : families) {
-p.addColumn(family, null, "value aaa".getBytes());
+p.addColumn(family, null, Bytes.toBytes("value aaa"));
 }
 table.put(p);
-p = new Put("bbb".getBytes());
+p = new Put(Bytes.toBytes("bbb"));
 for (byte[] family : families) {
-p.addColumn(family, null, "value bbb".getBytes());
+p.addColumn(family, null, Bytes.toBytes("value bbb"));
 }
 table.put(p);
 return table;
@@ -165,8 +165,8 @@ public class TestTableInputFormat {
 org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl trr =
 new org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl();
 Scan s = new Scan();
-s.setStartRow("aaa".getBytes());
+s.setStartRow(Bytes.toBytes("aaa"));
-s.setStopRow("zzz".getBytes());
+s.setStopRow(Bytes.toBytes("zzz"));
 s.addFamily(FAMILY);
 trr.setScan(s);
 trr.setHTable(table);
@@ -179,13 +179,13 @@ public class TestTableInputFormat {
 assertTrue(more);
 key = trr.getCurrentKey();
 r = trr.getCurrentValue();
-checkResult(r, key, "aaa".getBytes(), "value aaa".getBytes());
+checkResult(r, key, Bytes.toBytes("aaa"), Bytes.toBytes("value aaa"));

 more = trr.nextKeyValue();
 assertTrue(more);
 key = trr.getCurrentKey();
 r = trr.getCurrentValue();
-checkResult(r, key, "bbb".getBytes(), "value bbb".getBytes());
+checkResult(r, key, Bytes.toBytes("bbb"), Bytes.toBytes("value bbb"));

 // no more data
 more = trr.nextKeyValue();
@@ -209,7 +209,7 @@ public class TestTableInputFormat {
 if (cnt++ < failCnt) {
 // create mock ResultScanner that always fails.
 Scan scan = mock(Scan.class);
-doReturn("bogus".getBytes()).when(scan).getStartRow(); // avoid npe
+doReturn(Bytes.toBytes("bogus")).when(scan).getStartRow(); // avoid npe
 ResultScanner scanner = mock(ResultScanner.class);
 // simulate TimeoutException / IOException
 doThrow(new IOException("Injected exception")).when(scanner).next();
@@ -244,7 +244,7 @@ public class TestTableInputFormat {
 if (cnt++ < failCnt) {
 // create mock ResultScanner that always fails.
 Scan scan = mock(Scan.class);
-doReturn("bogus".getBytes()).when(scan).getStartRow(); // avoid npe
+doReturn(Bytes.toBytes("bogus")).when(scan).getStartRow(); // avoid npe
 ResultScanner scanner = mock(ResultScanner.class);

 invocation.callRealMethod(); // simulate NotServingRegionException
@@ -273,7 +273,7 @@ public class TestTableInputFormat {
 @Test
 public void testTableRecordReaderMapreduce() throws IOException,
 InterruptedException {
-Table table = createTable("table1-mr".getBytes());
+Table table = createTable(Bytes.toBytes("table1-mr"));
 runTestMapreduce(table);
 }

@@ -286,7 +286,7 @@ public class TestTableInputFormat {
 @Test
 public void testTableRecordReaderScannerFailMapreduce() throws IOException,
 InterruptedException {
-Table htable = createIOEScannerTable("table2-mr".getBytes(), 1);
+Table htable = createIOEScannerTable(Bytes.toBytes("table2-mr"), 1);
 runTestMapreduce(htable);
 }

@@ -299,7 +299,7 @@ public class TestTableInputFormat {
 @Test(expected = IOException.class)
 public void testTableRecordReaderScannerFailMapreduceTwice() throws IOException,
 InterruptedException {
-Table htable = createIOEScannerTable("table3-mr".getBytes(), 2);
+Table htable = createIOEScannerTable(Bytes.toBytes("table3-mr"), 2);
 runTestMapreduce(htable);
 }

@@ -312,7 +312,7 @@ public class TestTableInputFormat {
 @Test
 public void testTableRecordReaderScannerTimeoutMapreduce()
 throws IOException, InterruptedException {
-Table htable = createDNRIOEScannerTable("table4-mr".getBytes(), 1);
+Table htable = createDNRIOEScannerTable(Bytes.toBytes("table4-mr"), 1);
 runTestMapreduce(htable);
 }

@@ -325,7 +325,7 @@ public class TestTableInputFormat {
 @Test(expected = org.apache.hadoop.hbase.NotServingRegionException.class)
 public void testTableRecordReaderScannerTimeoutMapreduceTwice()
 throws IOException, InterruptedException {
-Table htable = createDNRIOEScannerTable("table5-mr".getBytes(), 2);
+Table htable = createDNRIOEScannerTable(Bytes.toBytes("table5-mr"), 2);
 runTestMapreduce(htable);
 }

@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.MapReduceTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.junit.Assert;
 import org.junit.ClassRule;
@@ -47,11 +48,11 @@ public class TestTableSplit {
 @Test
 public void testHashCode() {
 TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()),
-"row-start".getBytes(),
-"row-end".getBytes(), "location");
+Bytes.toBytes("row-start"),
+Bytes.toBytes("row-end"), "location");
 TableSplit split2 = new TableSplit(TableName.valueOf(name.getMethodName()),
-"row-start".getBytes(),
-"row-end".getBytes(), "location");
+Bytes.toBytes("row-start"),
+Bytes.toBytes("row-end"), "location");
 assertEquals (split1, split2);
 assertTrue (split1.hashCode() == split2.hashCode());
 HashSet<TableSplit> set = new HashSet<>(2);
@@ -66,11 +67,11 @@ public class TestTableSplit {
 @Test
 public void testHashCode_length() {
 TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()),
-"row-start".getBytes(),
-"row-end".getBytes(), "location", 1984);
+Bytes.toBytes("row-start"),
+Bytes.toBytes("row-end"), "location", 1984);
 TableSplit split2 = new TableSplit(TableName.valueOf(name.getMethodName()),
-"row-start".getBytes(),
-"row-end".getBytes(), "location", 1982);
+Bytes.toBytes("row-start"),
+Bytes.toBytes("row-end"), "location", 1982);

 assertEquals (split1, split2);
 assertTrue (split1.hashCode() == split2.hashCode());
@@ -86,12 +87,12 @@ public class TestTableSplit {
 @Test
 public void testLengthIsSerialized() throws Exception {
 TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()),
-"row-start".getBytes(),
-"row-end".getBytes(), "location", 666);
+Bytes.toBytes("row-start"),
+Bytes.toBytes("row-end"), "location", 666);

 TableSplit deserialized = new TableSplit(TableName.valueOf(name.getMethodName()),
-"row-start2".getBytes(),
-"row-end2".getBytes(), "location1");
+Bytes.toBytes("row-start2"),
+Bytes.toBytes("row-end2"), "location1");
 ReflectionUtils.copy(new Configuration(), split1, deserialized);

 Assert.assertEquals(666, deserialized.getLength());
@@ -100,8 +101,8 @@ public class TestTableSplit {
 @Test
 public void testToString() {
 TableSplit split =
-new TableSplit(TableName.valueOf(name.getMethodName()), "row-start".getBytes(), "row-end".getBytes(),
-"location");
+new TableSplit(TableName.valueOf(name.getMethodName()), Bytes.toBytes("row-start"),
+Bytes.toBytes("row-end"), "location");
 String str =
 "HBase table split(table name: " + name.getMethodName() + ", scan: , start row: row-start, "
 + "end row: row-end, region location: location, "
@@ -109,8 +110,8 @@ public class TestTableSplit {
 Assert.assertEquals(str, split.toString());

 split =
-new TableSplit(TableName.valueOf(name.getMethodName()), null, "row-start".getBytes(),
-"row-end".getBytes(), "location", "encoded-region-name", 1000L);
+new TableSplit(TableName.valueOf(name.getMethodName()), null, Bytes.toBytes("row-start"),
+Bytes.toBytes("row-end"), "location", "encoded-region-name", 1000L);
 str =
 "HBase table split(table name: " + name.getMethodName() + ", scan: , start row: row-start, "
 + "end row: row-end, region location: location, "
@@ -168,15 +168,15 @@ public class TestVerifyReplicationCrossDiffHdfs {
 Path rootDir = FSUtils.getRootDir(conf1);
 FileSystem fs = rootDir.getFileSystem(conf1);
 String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
-SnapshotTestingUtils.createSnapshotAndValidate(util1.getAdmin(), TABLE_NAME, new String(FAMILY),
-sourceSnapshotName, rootDir, fs, true);
+SnapshotTestingUtils.createSnapshotAndValidate(util1.getAdmin(), TABLE_NAME,
+Bytes.toString(FAMILY), sourceSnapshotName, rootDir, fs, true);

 // Take target snapshot
 Path peerRootDir = FSUtils.getRootDir(conf2);
 FileSystem peerFs = peerRootDir.getFileSystem(conf2);
 String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
-SnapshotTestingUtils.createSnapshotAndValidate(util2.getAdmin(), TABLE_NAME, new String(FAMILY),
-peerSnapshotName, peerRootDir, peerFs, true);
+SnapshotTestingUtils.createSnapshotAndValidate(util2.getAdmin(), TABLE_NAME,
+Bytes.toString(FAMILY), peerSnapshotName, peerRootDir, peerFs, true);

 String peerFSAddress = peerFs.getUri().toString();
 String temPath1 = new Path(fs.getUri().toString(), "/tmp1").toString();
@@ -61,11 +61,11 @@ public class TestSerialization {

 @Test public void testKeyValue() throws Exception {
 final String name = "testKeyValue2";
-byte[] row = name.getBytes();
+byte[] row = Bytes.toBytes(name);
-byte[] fam = "fam".getBytes();
+byte[] fam = Bytes.toBytes("fam");
-byte[] qf = "qf".getBytes();
+byte[] qf = Bytes.toBytes("qf");
 long ts = System.currentTimeMillis();
-byte[] val = "val".getBytes();
+byte[] val = Bytes.toBytes("val");
 KeyValue kv = new KeyValue(row, fam, qf, ts, val);
 ByteArrayOutputStream baos = new ByteArrayOutputStream();
 DataOutputStream dos = new DataOutputStream(baos);
@@ -290,9 +290,9 @@ public class TestSerialization {
 */

 @Test public void testGet() throws Exception{
-byte[] row = "row".getBytes();
+byte[] row = Bytes.toBytes("row");
-byte[] fam = "fam".getBytes();
+byte[] fam = Bytes.toBytes("fam");
-byte[] qf1 = "qf1".getBytes();
+byte[] qf1 = Bytes.toBytes("qf1");

 long ts = System.currentTimeMillis();
 int maxVersions = 2;
@@ -329,10 +329,10 @@ public class TestSerialization {

 @Test public void testScan() throws Exception {

-byte[] startRow = "startRow".getBytes();
+byte[] startRow = Bytes.toBytes("startRow");
-byte[] stopRow = "stopRow".getBytes();
+byte[] stopRow = Bytes.toBytes("stopRow");
-byte[] fam = "fam".getBytes();
+byte[] fam = Bytes.toBytes("fam");
-byte[] qf1 = "qf1".getBytes();
+byte[] qf1 = Bytes.toBytes("qf1");

 long ts = System.currentTimeMillis();
 int maxVersions = 2;
@@ -686,7 +686,7 @@ public class TestAdmin1 {
 TableName TABLE_3 = TableName.valueOf(tableName.getNameAsString() + "_3");
 desc = new HTableDescriptor(TABLE_3);
 desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
-admin.createTable(desc, "a".getBytes(), "z".getBytes(), 3);
+admin.createTable(desc, Bytes.toBytes("a"), Bytes.toBytes("z"), 3);
 try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(TABLE_3)) {
 regions = l.getAllRegionLocations();
 assertEquals("Table should have only 3 region", 3, regions.size());
@@ -696,7 +696,7 @@ public class TestAdmin1 {
 desc = new HTableDescriptor(TABLE_4);
 desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
 try {
-admin.createTable(desc, "a".getBytes(), "z".getBytes(), 2);
+admin.createTable(desc, Bytes.toBytes("a"), Bytes.toBytes("z"), 2);
 fail("Should not be able to create a table with only 2 regions using this API.");
 } catch (IllegalArgumentException eae) {
 // Expected
@ -921,9 +921,9 @@ public class TestAdmin1 {
|
||||||
public void testCreateTableWithEmptyRowInTheSplitKeys() throws IOException{
|
public void testCreateTableWithEmptyRowInTheSplitKeys() throws IOException{
|
||||||
final byte[] tableName = Bytes.toBytes(name.getMethodName());
|
final byte[] tableName = Bytes.toBytes(name.getMethodName());
|
||||||
byte[][] splitKeys = new byte[3][];
|
byte[][] splitKeys = new byte[3][];
|
||||||
splitKeys[0] = "region1".getBytes();
|
splitKeys[0] = Bytes.toBytes("region1");
|
||||||
splitKeys[1] = HConstants.EMPTY_BYTE_ARRAY;
|
splitKeys[1] = HConstants.EMPTY_BYTE_ARRAY;
|
||||||
splitKeys[2] = "region2".getBytes();
|
splitKeys[2] = Bytes.toBytes("region2");
|
||||||
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
|
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
|
||||||
desc.addFamily(new HColumnDescriptor("col"));
|
desc.addFamily(new HColumnDescriptor("col"));
|
||||||
try {
|
try {
|
||||||
|
@ -1181,7 +1181,7 @@ public class TestAdmin1 {
|
||||||
final TableName tableName = TableName.valueOf(name.getMethodName());
|
final TableName tableName = TableName.valueOf(name.getMethodName());
|
||||||
HTableDescriptor desc = new HTableDescriptor(tableName);
|
HTableDescriptor desc = new HTableDescriptor(tableName);
|
||||||
desc.setRegionReplication(3);
|
desc.setRegionReplication(3);
|
||||||
byte[] cf = "f".getBytes();
|
byte[] cf = Bytes.toBytes("f");
|
||||||
HColumnDescriptor hcd = new HColumnDescriptor(cf);
|
HColumnDescriptor hcd = new HColumnDescriptor(cf);
|
||||||
desc.addFamily(hcd);
|
desc.addFamily(hcd);
|
||||||
byte[][] splitRows = new byte[2][];
|
byte[][] splitRows = new byte[2][];
|
||||||
|
@ -1196,15 +1196,15 @@ public class TestAdmin1 {
|
||||||
// write some data to the table
|
// write some data to the table
|
||||||
Table ht = TEST_UTIL.getConnection().getTable(tableName);
|
Table ht = TEST_UTIL.getConnection().getTable(tableName);
|
||||||
List<Put> puts = new ArrayList<>();
|
List<Put> puts = new ArrayList<>();
|
||||||
byte[] qualifier = "c".getBytes();
|
byte[] qualifier = Bytes.toBytes("c");
|
||||||
Put put = new Put(new byte[]{(byte)'1'});
|
Put put = new Put(new byte[]{(byte)'1'});
|
||||||
put.addColumn(cf, qualifier, "100".getBytes());
|
put.addColumn(cf, qualifier, Bytes.toBytes("100"));
|
||||||
puts.add(put);
|
puts.add(put);
|
||||||
put = new Put(new byte[]{(byte)'6'});
|
put = new Put(new byte[]{(byte)'6'});
|
||||||
put.addColumn(cf, qualifier, "100".getBytes());
|
put.addColumn(cf, qualifier, Bytes.toBytes("100"));
|
||||||
puts.add(put);
|
puts.add(put);
|
||||||
put = new Put(new byte[]{(byte)'8'});
|
put = new Put(new byte[]{(byte)'8'});
|
||||||
put.addColumn(cf, qualifier, "100".getBytes());
|
put.addColumn(cf, qualifier, Bytes.toBytes("100"));
|
||||||
puts.add(put);
|
puts.add(put);
|
||||||
ht.put(puts);
|
ht.put(puts);
|
||||||
ht.close();
|
ht.close();
|
||||||
|
|
|
@@ -100,7 +100,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
 final TableName tableName3 = TableName.valueOf(tableName.getNameAsString() + "_3");
 TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName3);
 builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY));
-admin.createTable(builder.build(), "a".getBytes(), "z".getBytes(), 3).join();
+admin.createTable(builder.build(), Bytes.toBytes("a"), Bytes.toBytes("z"), 3).join();
 regionLocations =
 AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, Optional.of(tableName3)).get();
 assertEquals("Table should have only 3 region", 3, regionLocations.size());
@@ -109,7 +109,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
 builder = TableDescriptorBuilder.newBuilder(tableName4);
 builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY));
 try {
-admin.createTable(builder.build(), "a".getBytes(), "z".getBytes(), 2).join();
+admin.createTable(builder.build(), Bytes.toBytes("a"), Bytes.toBytes("z"), 2).join();
 fail("Should not be able to create a table with only 2 regions using this API.");
 } catch (CompletionException e) {
 assertTrue(e.getCause() instanceof IllegalArgumentException);
@@ -309,9 +309,9 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
 @Test
 public void testCreateTableWithEmptyRowInTheSplitKeys() throws Exception {
 byte[][] splitKeys = new byte[3][];
-splitKeys[0] = "region1".getBytes();
+splitKeys[0] = Bytes.toBytes("region1");
 splitKeys[1] = HConstants.EMPTY_BYTE_ARRAY;
-splitKeys[2] = "region2".getBytes();
+splitKeys[2] = Bytes.toBytes("region2");
 try {
 createTableWithDefaultConf(tableName, splitKeys);
 fail("Test case should fail as empty split key is passed.");
@@ -218,8 +218,8 @@ public class TestConnectionImplementation {
 // dead servers is broke"
 public void testClusterStatus() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
-byte[] cf = "cf".getBytes();
+byte[] cf = Bytes.toBytes("cf");
-byte[] rk = "rk1".getBytes();
+byte[] rk = Bytes.toBytes("rk1");

 JVMClusterUtil.RegionServerThread rs = TEST_UTIL.getHBaseCluster().startRegionServer();
 rs.waitForServerOnline();
@@ -242,7 +242,7 @@ public class TestConnectionImplementation {
 }

 Put p1 = new Put(rk);
-p1.addColumn(cf, "qual".getBytes(), "val".getBytes());
+p1.addColumn(cf, Bytes.toBytes("qual"), Bytes.toBytes("val"));
 t.put(p1);

 rs.getRegionServer().abort("I'm dead");
@@ -606,7 +606,7 @@ public class TestConnectionImplementation {
 LOG.info("Move starting region="+toMove.getRegionInfo().getRegionNameAsString());
 TEST_UTIL.getAdmin().move(
 toMove.getRegionInfo().getEncodedNameAsBytes(),
-destServerName.getServerName().getBytes()
+Bytes.toBytes(destServerName.getServerName())
 );

 while (destServer.getOnlineRegion(regionName) == null ||
@@ -672,7 +672,7 @@ public class TestConnectionImplementation {
 LOG.info("Move starting region=" + toMove.getRegionInfo().getRegionNameAsString());
 TEST_UTIL.getAdmin().move(
 toMove.getRegionInfo().getEncodedNameAsBytes(),
-curServer.getServerName().getServerName().getBytes()
+Bytes.toBytes(curServer.getServerName().getServerName())
 );

 while (curServer.getOnlineRegion(regionName) == null ||
@@ -930,7 +930,7 @@ public class TestConnectionImplementation {
 LOG.info("Move starting region=" + toMove.getRegionInfo().getRegionNameAsString());
 TEST_UTIL.getAdmin().move(
 toMove.getRegionInfo().getEncodedNameAsBytes(),
-destServerName.getServerName().getBytes()
+Bytes.toBytes(destServerName.getServerName())
 );

 while (destServer.getOnlineRegion(regionName) == null ||
@@ -367,7 +367,7 @@ public class TestFastFail {
 }

 private byte[] longToByteArrayKey(long rowKey) {
-return LoadTestKVGenerator.md5PrefixedKey(rowKey).getBytes();
+return Bytes.toBytes(LoadTestKVGenerator.md5PrefixedKey(rowKey));
 }

 public static class CallQueueTooBigPffeInterceptor extends
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -77,7 +78,7 @@ public class TestGetScanPartialResult {
 byte[] val = makeLargeValue(VALUE_SIZE);
 Put p = new Put(ROW);
 for (int i = 0; i < NUM_COLUMNS; i++) {
-p.addColumn(CF, Integer.toString(i).getBytes(), val);
+p.addColumn(CF, Bytes.toBytes(Integer.toString(i)), val);
 }
 t.put(p);
@@ -258,10 +258,10 @@ public class TestMetaWithReplicas {
 LOG.info("Running GETs");
 Get get = null;
 Result r = null;
-byte[] row = "test".getBytes();
+byte[] row = Bytes.toBytes("test");
 try (Table htable = c.getTable(TABLE)) {
 Put put = new Put(row);
-put.addColumn("foo".getBytes(), row, row);
+put.addColumn(Bytes.toBytes("foo"), row, row);
 BufferedMutator m = c.getBufferedMutator(TABLE);
 m.mutate(put);
 m.flush();
@@ -296,7 +296,7 @@ public class TestMetaWithReplicas {
 TEST_UTIL.getAdmin().deleteTable(tableName);
 }
 try (Table htable = TEST_UTIL.createTable(tableName, FAMILIES)) {
-byte[] row = "test".getBytes();
+byte[] row = Bytes.toBytes("test");
 ConnectionImplementation c = ((ConnectionImplementation) TEST_UTIL.getConnection());
 // check that metalookup pool would get created
 c.relocateRegion(tableName, row);
@@ -74,7 +74,7 @@ public class TestReplicaWithCluster {
 private static final Logger LOG = LoggerFactory.getLogger(TestReplicaWithCluster.class);

 private static final int NB_SERVERS = 3;
-private static final byte[] row = TestReplicaWithCluster.class.getName().getBytes();
+private static final byte[] row = Bytes.toBytes(TestReplicaWithCluster.class.getName());
 private static final HBaseTestingUtility HTU = new HBaseTestingUtility();

 // second minicluster used in testing of replication
@@ -87,7 +87,7 @@ public class TestReplicasClient {

 private static final int NB_SERVERS = 1;
 private static Table table = null;
-private static final byte[] row = TestReplicasClient.class.getName().getBytes();
+private static final byte[] row = Bytes.toBytes(TestReplicasClient.class.getName());

 private static HRegionInfo hriPrimary;
 private static HRegionInfo hriSecondary;
@@ -313,7 +313,7 @@ public class TestReplicasClient {

 @Test
 public void testUseRegionWithoutReplica() throws Exception {
-byte[] b1 = "testUseRegionWithoutReplica".getBytes();
+byte[] b1 = Bytes.toBytes("testUseRegionWithoutReplica");
 openRegion(hriSecondary);
 SlowMeCopro.getPrimaryCdl().set(new CountDownLatch(0));
 try {
@@ -327,7 +327,7 @@ public class TestReplicasClient {

 @Test
 public void testLocations() throws Exception {
-byte[] b1 = "testLocations".getBytes();
+byte[] b1 = Bytes.toBytes("testLocations");
 openRegion(hriSecondary);
 ClusterConnection hc = (ClusterConnection) HTU.getAdmin().getConnection();

@@ -352,7 +352,7 @@ public class TestReplicasClient {

 @Test
 public void testGetNoResultNoStaleRegionWithReplica() throws Exception {
-byte[] b1 = "testGetNoResultNoStaleRegionWithReplica".getBytes();
+byte[] b1 = Bytes.toBytes("testGetNoResultNoStaleRegionWithReplica");
 openRegion(hriSecondary);

 try {
@@ -368,7 +368,7 @@ public class TestReplicasClient {

 @Test
 public void testGetNoResultStaleRegionWithReplica() throws Exception {
-byte[] b1 = "testGetNoResultStaleRegionWithReplica".getBytes();
+byte[] b1 = Bytes.toBytes("testGetNoResultStaleRegionWithReplica");
 openRegion(hriSecondary);

 SlowMeCopro.getPrimaryCdl().set(new CountDownLatch(1));
@@ -385,7 +385,7 @@ public class TestReplicasClient {

 @Test
 public void testGetNoResultNotStaleSleepRegionWithReplica() throws Exception {
-byte[] b1 = "testGetNoResultNotStaleSleepRegionWithReplica".getBytes();
+byte[] b1 = Bytes.toBytes("testGetNoResultNotStaleSleepRegionWithReplica");
 openRegion(hriSecondary);

 try {
@@ -461,7 +461,7 @@ public class TestReplicasClient {

 @Test
 public void testUseRegionWithReplica() throws Exception {
-byte[] b1 = "testUseRegionWithReplica".getBytes();
+byte[] b1 = Bytes.toBytes("testUseRegionWithReplica");
 openRegion(hriSecondary);

 try {
@@ -554,7 +554,7 @@ public class TestReplicasClient {

 @Test
 public void testHedgedRead() throws Exception {
-byte[] b1 = "testHedgedRead".getBytes();
+byte[] b1 = Bytes.toBytes("testHedgedRead");
 openRegion(hriSecondary);

 try {
@@ -769,7 +769,7 @@ public class TestReplicasClient {
 for (int col = 0; col < NUMCOLS; col++) {
 Put p = new Put(b1);
 String qualifier = "qualifer" + col;
-KeyValue kv = new KeyValue(b1, f, qualifier.getBytes());
+KeyValue kv = new KeyValue(b1, f, Bytes.toBytes(qualifier));
 p.add(kv);
 table.put(p);
 }
@@ -589,7 +589,7 @@ public class TestRegionObserverInterface {
 ServerName sn2 = rs1.getRegionServer().getServerName();
 String regEN = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();

-util.getAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
+util.getAdmin().move(Bytes.toBytes(regEN), Bytes.toBytes(sn2.getServerName()));
 while (!sn2.equals(locator.getAllRegionLocations().get(0).getServerName())) {
 Thread.sleep(100);
 }
@@ -639,7 +639,7 @@ public class TestRegionObserverInterface {
 ServerName sn2 = rs1.getRegionServer().getServerName();
 String regEN = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();

-util.getAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
+util.getAdmin().move(Bytes.toBytes(regEN), Bytes.toBytes(sn2.getServerName()));
 while (!sn2.equals(locator.getAllRegionLocations().get(0).getServerName())) {
 Thread.sleep(100);
 }
@@ -99,7 +99,7 @@ public class TestFilterWrapper {
 for (Cell kv : result.listCells()) {
 LOG.debug(kv_number + ". kv: " + kv);
 kv_number++;
-assertEquals("Returned row is not correct", new String(CellUtil.cloneRow(kv)),
+assertEquals("Returned row is not correct", Bytes.toString(CellUtil.cloneRow(kv)),
 "row" + ( row_number + 1 ));
 }
 }
@@ -125,7 +125,7 @@ public class TestFuzzyRowAndColumnRangeFilter {

 Put p = new Put(rk);
 p.setDurability(Durability.SKIP_WAL);
-p.addColumn(cf.getBytes(), cq, Bytes.toBytes(c));
+p.addColumn(Bytes.toBytes(cf), cq, Bytes.toBytes(c));
 ht.put(p);
 LOG.info("Inserting: rk: " + Bytes.toStringBinary(rk) + " cq: "
 + Bytes.toStringBinary(cq));
@@ -167,7 +167,7 @@ public class TestFuzzyRowAndColumnRangeFilter {
 private void runScanner(Table hTable, int expectedSize, Filter... filters) throws IOException {
 String cf = "f";
 Scan scan = new Scan();
-scan.addFamily(cf.getBytes());
+scan.addFamily(Bytes.toBytes(cf));
 FilterList filterList = new FilterList(filters);
 scan.setFilter(filterList);
@@ -136,7 +136,7 @@ public class TestFuzzyRowFilterEndToEnd {

 for (int i = 0; i < rows.length; i++) {
 Put p = new Put(Bytes.toBytesBinary(rows[i]));
-p.addColumn(cf.getBytes(), cq.getBytes(), "value".getBytes());
+p.addColumn(Bytes.toBytes(cf), Bytes.toBytes(cq), Bytes.toBytes("value"));
 ht.put(p);
 }

@@ -191,12 +191,12 @@ public class TestFuzzyRowFilterEndToEnd {

 for(int i=0; i < rows.length; i++){
 Put p = new Put(Bytes.toBytesBinary(rows[i]));
-p.addColumn(cf.getBytes(), cq.getBytes(), "value".getBytes());
+p.addColumn(Bytes.toBytes(cf), Bytes.toBytes(cq), Bytes.toBytes("value"));
 ht.put(p);
 }

 Put p = new Put(Bytes.toBytesBinary(badRow));
-p.addColumn(cf.getBytes(), cq.getBytes(), "value".getBytes());
+p.addColumn(Bytes.toBytes(cf), Bytes.toBytes(cq), Bytes.toBytes("value"));
 ht.put(p);

 TEST_UTIL.flush();
@@ -248,7 +248,7 @@ public class TestFuzzyRowFilterEndToEnd {

 Put p = new Put(rk);
 p.setDurability(Durability.SKIP_WAL);
-p.addColumn(cf.getBytes(), cq, Bytes.toBytes(c));
+p.addColumn(Bytes.toBytes(cf), cq, Bytes.toBytes(c));
 ht.put(p);
 }
 }
@@ -333,9 +333,9 @@ public class TestFuzzyRowFilterEndToEnd {

 String cf = "f";
 Scan scan = new Scan();
-scan.addFamily(cf.getBytes());
+scan.addFamily(Bytes.toBytes(cf));
 scan.setFilter(filter);
-List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(table.getBytes());
+List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(TableName.valueOf(table));
 HRegion first = regions.get(0);
 first.getScanner(scan);
 RegionScanner scanner = first.getScanner(scan);
@@ -385,7 +385,7 @@ public class TestFuzzyRowFilterEndToEnd {

 Put p = new Put(rk);
 p.setDurability(Durability.SKIP_WAL);
-p.addColumn(cf.getBytes(), cq, Bytes.toBytes(c));
+p.addColumn(Bytes.toBytes(cf), cq, Bytes.toBytes(c));
 ht.put(p);
 LOG.info("Inserting: rk: " + Bytes.toStringBinary(rk) + " cq: "
 + Bytes.toStringBinary(cq));
@@ -435,7 +435,7 @@ public class TestFuzzyRowFilterEndToEnd {
 throws IOException {
 String cf = "f";
 Scan scan = new Scan();
-scan.addFamily(cf.getBytes());
+scan.addFamily(Bytes.toBytes(cf));
 FilterList filterList = new FilterList(Operator.MUST_PASS_ALL, filter1, filter2);
 scan.setFilter(filterList);
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.FilterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Rule;
@@ -84,7 +85,7 @@ public class TestScanRowPrefix extends FilterTestingCluster {
 for (byte[] rowId: rowIds) {
 Put p = new Put(rowId);
 // Use the rowId as the column qualifier
-p.addColumn("F".getBytes(), rowId, "Dummy value".getBytes());
+p.addColumn(Bytes.toBytes("F"), rowId, Bytes.toBytes("Dummy value"));
 table.put(p);
 }
@@ -149,7 +149,7 @@ public class TestEncodedSeekers {
 private void doPuts(HRegion region) throws IOException{
 LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE);
 for (int i = 0; i < NUM_ROWS; ++i) {
-byte[] key = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
+byte[] key = Bytes.toBytes(LoadTestKVGenerator.md5PrefixedKey(i));
 for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
 Put put = new Put(key);
 put.setDurability(Durability.ASYNC_WAL);
@@ -177,7 +177,7 @@ public class TestEncodedSeekers {

 private void doGets(Region region) throws IOException{
 for (int i = 0; i < NUM_ROWS; ++i) {
-final byte[] rowKey = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
+final byte[] rowKey = Bytes.toBytes(LoadTestKVGenerator.md5PrefixedKey(i));
 for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
 final String qualStr = String.valueOf(j);
 if (VERBOSE) {
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.io.hfile;

 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;

 import java.util.Random;

@@ -49,7 +50,7 @@ public class RandomKeyValueUtil {

 public static KeyValue randomKeyValue(Random rand) {
 return new KeyValue(randomRowOrQualifier(rand),
-COLUMN_FAMILY_NAME.getBytes(), randomRowOrQualifier(rand),
+Bytes.toBytes(COLUMN_FAMILY_NAME), randomRowOrQualifier(rand),
 randomValue(rand));
 }

@@ -60,7 +61,7 @@ public class RandomKeyValueUtil {
 - MIN_ROW_OR_QUALIFIER_LENGTH + 1);
 for (int i = 0; i < fieldLen; ++i)
 field.append(randomReadableChar(rand));
-return field.toString().getBytes();
+return Bytes.toBytes(field.toString());
 }

 public static byte[] randomValue(Random rand) {
@@ -69,7 +70,7 @@ public class RandomKeyValueUtil {
 v.append((char) (32 + rand.nextInt(95)));
 }

-byte[] valueBytes = v.toString().getBytes();
+byte[] valueBytes = Bytes.toBytes(v.toString());
 return valueBytes;
 }

@@ -98,7 +99,7 @@ public class RandomKeyValueUtil {
 for (int j = 0; j < rand.nextInt(50); ++j)
 k.append(randomReadableChar(rand));

-byte[] keyBytes = k.toString().getBytes();
+byte[] keyBytes = Bytes.toBytes(k.toString());
 return keyBytes;
 }

@@ -117,7 +118,7 @@ public class RandomKeyValueUtil {
 for (int j = 0; j < suffixLength; ++j)
 k.append(randomReadableChar(rand));

-byte[] keyBytes = k.toString().getBytes();
+byte[] keyBytes = Bytes.toBytes(k.toString());
 return keyBytes;
 }

@@ -127,7 +128,7 @@ public class RandomKeyValueUtil {
 v.append((char) (32 + rand.nextInt(95)));
 }

-byte[] valueBytes = v.toString().getBytes();
+byte[] valueBytes = Bytes.toBytes(v.toString());
 return valueBytes;
 }
 }
@@ -289,7 +289,7 @@ public class TestHFile {
 }

 private byte[] getSomeKey(int rowId) {
-KeyValue kv = new KeyValue(String.format(localFormatter, Integer.valueOf(rowId)).getBytes(),
+KeyValue kv = new KeyValue(Bytes.toBytes(String.format(localFormatter, Integer.valueOf(rowId))),
 Bytes.toBytes("family"), Bytes.toBytes("qual"), HConstants.LATEST_TIMESTAMP, Type.Put);
 return kv.getKey();
 }
@@ -377,7 +377,7 @@ public class TestHFile {

 @Override
 public void write(DataOutput out) throws IOException {
-out.write(("something to test" + val).getBytes());
+out.write(Bytes.toBytes("something to test" + val));
 }

 @Override
@@ -394,7 +394,7 @@ public class TestHFile {
 for (int i = 0; i < n; i++) {
 ByteBuff actual = reader.getMetaBlock("HFileMeta" + i, false).getBufferWithoutHeader();
 ByteBuffer expected =
-ByteBuffer.wrap(("something to test" + i).getBytes());
+ByteBuffer.wrap(Bytes.toBytes("something to test" + i));
 assertEquals(
 "failed to match metadata",
 Bytes.toStringBinary(expected),
@@ -451,7 +451,8 @@ public class TestHFile {
 .withOutputStream(fout)
 .withFileContext(meta)
 .create();
-KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
+KeyValue kv = new KeyValue(Bytes.toBytes("foo"), Bytes.toBytes("f1"), null,
+Bytes.toBytes("value"));
 writer.append(kv);
 writer.close();
 fout.close();
@@ -276,7 +276,7 @@ public class TestHFileBlockIndex {
 new HFileBlockIndex.BlockIndexWriter(hbw, null, null);

 for (int i = 0; i < NUM_DATA_BLOCKS; ++i) {
-hbw.startWriting(BlockType.DATA).write(String.valueOf(rand.nextInt(1000)).getBytes());
+hbw.startWriting(BlockType.DATA).write(Bytes.toBytes(String.valueOf(rand.nextInt(1000))));
 long blockOffset = outputStream.getPos();
 hbw.writeHeaderAndData(outputStream);
@@ -179,7 +179,8 @@ public class TestHFileEncryption {
 .withFileContext(fileContext)
 .create();
 try {
-KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
+KeyValue kv = new KeyValue(Bytes.toBytes("foo"), Bytes.toBytes("f1"), null,
+Bytes.toBytes("value"));
 writer.append(kv);
 } finally {
 writer.close();
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.BytesWritable;
 import org.junit.ClassRule;
 import org.junit.experimental.categories.Category;
@@ -66,8 +67,8 @@ public class TestHFileSeek extends TestCase {
 public static final HBaseClassTestRule CLASS_RULE =
 HBaseClassTestRule.forClass(TestHFileSeek.class);

-private static final byte[] CF = "f1".getBytes();
+private static final byte[] CF = Bytes.toBytes("f1");
-private static final byte[] QUAL = "q1".getBytes();
+private static final byte[] QUAL = Bytes.toBytes("q1");
 private static final boolean USE_PREAD = true;
 private MyOptions options;
 private Configuration conf;
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -259,8 +260,8 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
 Mockito.when(services.getServerManager()).thenReturn(sm);
 balancer.setMasterServices(services);
 RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
-.setStartKey("key1".getBytes())
+.setStartKey(Bytes.toBytes("key1"))
-.setEndKey("key2".getBytes())
+.setEndKey(Bytes.toBytes("key2"))
 .setSplit(false)
 .setRegionId(100)
 .build();
@@ -284,8 +285,8 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
 List<RegionInfo> list2 = new ArrayList<>();
 // create a region (region1)
 RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
-.setStartKey("key1".getBytes())
+.setStartKey(Bytes.toBytes("key1"))
-.setEndKey("key2".getBytes())
+.setEndKey(Bytes.toBytes("key2"))
 .setSplit(false)
 .setRegionId(100)
 .build();
@@ -293,8 +294,8 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
 RegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
 // create a second region (region2)
 RegionInfo hri3 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
-.setStartKey("key2".getBytes())
+.setStartKey(Bytes.toBytes("key2"))
-.setEndKey("key3".getBytes())
+.setEndKey(Bytes.toBytes("key3"))
 .setSplit(false)
 .setRegionId(101)
 .build();
@@ -358,8 +359,8 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
 List<RegionInfo> list2 = new ArrayList<>();
 // create a region (region1)
 RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
-.setStartKey("key1".getBytes())
+.setStartKey(Bytes.toBytes("key1"))
-.setEndKey("key2".getBytes())
+.setEndKey(Bytes.toBytes("key2"))
 .setSplit(false)
 .setRegionId(100)
 .build();
@@ -367,8 +368,8 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
 RegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
 // create a second region (region2)
 RegionInfo hri3 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
-.setStartKey("key2".getBytes())
+.setStartKey(Bytes.toBytes("key2"))
-.setEndKey("key3".getBytes())
+.setEndKey(Bytes.toBytes("key3"))
 .setSplit(false)
 .setRegionId(101)
 .build();
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -80,7 +81,8 @@ public class TestLockManager {
 UTIL.startMiniCluster(1);
 masterServices = UTIL.getMiniHBaseCluster().getMaster();
 UTIL.getAdmin().createNamespace(NamespaceDescriptor.create(namespace).build());
-UTIL.createTable(tableName, new byte[][]{"fam".getBytes()}, new byte[][] {"1".getBytes()});
+UTIL.createTable(tableName, new byte[][]{Bytes.toBytes("fam")},
+new byte[][] {Bytes.toBytes("1")});
 List<HRegionInfo> regions = UTIL.getAdmin().getTableRegions(tableName);
 assert regions.size() > 0;
 tableRegions = new HRegionInfo[regions.size()];
@@ -289,7 +289,7 @@ public class MasterProcedureTestingUtility {
 TableDescriptor htd = master.getTableDescriptors().get(tableName);
 assertTrue(htd != null);

-assertTrue(htd.hasColumnFamily(family.getBytes()));
+assertTrue(htd.hasColumnFamily(Bytes.toBytes(family)));
 }

 public static void validateColumnFamilyDeletion(final HMaster master, final TableName tableName,
@@ -297,7 +297,7 @@ public class MasterProcedureTestingUtility {
 // verify htd
 TableDescriptor htd = master.getTableDescriptors().get(tableName);
 assertTrue(htd != null);
-assertFalse(htd.hasColumnFamily(family.getBytes()));
+assertFalse(htd.hasColumnFamily(Bytes.toBytes(family)));

 // verify fs
 final FileSystem fs = master.getMasterFileSystem().getFileSystem();
@@ -314,7 +314,7 @@ public class MasterProcedureTestingUtility {
 TableDescriptor htd = master.getTableDescriptors().get(tableName);
 assertTrue(htd != null);

-ColumnFamilyDescriptor hcfd = htd.getColumnFamily(family.getBytes());
+ColumnFamilyDescriptor hcfd = htd.getColumnFamily(Bytes.toBytes(family));
 assertEquals(0, ColumnFamilyDescriptor.COMPARATOR.compare(hcfd, columnDescriptor));
 }
@@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.DaemonThreadFactory;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
@@ -249,7 +250,7 @@ public class SimpleRSProcedureManager extends RegionServerProcedureManager {
 @Override
 public byte[] insideBarrier() throws ForeignException {
 execute();
-return SimpleMasterProcedureManager.SIMPLE_DATA.getBytes();
+return Bytes.toBytes(SimpleMasterProcedureManager.SIMPLE_DATA);
 }

 /**
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -68,6 +69,6 @@ public class TestProcedureManager {
 byte[] result = admin.execProcedureWithRet(SimpleMasterProcedureManager.SIMPLE_SIGNATURE,
 "mytest", new HashMap<>());
 assertArrayEquals("Incorrect return data from execProcedure",
-SimpleMasterProcedureManager.SIMPLE_DATA.getBytes(), result);
+Bytes.toBytes(SimpleMasterProcedureManager.SIMPLE_DATA), result);
 }
 }
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -67,7 +68,7 @@ public class TestZKProcedureControllers {
 private static final String CONTROLLER_NODE_NAME = "controller";
 private static final VerificationMode once = Mockito.times(1);

-private final byte[] memberData = new String("data from member").getBytes();
+private final byte[] memberData = Bytes.toBytes("data from member");

 @BeforeClass
 public static void setupTest() throws Exception {
@@ -357,8 +357,8 @@ public class TestCompoundBloomFilter {

 @Test
 public void testCreateKey() {
-byte[] row = "myRow".getBytes();
+byte[] row = Bytes.toBytes("myRow");
-byte[] qualifier = "myQualifier".getBytes();
+byte[] qualifier = Bytes.toBytes("myQualifier");
 // Mimic what Storefile.createBloomKeyValue() does
 byte[] rowKey = KeyValueUtil.createFirstOnRow(row, 0, row.length, new byte[0], 0, 0, row, 0, 0).getKey();
 byte[] rowColKey = KeyValueUtil.createFirstOnRow(row, 0, row.length,
@@ -106,7 +106,7 @@ public class TestMultiLogThreshold {
 RegionAction.Builder rab = RegionAction.newBuilder();
 rab.setRegion(RequestConverter.buildRegionSpecifier(
 HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME,
-new String("someStuff" + i).getBytes()));
+Bytes.toBytes("someStuff" + i)));
 for (int j = 0; j < numAs; j++) {
 Action.Builder ab = Action.newBuilder();
 rab.addAction(ab.build());
@@ -49,7 +49,6 @@ public class TestRegionReplicasWithModifyTable {

 private static final int NB_SERVERS = 3;
 private static Table table;
-private static final byte[] row = "TestRegionReplicasWithModifyTable".getBytes();

 private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
 private static final byte[] f = HConstants.CATALOG_FAMILY;
@@ -82,17 +82,17 @@ public class TestRegionServerReadRequestMetrics {
 LoggerFactory.getLogger(TestRegionServerReadRequestMetrics.class);
 private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 private static final TableName TABLE_NAME = TableName.valueOf("test");
-private static final byte[] CF1 = "c1".getBytes();
+private static final byte[] CF1 = Bytes.toBytes("c1");
-private static final byte[] CF2 = "c2".getBytes();
+private static final byte[] CF2 = Bytes.toBytes("c2");

-private static final byte[] ROW1 = "a".getBytes();
+private static final byte[] ROW1 = Bytes.toBytes("a");
-private static final byte[] ROW2 = "b".getBytes();
+private static final byte[] ROW2 = Bytes.toBytes("b");
-private static final byte[] ROW3 = "c".getBytes();
+private static final byte[] ROW3 = Bytes.toBytes("c");
-private static final byte[] COL1 = "q1".getBytes();
+private static final byte[] COL1 = Bytes.toBytes("q1");
-private static final byte[] COL2 = "q2".getBytes();
+private static final byte[] COL2 = Bytes.toBytes("q2");
-private static final byte[] COL3 = "q3".getBytes();
+private static final byte[] COL3 = Bytes.toBytes("q3");
-private static final byte[] VAL1 = "v1".getBytes();
+private static final byte[] VAL1 = Bytes.toBytes("v1");
-private static final byte[] VAL2 = "v2".getBytes();
+private static final byte[] VAL2 = Bytes.toBytes("v2");
 private static final byte[] VAL3 = Bytes.toBytes(0L);

 private static final int MAX_TRY = 20;
@@ -455,12 +455,12 @@ public class TestSplitTransactionOnCluster {
 try {
 for (int i = 0; i <= 5; i++) {
 String row = "row" + i;
-Put p = new Put(row.getBytes());
+Put p = new Put(Bytes.toBytes(row));
 String val = "Val" + i;
-p.addColumn("col".getBytes(), "ql".getBytes(), val.getBytes());
+p.addColumn(Bytes.toBytes("col"), Bytes.toBytes("ql"), Bytes.toBytes(val));
 table.put(p);
 admin.flush(userTableName);
-Delete d = new Delete(row.getBytes());
+Delete d = new Delete(Bytes.toBytes(row));
 // Do a normal delete
 table.delete(d);
 admin.flush(userTableName);
@@ -471,17 +471,17 @@ public class TestSplitTransactionOnCluster {
 .getRegionsOfTable(userTableName);
 assertEquals(1, regionsOfTable.size());
 RegionInfo hRegionInfo = regionsOfTable.get(0);
-Put p = new Put("row6".getBytes());
+Put p = new Put(Bytes.toBytes("row6"));
-p.addColumn("col".getBytes(), "ql".getBytes(), "val".getBytes());
+p.addColumn(Bytes.toBytes("col"), Bytes.toBytes("ql"), Bytes.toBytes("val"));
 table.put(p);
-p = new Put("row7".getBytes());
+p = new Put(Bytes.toBytes("row7"));
-p.addColumn("col".getBytes(), "ql".getBytes(), "val".getBytes());
+p.addColumn(Bytes.toBytes("col"), Bytes.toBytes("ql"), Bytes.toBytes("val"));
 table.put(p);
-p = new Put("row8".getBytes());
+p = new Put(Bytes.toBytes("row8"));
-p.addColumn("col".getBytes(), "ql".getBytes(), "val".getBytes());
+p.addColumn(Bytes.toBytes("col"), Bytes.toBytes("ql"), Bytes.toBytes("val"));
 table.put(p);
 admin.flush(userTableName);
-admin.splitRegionAsync(hRegionInfo.getRegionName(), "row7".getBytes());
+admin.splitRegionAsync(hRegionInfo.getRegionName(), Bytes.toBytes("row7"));
 regionsOfTable = cluster.getMaster()
 .getAssignmentManager().getRegionStates()
 .getRegionsOfTable(userTableName);
@@ -630,7 +630,7 @@ public class TestSplitTransactionOnCluster {
 tableName);
 assertEquals("The specified table should be present.", true, tableExists);
 // exists works on stale and we see the put after the flush
-byte[] b1 = "row1".getBytes();
+byte[] b1 = Bytes.toBytes("row1");
 Get g = new Get(b1);
 g.setConsistency(Consistency.STRONG);
 // The following GET will make a trip to the meta to get the new location of the 1st daughter
@@ -143,7 +143,7 @@ public class TestWALMonotonicallyIncreasingSeqId {
 for (int i = 0; i < 100; i++) {
 byte[] row = Bytes.toBytes("putRow" + i);
 Put put = new Put(row);
-put.addColumn("cf".getBytes(), Bytes.toBytes(0), Bytes.toBytes(""));
+put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes(0), new byte[0]);
 latch.await();
 region.batchMutate(new Mutation[] { put });
 Thread.sleep(10);
@@ -168,7 +168,7 @@ public class TestWALMonotonicallyIncreasingSeqId {
 for (int i = 0; i < 100; i++) {
 byte[] row = Bytes.toBytes("incrementRow" + i);
 Increment inc = new Increment(row);
-inc.addColumn("cf".getBytes(), Bytes.toBytes(0), 1);
+inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes(0), 1);
 // inc.setDurability(Durability.ASYNC_WAL);
 region.increment(inc);
 latch.countDown();
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -66,7 +67,7 @@ public class TestStoreHotnessProtector {

     Store mockStore1 = mock(Store.class);
     RegionInfo mockRegionInfo = mock(RegionInfo.class);
-    byte[] family = "testF1".getBytes();
+    byte[] family = Bytes.toBytes("testF1");

     when(mockRegion.getStore(family)).thenReturn(mockStore1);
     when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
@@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.replication.ReplicationEndpoint.ReplicateContext;
 import org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint.RegionReplicaReplayCallable;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
@@ -90,7 +91,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
   private static TableName tableName = TableName.valueOf(
       TestRegionReplicaReplicationEndpointNoMaster.class.getSimpleName());
   private static Table table;
-  private static final byte[] row = "TestRegionReplicaReplicator".getBytes();
+  private static final byte[] row = Bytes.toBytes("TestRegionReplicaReplicator");

   private static HRegionServer rs0;
   private static HRegionServer rs1;
@@ -876,7 +876,7 @@ public class TestWithDisabledAuthorization extends SecureTestUtil {
       public Object run() throws Exception {
         ACCESS_CONTROLLER.preCheckAndPut(ObserverContextImpl.createAndPrepare(RCP_ENV),
             TEST_ROW, TEST_FAMILY, TEST_Q1, CompareOperator.EQUAL,
-            new BinaryComparator("foo".getBytes()), new Put(TEST_ROW), true);
+            new BinaryComparator(Bytes.toBytes("foo")), new Put(TEST_ROW), true);
         return null;
       }
     }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE);
@@ -887,7 +887,7 @@ public class TestWithDisabledAuthorization extends SecureTestUtil {
       public Object run() throws Exception {
         ACCESS_CONTROLLER.preCheckAndDelete(ObserverContextImpl.createAndPrepare(RCP_ENV),
             TEST_ROW, TEST_FAMILY, TEST_Q1, CompareOperator.EQUAL,
-            new BinaryComparator("foo".getBytes()), new Delete(TEST_ROW), true);
+            new BinaryComparator(Bytes.toBytes("foo")), new Delete(TEST_ROW), true);
         return null;
       }
     }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE);
@@ -648,45 +648,45 @@ public class TestLoadIncrementalHFiles {

     first = "a";
     last = "e";
-    addStartEndKeysForTest(map, first.getBytes(), last.getBytes());
+    addStartEndKeysForTest(map, Bytes.toBytes(first), Bytes.toBytes(last));

     first = "r";
     last = "s";
-    addStartEndKeysForTest(map, first.getBytes(), last.getBytes());
+    addStartEndKeysForTest(map, Bytes.toBytes(first), Bytes.toBytes(last));

     first = "o";
     last = "p";
-    addStartEndKeysForTest(map, first.getBytes(), last.getBytes());
+    addStartEndKeysForTest(map, Bytes.toBytes(first), Bytes.toBytes(last));

     first = "g";
     last = "k";
-    addStartEndKeysForTest(map, first.getBytes(), last.getBytes());
+    addStartEndKeysForTest(map, Bytes.toBytes(first), Bytes.toBytes(last));

     first = "v";
     last = "x";
-    addStartEndKeysForTest(map, first.getBytes(), last.getBytes());
+    addStartEndKeysForTest(map, Bytes.toBytes(first), Bytes.toBytes(last));

     first = "c";
     last = "i";
-    addStartEndKeysForTest(map, first.getBytes(), last.getBytes());
+    addStartEndKeysForTest(map, Bytes.toBytes(first), Bytes.toBytes(last));

     first = "m";
     last = "q";
-    addStartEndKeysForTest(map, first.getBytes(), last.getBytes());
+    addStartEndKeysForTest(map, Bytes.toBytes(first), Bytes.toBytes(last));

     first = "s";
     last = "t";
-    addStartEndKeysForTest(map, first.getBytes(), last.getBytes());
+    addStartEndKeysForTest(map, Bytes.toBytes(first), Bytes.toBytes(last));

     first = "u";
     last = "w";
-    addStartEndKeysForTest(map, first.getBytes(), last.getBytes());
+    addStartEndKeysForTest(map, Bytes.toBytes(first), Bytes.toBytes(last));

     byte[][] keysArray = LoadIncrementalHFiles.inferBoundaries(map);
     byte[][] compare = new byte[3][];
-    compare[0] = "m".getBytes();
-    compare[1] = "r".getBytes();
-    compare[2] = "u".getBytes();
+    compare[0] = Bytes.toBytes("m");
+    compare[1] = Bytes.toBytes("r");
+    compare[2] = Bytes.toBytes("u");

     assertEquals(3, keysArray.length);

@@ -58,7 +58,7 @@ public class TestBloomFilterChunk extends TestCase {
         (int) bf2.byteSize, bf2.hash, bf2.hashCount));

     byte [] bkey = {1,2,3,4};
-    byte [] bval = "this is a much larger byte array".getBytes();
+    byte [] bval = Bytes.toBytes("this is a much larger byte array");

     bf1.add(bkey, 0, bkey.length);
     bf1.add(bval, 1, bval.length-1);
@@ -85,7 +85,7 @@ public class TestRegionMover {
         .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build();
     String startKey = "a";
     String endKey = "z";
-    admin.createTable(tableDesc, startKey.getBytes(), endKey.getBytes(), 9);
+    admin.createTable(tableDesc, Bytes.toBytes(startKey), Bytes.toBytes(endKey), 9);
   }

   @Test
@@ -86,21 +86,21 @@ public class TestRegionSplitter {
   public void testCreatePresplitTableHex() throws Exception {
     final List<byte[]> expectedBounds = new ArrayList<>(17);
     expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY);
-    expectedBounds.add("10000000".getBytes());
-    expectedBounds.add("20000000".getBytes());
-    expectedBounds.add("30000000".getBytes());
-    expectedBounds.add("40000000".getBytes());
-    expectedBounds.add("50000000".getBytes());
-    expectedBounds.add("60000000".getBytes());
-    expectedBounds.add("70000000".getBytes());
-    expectedBounds.add("80000000".getBytes());
-    expectedBounds.add("90000000".getBytes());
-    expectedBounds.add("a0000000".getBytes());
-    expectedBounds.add("b0000000".getBytes());
-    expectedBounds.add("c0000000".getBytes());
-    expectedBounds.add("d0000000".getBytes());
-    expectedBounds.add("e0000000".getBytes());
-    expectedBounds.add("f0000000".getBytes());
+    expectedBounds.add(Bytes.toBytes("10000000"));
+    expectedBounds.add(Bytes.toBytes("20000000"));
+    expectedBounds.add(Bytes.toBytes("30000000"));
+    expectedBounds.add(Bytes.toBytes("40000000"));
+    expectedBounds.add(Bytes.toBytes("50000000"));
+    expectedBounds.add(Bytes.toBytes("60000000"));
+    expectedBounds.add(Bytes.toBytes("70000000"));
+    expectedBounds.add(Bytes.toBytes("80000000"));
+    expectedBounds.add(Bytes.toBytes("90000000"));
+    expectedBounds.add(Bytes.toBytes("a0000000"));
+    expectedBounds.add(Bytes.toBytes("b0000000"));
+    expectedBounds.add(Bytes.toBytes("c0000000"));
+    expectedBounds.add(Bytes.toBytes("d0000000"));
+    expectedBounds.add(Bytes.toBytes("e0000000"));
+    expectedBounds.add(Bytes.toBytes("f0000000"));
     expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY);

     // Do table creation/pre-splitting and verification of region boundaries
@@ -149,41 +149,42 @@ public class TestRegionSplitter {

     byte[][] twoRegionsSplits = splitter.split(2);
     assertEquals(1, twoRegionsSplits.length);
-    assertArrayEquals("80000000".getBytes(), twoRegionsSplits[0]);
+    assertArrayEquals(Bytes.toBytes("80000000"), twoRegionsSplits[0]);

     byte[][] threeRegionsSplits = splitter.split(3);
     assertEquals(2, threeRegionsSplits.length);
-    byte[] expectedSplit0 = "55555555".getBytes();
+    byte[] expectedSplit0 = Bytes.toBytes("55555555");
     assertArrayEquals(expectedSplit0, threeRegionsSplits[0]);
-    byte[] expectedSplit1 = "aaaaaaaa".getBytes();
+    byte[] expectedSplit1 = Bytes.toBytes("aaaaaaaa");
     assertArrayEquals(expectedSplit1, threeRegionsSplits[1]);

     // Check splitting existing regions that have start and end points
-    byte[] splitPoint = splitter.split("10000000".getBytes(), "30000000".getBytes());
-    assertArrayEquals("20000000".getBytes(), splitPoint);
+    byte[] splitPoint = splitter.split(Bytes.toBytes("10000000"), Bytes.toBytes("30000000"));
+    assertArrayEquals(Bytes.toBytes("20000000"), splitPoint);

-    byte[] lastRow = "ffffffff".getBytes();
+    byte[] lastRow = Bytes.toBytes("ffffffff");
     assertArrayEquals(lastRow, splitter.lastRow());
-    byte[] firstRow = "00000000".getBytes();
+    byte[] firstRow = Bytes.toBytes("00000000");
     assertArrayEquals(firstRow, splitter.firstRow());

     // Halfway between 00... and 20... should be 10...
-    splitPoint = splitter.split(firstRow, "20000000".getBytes());
-    assertArrayEquals("10000000".getBytes(), splitPoint);
+    splitPoint = splitter.split(firstRow, Bytes.toBytes("20000000"));
+    assertArrayEquals(Bytes.toBytes("10000000"), splitPoint);

     // Halfway between df... and ff... should be ef....
-    splitPoint = splitter.split("dfffffff".getBytes(), lastRow);
-    assertArrayEquals("efffffff".getBytes(), splitPoint);
+    splitPoint = splitter.split(Bytes.toBytes("dfffffff"), lastRow);
+    assertArrayEquals(Bytes.toBytes("efffffff"), splitPoint);

     // Check splitting region with multiple mappers per region
-    byte[][] splits = splitter.split("00000000".getBytes(), "30000000".getBytes(), 3, false);
+    byte[][] splits = splitter.split(Bytes.toBytes("00000000"), Bytes.toBytes("30000000"),
+        3, false);
     assertEquals(2, splits.length);
-    assertArrayEquals("10000000".getBytes(), splits[0]);
-    assertArrayEquals("20000000".getBytes(), splits[1]);
+    assertArrayEquals(Bytes.toBytes("10000000"), splits[0]);
+    assertArrayEquals(Bytes.toBytes("20000000"), splits[1]);

-    splits = splitter.split("00000000".getBytes(), "20000000".getBytes(), 2, true);
+    splits = splitter.split(Bytes.toBytes("00000000"), Bytes.toBytes("20000000"), 2, true);
     assertEquals(3, splits.length);
-    assertArrayEquals("10000000".getBytes(), splits[1]);
+    assertArrayEquals(Bytes.toBytes("10000000"), splits[1]);
   }

   /**
@@ -197,45 +198,46 @@ public class TestRegionSplitter {

     byte[][] twoRegionsSplits = splitter.split(2);
     assertEquals(1, twoRegionsSplits.length);
-    assertArrayEquals("50000000".getBytes(), twoRegionsSplits[0]);
+    assertArrayEquals(Bytes.toBytes("50000000"), twoRegionsSplits[0]);

     byte[][] threeRegionsSplits = splitter.split(3);
     assertEquals(2, threeRegionsSplits.length);
-    byte[] expectedSplit0 = "33333333".getBytes();
+    byte[] expectedSplit0 = Bytes.toBytes("33333333");
     assertArrayEquals(expectedSplit0, threeRegionsSplits[0]);
-    byte[] expectedSplit1 = "66666666".getBytes();
+    byte[] expectedSplit1 = Bytes.toBytes("66666666");
     assertArrayEquals(expectedSplit1, threeRegionsSplits[1]);

     // Check splitting existing regions that have start and end points
-    byte[] splitPoint = splitter.split("10000000".getBytes(), "30000000".getBytes());
-    assertArrayEquals("20000000".getBytes(), splitPoint);
+    byte[] splitPoint = splitter.split(Bytes.toBytes("10000000"), Bytes.toBytes("30000000"));
+    assertArrayEquals(Bytes.toBytes("20000000"), splitPoint);

-    byte[] lastRow = "99999999".getBytes();
+    byte[] lastRow = Bytes.toBytes("99999999");
     assertArrayEquals(lastRow, splitter.lastRow());
-    byte[] firstRow = "00000000".getBytes();
+    byte[] firstRow = Bytes.toBytes("00000000");
     assertArrayEquals(firstRow, splitter.firstRow());

     // Halfway between 00... and 20... should be 10...
-    splitPoint = splitter.split(firstRow, "20000000".getBytes());
-    assertArrayEquals("10000000".getBytes(), splitPoint);
+    splitPoint = splitter.split(firstRow, Bytes.toBytes("20000000"));
+    assertArrayEquals(Bytes.toBytes("10000000"), splitPoint);

     // Halfway between 00... and 19... should be 09...
-    splitPoint = splitter.split(firstRow, "19999999".getBytes());
-    assertArrayEquals("09999999".getBytes(), splitPoint);
+    splitPoint = splitter.split(firstRow, Bytes.toBytes("19999999"));
+    assertArrayEquals(Bytes.toBytes("09999999"), splitPoint);

     // Halfway between 79... and 99... should be 89....
-    splitPoint = splitter.split("79999999".getBytes(), lastRow);
-    assertArrayEquals("89999999".getBytes(), splitPoint);
+    splitPoint = splitter.split(Bytes.toBytes("79999999"), lastRow);
+    assertArrayEquals(Bytes.toBytes("89999999"), splitPoint);

     // Check splitting region with multiple mappers per region
-    byte[][] splits = splitter.split("00000000".getBytes(), "30000000".getBytes(), 3, false);
+    byte[][] splits = splitter.split(Bytes.toBytes("00000000"), Bytes.toBytes("30000000"),
+        3, false);
     assertEquals(2, splits.length);
-    assertArrayEquals("10000000".getBytes(), splits[0]);
-    assertArrayEquals("20000000".getBytes(), splits[1]);
+    assertArrayEquals(Bytes.toBytes("10000000"), splits[0]);
+    assertArrayEquals(Bytes.toBytes("20000000"), splits[1]);

-    splits = splitter.split("00000000".getBytes(), "20000000".getBytes(), 2, true);
+    splits = splitter.split(Bytes.toBytes("00000000"), Bytes.toBytes("20000000"), 2, true);
     assertEquals(3, splits.length);
-    assertArrayEquals("10000000".getBytes(), splits[1]);
+    assertArrayEquals(Bytes.toBytes("10000000"), splits[1]);
   }

   /**
@@ -278,14 +280,14 @@ public class TestRegionSplitter {

     splitPoint = splitter.split(new byte[] {(byte)0xdf, xFF, xFF, xFF, xFF,
         xFF, xFF, xFF}, lastRow);
-    assertArrayEquals(splitPoint, new byte[] { (byte) 0xef, xFF, xFF, xFF, xFF, xFF, xFF, xFF
-    });
+    assertArrayEquals(splitPoint, new byte[] { (byte) 0xef, xFF, xFF, xFF, xFF, xFF, xFF, xFF});

     splitPoint = splitter.split(new byte[] {'a', 'a', 'a'}, new byte[] {'a', 'a', 'b'});
     assertArrayEquals(splitPoint, new byte[] { 'a', 'a', 'a', (byte) 0x80 });

     // Check splitting region with multiple mappers per region
-    byte[][] splits = splitter.split(new byte[] {'a', 'a', 'a'}, new byte[] {'a', 'a', 'd'}, 3, false);
+    byte[][] splits = splitter.split(new byte[] {'a', 'a', 'a'}, new byte[] {'a', 'a', 'd'},
+        3, false);
     assertEquals(2, splits.length);
     assertArrayEquals(splits[0], new byte[]{'a', 'a', 'b'});
     assertArrayEquals(splits[1], new byte[]{'a', 'a', 'c'});
@@ -425,10 +427,8 @@ public class TestRegionSplitter {
   /**
    * List.indexOf() doesn't really work for a List<byte[]>, because byte[]
    * doesn't override equals(). This method checks whether a list contains
-   * a given element by checking each element using the byte array
-   * comparator.
-   * @return the index of the first element that equals compareTo, or -1
-   *         if no elements are equal.
+   * a given element by checking each element using the byte array comparator.
+   * @return the index of the first element that equals compareTo, or -1 if no elements are equal.
    */
   static private int indexOfBytes(List<byte[]> list, byte[] compareTo) {
     int listIndex = 0;
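For reference, a standalone sketch of the kind of content comparison this
javadoc describes; the hunk only shows the method's opening lines, and the real
helper presumably uses Bytes.compareTo from org.apache.hadoop.hbase.util.Bytes
rather than the java.util.Arrays call used here to keep the sketch self-contained:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class IndexOfBytesSketch {
      // byte[] doesn't override equals(), so compare contents explicitly.
      static int indexOfBytes(List<byte[]> list, byte[] compareTo) {
        int listIndex = 0;
        for (byte[] elem : list) {
          if (Arrays.equals(elem, compareTo)) {
            return listIndex;
          }
          listIndex++;
        }
        return -1; // no element matched
      }

      public static void main(String[] args) {
        List<byte[]> list = new ArrayList<>();
        list.add(new byte[] { 1, 2 });
        list.add(new byte[] { 3, 4 });
        System.out.println(indexOfBytes(list, new byte[] { 3, 4 })); // prints 1
      }
    }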
@@ -22,8 +22,10 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;

 import java.io.File;
-import java.io.FileWriter;
+import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.List;
 import javax.security.auth.login.AppConfigurationEntry;
@@ -65,9 +67,9 @@ public class TestZooKeeperACL {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     File saslConfFile = File.createTempFile("tmp", "jaas.conf");
-    FileWriter fwriter = new FileWriter(saslConfFile);
-    fwriter.write("" +
+    try (OutputStreamWriter fwriter = new OutputStreamWriter(
+        new FileOutputStream(saslConfFile), StandardCharsets.UTF_8)) {
+      fwriter.write(
       "Server {\n" +
       "org.apache.zookeeper.server.auth.DigestLoginModule required\n" +
       "user_hbase=\"secret\";\n" +
@@ -77,7 +79,7 @@ public class TestZooKeeperACL {
       "username=\"hbase\"\n" +
       "password=\"secret\";\n" +
       "};" + "\n");
-    fwriter.close();
+    }
     System.setProperty("java.security.auth.login.config",
         saslConfFile.getAbsolutePath());
     System.setProperty("zookeeper.authProvider.1",
@@ -279,10 +281,11 @@ public class TestZooKeeperACL {
     assertEquals(testJaasConfig, secureZKAvailable);
     // Define Jaas configuration without ZooKeeper Jaas config
     File saslConfFile = File.createTempFile("tmp", "fakeJaas.conf");
-    FileWriter fwriter = new FileWriter(saslConfFile);
-    fwriter.write("");
-    fwriter.close();
+    try (OutputStreamWriter fwriter = new OutputStreamWriter(
+        new FileOutputStream(saslConfFile), StandardCharsets.UTF_8)) {
+      fwriter.write("");
+    }

     System.setProperty("java.security.auth.login.config",
         saslConfFile.getAbsolutePath());
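The FileWriter replacements above follow the same theme: FileWriter offered no
way to pick a charset before JDK 11, so the JAAS config file's encoding depended
on the platform. Wrapping a FileOutputStream in an OutputStreamWriter pins it to
UTF-8, and try-with-resources replaces the explicit close(). A standalone sketch
of the pattern (file name and content here are illustrative):

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.OutputStreamWriter;
    import java.nio.charset.StandardCharsets;

    public class Utf8ConfigWriter {
      public static void main(String[] args) throws IOException {
        File conf = File.createTempFile("example", ".conf");
        // The writer is closed automatically, even if write() throws.
        try (OutputStreamWriter w = new OutputStreamWriter(
            new FileOutputStream(conf), StandardCharsets.UTF_8)) {
          w.write("user=\"hbase\"\n"); // always UTF-8, regardless of file.encoding
        }
        System.out.println("wrote " + conf.getAbsolutePath());
      }
    }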
@@ -34,6 +34,7 @@ import java.util.Random;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.server.NIOServerCnxnFactory;
 import org.apache.zookeeper.server.ZooKeeperServer;
@@ -54,6 +55,7 @@ public class MiniZooKeeperCluster {

   private static final int TICK_TIME = 2000;
   private static final int DEFAULT_CONNECTION_TIMEOUT = 30000;
+  private static final byte[] STATIC_BYTES = Bytes.toBytes("stat");
   private int connectionTimeout;

   private boolean started;
@@ -406,7 +408,7 @@ public class MiniZooKeeperCluster {
     Socket sock = new Socket("localhost", port);
     try {
       OutputStream outstream = sock.getOutputStream();
-      outstream.write("stat".getBytes());
+      outstream.write(STATIC_BYTES);
       outstream.flush();
     } finally {
       sock.close();
@@ -436,7 +438,7 @@ public class MiniZooKeeperCluster {
     BufferedReader reader = null;
     try {
       OutputStream outstream = sock.getOutputStream();
-      outstream.write("stat".getBytes());
+      outstream.write(STATIC_BYTES);
       outstream.flush();

       Reader isr = new InputStreamReader(sock.getInputStream());
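In MiniZooKeeperCluster the fix also hoists the encoded "stat" command into the
STATIC_BYTES constant added above, so the four-letter-word probe is encoded once
instead of on every call. A simplified standalone sketch of the resulting shape
(socket handling condensed; the real methods carry more error handling):

    import java.io.IOException;
    import java.io.OutputStream;
    import java.net.Socket;
    import java.nio.charset.StandardCharsets;

    public class StatProbe {
      // Same idea as Bytes.toBytes("stat"): encode once, always as UTF-8.
      private static final byte[] STATIC_BYTES = "stat".getBytes(StandardCharsets.UTF_8);

      static void sendStat(String host, int port) throws IOException {
        try (Socket sock = new Socket(host, port)) {
          OutputStream out = sock.getOutputStream();
          out.write(STATIC_BYTES); // ZooKeeper "stat" four-letter command
          out.flush();
        }
      }
    }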
@@ -81,14 +81,14 @@ public class TestRecoverableZooKeeper {
     String ensemble = ZKConfig.getZKQuorumServersString(conf);
     RecoverableZooKeeper rzk = ZKUtil.connect(conf, ensemble, zkw);
     rzk.create(znode, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-    rzk.setData(znode, "OPENING".getBytes(), 0);
+    rzk.setData(znode, Bytes.toBytes("OPENING"), 0);
     Field zkField = RecoverableZooKeeper.class.getDeclaredField("zk");
     zkField.setAccessible(true);
     int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
     ZookeeperStub zkStub = new ZookeeperStub(ensemble, timeout, zkw);
     zkStub.setThrowExceptionInNumOperations(1);
     zkField.set(rzk, zkStub);
-    byte[] opened = "OPENED".getBytes();
+    byte[] opened = Bytes.toBytes("OPENED");
     rzk.setData(znode, opened, 1);
     byte[] data = rzk.getData(znode, false, new Stat());
     assertTrue(Bytes.equals(opened, data));
@@ -157,7 +157,7 @@ public class TestZKUtil {
     String quorumServers = ZKConfig.getZKQuorumServersString(c);
     int sessionTimeout = 5 * 1000; // 5 seconds
     ZooKeeper zk = new ZooKeeper(quorumServers, sessionTimeout, EmptyWatcher.instance);
-    zk.addAuthInfo("digest", "hbase:rox".getBytes());
+    zk.addAuthInfo("digest", Bytes.toBytes("hbase:rox"));

     // Save the previous ACL
     Stat s = null;
@@ -223,7 +223,7 @@ public class TestZKUtil {

     // Restore the ACL
     ZooKeeper zk3 = new ZooKeeper(quorumServers, sessionTimeout, EmptyWatcher.instance);
-    zk3.addAuthInfo("digest", "hbase:rox".getBytes());
+    zk3.addAuthInfo("digest", Bytes.toBytes("hbase:rox"));
     try {
       zk3.setACL("/", oldACL, -1);
     } finally {