HBASE-14675 Exorcise deprecated Put#add(...) and replace with Put#addColumn(...)

Jonathan M Hsieh 2015-10-25 14:56:12 -07:00
parent 452e38ff80
commit de9555cec4
181 changed files with 1363 additions and 1378 deletions
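The change itself is mechanical: each call to a removed Put#add(...) overload becomes the equivalent Put#addColumn(...) call with the same arguments. A minimal migration sketch, assuming a hypothetical table "example_table" with a "cf" family (names are illustrative, not taken from this commit):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutMigrationSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("example_table"))) {
      Put put = new Put(Bytes.toBytes("row1"));
      // Before this commit (deprecated since 1.0.0):
      //   put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      // After this commit:
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(put);
    }
  }
}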

View File

@ -1713,7 +1713,7 @@ public class MetaTableAccessor {
public static Put makePutFromTableState(TableState state) {
long time = EnvironmentEdgeManager.currentTime();
Put put = new Put(state.getTableName().getName(), time);
put.add(getTableFamily(), getStateColumn(), state.convert().toByteArray());
put.addColumn(getTableFamily(), getStateColumn(), state.convert().toByteArray());
return put;
}

View File

@ -44,8 +44,8 @@ import org.apache.hadoop.hbase.util.Bytes;
* Used to perform Put operations for a single row.
* <p>
* To perform a Put, instantiate a Put object with the row to insert to and
* for each column to be inserted, execute {@link #add(byte[], byte[], byte[]) add} or
* {@link #add(byte[], byte[], long, byte[]) add} if setting the timestamp.
* for each column to be inserted, execute {@link #addColumn(byte[], byte[], byte[]) add} or
* {@link #addColumn(byte[], byte[], long, byte[]) add} if setting the timestamp.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
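A short usage sketch matching the updated javadoc: one Put per row, one addColumn call per cell, with the long-timestamp overload when the cell version should be pinned. The "info" family, qualifiers, and timestamp are illustrative assumptions, and the Table handle is assumed to be opened elsewhere:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class PutUsageSketch {
  static void writeRow(Table table) throws IOException {
    Put put = new Put(Bytes.toBytes("row-1"));
    put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes("alice"));
    // Timestamp overload: sets the cell version explicitly instead of using server time.
    put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("age"), 1445000000000L, Bytes.toBytes("42"));
    table.put(put);
  }
}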
@ -131,19 +131,6 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
}
}
/**
* Add the specified column and value to this Put operation.
* @param family family name
* @param qualifier column qualifier
* @param value column value
* @return this
* @deprecated Since 1.0.0. Use {@link #addColumn(byte[], byte[], byte[])}
*/
@Deprecated
public Put add(byte [] family, byte [] qualifier, byte [] value) {
return addColumn(family, qualifier, value);
}
/**
* Add the specified column and value to this Put operation.
* @param family family name
@ -156,7 +143,7 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
}
/**
* See {@link #add(byte[], byte[], byte[])}. This version expects
* See {@link #addColumn(byte[], byte[], byte[])}. This version expects
* that the underlying arrays won't change. It's intended
* for usage internal to HBase and for advanced client applications.
*/
@ -175,21 +162,6 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
return addImmutable(family, qualifier, this.ts, value, tag);
}
/**
* Add the specified column and value, with the specified timestamp as
* its version to this Put operation.
* @param family family name
* @param qualifier column qualifier
* @param ts version timestamp
* @param value column value
* @return this
* @deprecated Since 1.0.0. Use {@link #addColumn(byte[], byte[], long, byte[])}
*/
@Deprecated
public Put add(byte [] family, byte [] qualifier, long ts, byte [] value) {
return addColumn(family, qualifier, ts, value);
}
/**
* Add the specified column and value, with the specified timestamp as
* its version to this Put operation.
@ -211,7 +183,7 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
}
/**
* See {@link #add(byte[], byte[], long, byte[])}. This version expects
* See {@link #addColumn(byte[], byte[], long, byte[])}. This version expects
* that the underlying arrays won't change. It's intended
* for usage internal to HBase and for advanced client applications.
*/
@ -261,21 +233,6 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
}
/**
* Add the specified column and value, with the specified timestamp as
* its version to this Put operation.
* @param family family name
* @param qualifier column qualifier
* @param ts version timestamp
* @param value column value
* @return this
* @deprecated Since 1.0.0. Use {@link Put#addColumn(byte[], ByteBuffer, long, ByteBuffer)}
*/
@Deprecated
public Put add(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) {
return addColumn(family, qualifier, ts, value);
}
/**
* Add the specified column and value, with the specified timestamp as
* its version to this Put operation.
@ -297,7 +254,7 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
}
/**
* See {@link #add(byte[], ByteBuffer, long, ByteBuffer)}. This version expects
* See {@link #addColumn(byte[], ByteBuffer, long, ByteBuffer)}. This version expects
* that the underlying arrays won't change. It's intended
* for usage internal to HBase and for advanced client applications.
*/
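The removed ByteBuffer overload maps the same way onto addColumn(byte[], ByteBuffer, long, ByteBuffer). A sketch with an assumed "cf" family, wrapping plain byte arrays with java.nio.ByteBuffer:

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

final class ByteBufferPutSketch {
  static Put makePut(byte[] row, long ts) {
    Put put = new Put(row);
    // Replacement for the removed add(byte[], ByteBuffer, long, ByteBuffer).
    put.addColumn(Bytes.toBytes("cf"),
        ByteBuffer.wrap(Bytes.toBytes("qual")), ts,
        ByteBuffer.wrap(Bytes.toBytes("value")));
    return put;
  }
}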

View File

@ -1071,7 +1071,7 @@ public class TestAsyncProcess {
throw new IllegalArgumentException("unknown " + regCnt);
}
p.add(DUMMY_BYTES_1, DUMMY_BYTES_1, DUMMY_BYTES_1);
p.addColumn(DUMMY_BYTES_1, DUMMY_BYTES_1, DUMMY_BYTES_1);
return p;
}

View File

@ -703,7 +703,7 @@ public class TestClientNoCluster extends Configured implements Tool {
for (int i = 0; i < namespaceSpan; i++) {
byte [] b = format(rd.nextLong());
Put p = new Put(b);
p.add(HConstants.CATALOG_FAMILY, b, b);
p.addColumn(HConstants.CATALOG_FAMILY, b, b);
mutator.mutate(p);
if (i % printInterval == 0) {
LOG.info("Put " + printInterval + "/" + stopWatch.elapsedMillis());

View File

@ -337,7 +337,7 @@ public class TestOperation {
// produce a Put operation
Put put = new Put(ROW);
put.add(FAMILY, QUALIFIER, VALUE);
put.addColumn(FAMILY, QUALIFIER, VALUE);
// get its JSON representation, and parse it
json = put.toJSON();
parsedJSON = mapper.readValue(json, HashMap.class);

View File

@ -41,7 +41,7 @@ public class TestPutDotHas {
@Before
public void setUp() {
put.add(FAMILY_01, QUALIFIER_01, TS, VALUE_01);
put.addColumn(FAMILY_01, QUALIFIER_01, TS, VALUE_01);
}
@Test

View File

@ -89,7 +89,7 @@ public class IndexBuilder extends Configured implements Tool {
// original: row 123 attribute:phone 555-1212
// index: row 555-1212 INDEX:ROW 123
Put put = new Put(value);
put.add(INDEX_COLUMN, INDEX_QUALIFIER, rowKey.get());
put.addColumn(INDEX_COLUMN, INDEX_QUALIFIER, rowKey.get());
context.write(tableName, put);
}
}

View File

@ -26,7 +26,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
@ -91,7 +90,7 @@ public class SampleUploader extends Configured implements Tool {
// Create Put
Put put = new Put(row);
put.add(family, qualifier, value);
put.addColumn(family, qualifier, value);
// Uncomment below to disable WAL. This will improve performance but means
// you will experience data loss in the case of a RegionServer crash.
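As the comment above notes, the WAL can be skipped per mutation through the durability setting; a hedged sketch of that trade-off (the helper name and arguments are illustrative, not part of this commit):

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;

final class SkipWalSketch {
  static Put fastButUnsafePut(byte[] row, byte[] family, byte[] qualifier, byte[] value) {
    Put put = new Put(row);
    put.addColumn(family, qualifier, value);
    // Skip the write-ahead log: faster writes, but edits are lost if the RegionServer crashes.
    put.setDurability(Durability.SKIP_WAL);
    return put;
  }
}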

View File

@ -223,8 +223,8 @@ public class TestBulkDeleteProtocol {
List<Put> puts = new ArrayList<Put>(100);
for (int j = 0; j < 100; j++) {
Put put = new Put(Bytes.toBytes(j));
put.add(FAMILY1, QUALIFIER1, "v1".getBytes());
put.add(FAMILY2, QUALIFIER2, "v2".getBytes());
put.addColumn(FAMILY1, QUALIFIER1, "v1".getBytes());
put.addColumn(FAMILY2, QUALIFIER2, "v2".getBytes());
puts.add(put);
}
ht.put(puts);
@ -251,15 +251,15 @@ public class TestBulkDeleteProtocol {
for (int j = 0; j < 100; j++) {
Put put = new Put(Bytes.toBytes(j));
byte[] value = "v1".getBytes();
put.add(FAMILY1, QUALIFIER1, 1234L, value);
put.add(FAMILY1, QUALIFIER2, 1234L, value);
put.add(FAMILY1, QUALIFIER3, 1234L, value);
put.addColumn(FAMILY1, QUALIFIER1, 1234L, value);
put.addColumn(FAMILY1, QUALIFIER2, 1234L, value);
put.addColumn(FAMILY1, QUALIFIER3, 1234L, value);
// Latest version values
value = "v2".getBytes();
put.add(FAMILY1, QUALIFIER1, value);
put.add(FAMILY1, QUALIFIER2, value);
put.add(FAMILY1, QUALIFIER3, value);
put.add(FAMILY1, null, value);
put.addColumn(FAMILY1, QUALIFIER1, value);
put.addColumn(FAMILY1, QUALIFIER2, value);
put.addColumn(FAMILY1, QUALIFIER3, value);
put.addColumn(FAMILY1, null, value);
puts.add(put);
}
ht.put(puts);
@ -300,19 +300,19 @@ public class TestBulkDeleteProtocol {
Put put = new Put(Bytes.toBytes(j));
// TS = 1000L
byte[] value = "v1".getBytes();
put.add(FAMILY1, QUALIFIER1, 1000L, value);
put.add(FAMILY1, QUALIFIER2, 1000L, value);
put.add(FAMILY1, QUALIFIER3, 1000L, value);
put.addColumn(FAMILY1, QUALIFIER1, 1000L, value);
put.addColumn(FAMILY1, QUALIFIER2, 1000L, value);
put.addColumn(FAMILY1, QUALIFIER3, 1000L, value);
// TS = 1234L
value = "v2".getBytes();
put.add(FAMILY1, QUALIFIER1, 1234L, value);
put.add(FAMILY1, QUALIFIER2, 1234L, value);
put.add(FAMILY1, QUALIFIER3, 1234L, value);
put.addColumn(FAMILY1, QUALIFIER1, 1234L, value);
put.addColumn(FAMILY1, QUALIFIER2, 1234L, value);
put.addColumn(FAMILY1, QUALIFIER3, 1234L, value);
// Latest version values
value = "v3".getBytes();
put.add(FAMILY1, QUALIFIER1, value);
put.add(FAMILY1, QUALIFIER2, value);
put.add(FAMILY1, QUALIFIER3, value);
put.addColumn(FAMILY1, QUALIFIER1, value);
put.addColumn(FAMILY1, QUALIFIER2, value);
put.addColumn(FAMILY1, QUALIFIER3, value);
puts.add(put);
}
ht.put(puts);
@ -347,24 +347,24 @@ public class TestBulkDeleteProtocol {
Put put = new Put(Bytes.toBytes(j));
// TS = 1000L
byte[] value = "v1".getBytes();
put.add(FAMILY1, QUALIFIER1, 1000L, value);
put.add(FAMILY1, QUALIFIER2, 1000L, value);
put.add(FAMILY1, QUALIFIER3, 1000L, value);
put.addColumn(FAMILY1, QUALIFIER1, 1000L, value);
put.addColumn(FAMILY1, QUALIFIER2, 1000L, value);
put.addColumn(FAMILY1, QUALIFIER3, 1000L, value);
// TS = 1234L
value = "v2".getBytes();
put.add(FAMILY1, QUALIFIER1, 1234L, value);
put.add(FAMILY1, QUALIFIER2, 1234L, value);
put.add(FAMILY1, QUALIFIER3, 1234L, value);
put.addColumn(FAMILY1, QUALIFIER1, 1234L, value);
put.addColumn(FAMILY1, QUALIFIER2, 1234L, value);
put.addColumn(FAMILY1, QUALIFIER3, 1234L, value);
// TS = 2000L
value = "v3".getBytes();
put.add(FAMILY1, QUALIFIER1, 2000L, value);
put.add(FAMILY1, QUALIFIER2, 2000L, value);
put.add(FAMILY1, QUALIFIER3, 2000L, value);
put.addColumn(FAMILY1, QUALIFIER1, 2000L, value);
put.addColumn(FAMILY1, QUALIFIER2, 2000L, value);
put.addColumn(FAMILY1, QUALIFIER3, 2000L, value);
// Latest version values
value = "v4".getBytes();
put.add(FAMILY1, QUALIFIER1, value);
put.add(FAMILY1, QUALIFIER2, value);
put.add(FAMILY1, QUALIFIER3, value);
put.addColumn(FAMILY1, QUALIFIER1, value);
put.addColumn(FAMILY1, QUALIFIER2, value);
put.addColumn(FAMILY1, QUALIFIER3, value);
puts.add(put);
}
ht.put(puts);
@ -435,9 +435,9 @@ public class TestBulkDeleteProtocol {
private Put createPut(byte[] rowkey, String value) throws IOException {
Put put = new Put(rowkey);
put.add(FAMILY1, QUALIFIER1, value.getBytes());
put.add(FAMILY1, QUALIFIER2, value.getBytes());
put.add(FAMILY1, QUALIFIER3, value.getBytes());
put.addColumn(FAMILY1, QUALIFIER1, value.getBytes());
put.addColumn(FAMILY1, QUALIFIER2, value.getBytes());
put.addColumn(FAMILY1, QUALIFIER3, value.getBytes());
return put;
}
}

View File

@ -76,7 +76,7 @@ public class TestRowCountEndpoint {
for (int i=0; i<5; i++) {
byte[] iBytes = Bytes.toBytes(i);
Put p = new Put(iBytes);
p.add(TEST_FAMILY, TEST_COLUMN, iBytes);
p.addColumn(TEST_FAMILY, TEST_COLUMN, iBytes);
table.put(p);
}

View File

@ -93,10 +93,10 @@ public class TestZooKeeperScanPolicyObserver {
long ts = now - 2000;
Put p = new Put(R);
p.add(F, Q, ts, Q);
p.addColumn(F, Q, ts, Q);
t.put(p);
p = new Put(R);
p.add(F, Q, ts+1, Q);
p.addColumn(F, Q, ts + 1, Q);
t.put(p);
// these two should be expired but for the override

View File

@ -496,7 +496,7 @@ public class IntegrationTestMTTR {
@Override
protected boolean doAction() throws Exception {
Put p = new Put(Bytes.toBytes(RandomStringUtils.randomAlphanumeric(5)));
p.add(FAMILY, Bytes.toBytes("\0"), Bytes.toBytes(RandomStringUtils.randomAscii(5)));
p.addColumn(FAMILY, Bytes.toBytes("\0"), Bytes.toBytes(RandomStringUtils.randomAscii(5)));
table.put(p);
return true;
}

View File

@ -211,13 +211,14 @@ public class IntegrationTestBigLinkedListWithVisibility extends IntegrationTestB
for (int i = 0; i < current.length; i++) {
for (int j = 0; j < DEFAULT_TABLES_COUNT; j++) {
Put put = new Put(current[i]);
put.add(FAMILY_NAME, COLUMN_PREV, prev == null ? NO_KEY : prev[i]);
byte[] value = prev == null ? NO_KEY : prev[i];
put.addColumn(FAMILY_NAME, COLUMN_PREV, value);
if (count >= 0) {
put.add(FAMILY_NAME, COLUMN_COUNT, Bytes.toBytes(count + i));
put.addColumn(FAMILY_NAME, COLUMN_COUNT, Bytes.toBytes(count + i));
}
if (id != null) {
put.add(FAMILY_NAME, COLUMN_CLIENT, id);
put.addColumn(FAMILY_NAME, COLUMN_CLIENT, id);
}
visibilityExps = split[j * 2] + OR + split[(j * 2) + 1];
put.setCellVisibility(new CellVisibility(visibilityExps));

View File

@ -242,12 +242,12 @@ public void cleanUpCluster() throws Exception {
Bytes.putLong(row, 0, byteSwapped);
Put p = new Put(row);
p.add(TEST_FAMILY, TEST_QUALIFIER, HConstants.EMPTY_BYTE_ARRAY);
p.addColumn(TEST_FAMILY, TEST_QUALIFIER, HConstants.EMPTY_BYTE_ARRAY);
if (blockStart > 0) {
for (int j = 0; j < numBackReferencesPerRow; j++) {
long referredRow = blockStart - BLOCK_SIZE + rand.nextInt(BLOCK_SIZE);
Bytes.putLong(row, 0, swapLong(referredRow));
p.add(TEST_FAMILY, row, HConstants.EMPTY_BYTE_ARRAY);
p.addColumn(TEST_FAMILY, row, HConstants.EMPTY_BYTE_ARRAY);
}
refsWritten.increment(1);
}

View File

@ -174,7 +174,7 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
String exp = VISIBILITY_EXPS[expIdx];
byte[] row = Bytes.add(Bytes.toBytes(i), Bytes.toBytes(suffix), Bytes.toBytes(exp));
Put p = new Put(row);
p.add(TEST_FAMILY, TEST_QUALIFIER, HConstants.EMPTY_BYTE_ARRAY);
p.addColumn(TEST_FAMILY, TEST_QUALIFIER, HConstants.EMPTY_BYTE_ARRAY);
p.setCellVisibility(new CellVisibility(exp));
getCounter(expIdx).increment(1);
mutator.mutate(p);

View File

@ -245,7 +245,7 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
Put p = new Put(Bytes.toBytes(rk));
for (int y = 0; y < 10; y++) {
random.nextBytes(value);
p.add(familyName, Bytes.toBytes(random.nextLong()), value);
p.addColumn(familyName, Bytes.toBytes(random.nextLong()), value);
}
ht.mutate(p);
}

View File

@ -1131,7 +1131,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
value, tags);
put.add(kv);
} else {
put.add(FAMILY_NAME, QUALIFIER_NAME, value);
put.addColumn(FAMILY_NAME, QUALIFIER_NAME, value);
}
put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
mutator.mutate(put);
@ -1202,7 +1202,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
value, tags);
put.add(kv);
} else {
put.add(FAMILY_NAME, QUALIFIER_NAME, value);
put.addColumn(FAMILY_NAME, QUALIFIER_NAME, value);
}
put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
mutator.mutate(put);

View File

@ -100,7 +100,7 @@ public class TestScannerResource {
k[2] = b3;
Put put = new Put(k);
put.setDurability(Durability.SKIP_WAL);
put.add(famAndQf[0], famAndQf[1], k);
put.addColumn(famAndQf[0], famAndQf[1], k);
puts.add(put);
}
}

View File

@ -149,7 +149,7 @@ public class TestScannersWithFilters {
Put p = new Put(ROW);
p.setDurability(Durability.SKIP_WAL);
for(byte [] QUALIFIER : QUALIFIERS_ONE) {
p.add(FAMILIES[0], QUALIFIER, VALUES[0]);
p.addColumn(FAMILIES[0], QUALIFIER, VALUES[0]);
}
table.put(p);
}
@ -157,7 +157,7 @@ public class TestScannersWithFilters {
Put p = new Put(ROW);
p.setDurability(Durability.SKIP_WAL);
for(byte [] QUALIFIER : QUALIFIERS_TWO) {
p.add(FAMILIES[1], QUALIFIER, VALUES[1]);
p.addColumn(FAMILIES[1], QUALIFIER, VALUES[1]);
}
table.put(p);
}
@ -167,7 +167,7 @@ public class TestScannersWithFilters {
Put p = new Put(ROW);
p.setDurability(Durability.SKIP_WAL);
for(byte [] QUALIFIER : QUALIFIERS_ONE) {
p.add(FAMILIES[1], QUALIFIER, VALUES[0]);
p.addColumn(FAMILIES[1], QUALIFIER, VALUES[0]);
}
table.put(p);
}
@ -175,7 +175,7 @@ public class TestScannersWithFilters {
Put p = new Put(ROW);
p.setDurability(Durability.SKIP_WAL);
for(byte [] QUALIFIER : QUALIFIERS_TWO) {
p.add(FAMILIES[0], QUALIFIER, VALUES[1]);
p.addColumn(FAMILIES[0], QUALIFIER, VALUES[1]);
}
table.put(p);
}

View File

@ -98,7 +98,7 @@ public class TestScannersWithLabels {
for (int i = 0; i < 9; i++) {
Put put = new Put(Bytes.toBytes("row" + i));
put.setDurability(Durability.SKIP_WAL);
put.add(famAndQf[0], famAndQf[1], k);
put.addColumn(famAndQf[0], famAndQf[1], k);
put.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!"
+ TOPSECRET));
puts.add(put);

View File

@ -108,7 +108,7 @@ public class TestTableResource {
k[2] = b3;
Put put = new Put(k);
put.setDurability(Durability.SKIP_WAL);
put.add(famAndQf[0], famAndQf[1], k);
put.addColumn(famAndQf[0], famAndQf[1], k);
puts.add(put);
}
}

View File

@ -151,7 +151,7 @@ public class TestRemoteHTableRetries {
@Override
public void run() throws Exception {
Put put = new Put(ROW_1);
put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1, put );
}
});
@ -164,7 +164,7 @@ public class TestRemoteHTableRetries {
@Override
public void run() throws Exception {
Put put = new Put(ROW_1);
put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
Delete delete= new Delete(ROW_1);
remoteTable.checkAndDelete(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1, delete );
}

View File

@ -100,12 +100,12 @@ public class TestRemoteTable {
admin.createTable(htd);
try (Table table = TEST_UTIL.getConnection().getTable(TABLE)) {
Put put = new Put(ROW_1);
put.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_1);
put.addColumn(COLUMN_1, QUALIFIER_1, TS_2, VALUE_1);
table.put(put);
put = new Put(ROW_2);
put.add(COLUMN_1, QUALIFIER_1, TS_1, VALUE_1);
put.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_2);
put.add(COLUMN_2, QUALIFIER_2, TS_2, VALUE_2);
put.addColumn(COLUMN_1, QUALIFIER_1, TS_1, VALUE_1);
put.addColumn(COLUMN_1, QUALIFIER_1, TS_2, VALUE_2);
put.addColumn(COLUMN_2, QUALIFIER_2, TS_2, VALUE_2);
table.put(put);
}
remoteTable = new RemoteHTable(
@ -282,7 +282,7 @@ public class TestRemoteTable {
@Test
public void testPut() throws IOException {
Put put = new Put(ROW_3);
put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
remoteTable.put(put);
Get get = new Get(ROW_3);
@ -296,13 +296,13 @@ public class TestRemoteTable {
List<Put> puts = new ArrayList<Put>();
put = new Put(ROW_3);
put.add(COLUMN_2, QUALIFIER_2, VALUE_2);
put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
puts.add(put);
put = new Put(ROW_4);
put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
puts.add(put);
put = new Put(ROW_4);
put.add(COLUMN_2, QUALIFIER_2, VALUE_2);
put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
puts.add(put);
remoteTable.put(puts);
@ -327,8 +327,8 @@ public class TestRemoteTable {
@Test
public void testDelete() throws IOException {
Put put = new Put(ROW_3);
put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
put.add(COLUMN_2, QUALIFIER_2, VALUE_2);
put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
remoteTable.put(put);
Get get = new Get(ROW_3);
@ -390,16 +390,16 @@ public class TestRemoteTable {
public void testScanner() throws IOException {
List<Put> puts = new ArrayList<Put>();
Put put = new Put(ROW_1);
put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
puts.add(put);
put = new Put(ROW_2);
put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
puts.add(put);
put = new Put(ROW_3);
put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
puts.add(put);
put = new Put(ROW_4);
put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
puts.add(put);
remoteTable.put(puts);
@ -465,7 +465,7 @@ public class TestRemoteTable {
assertFalse(remoteTable.exists(get));
Put put = new Put(ROW_1);
put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
remoteTable.put(put);
assertTrue(remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1,
@ -481,16 +481,16 @@ public class TestRemoteTable {
public void testIteratorScaner() throws IOException {
List<Put> puts = new ArrayList<Put>();
Put put = new Put(ROW_1);
put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
puts.add(put);
put = new Put(ROW_2);
put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
puts.add(put);
put = new Put(ROW_3);
put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
puts.add(put);
put = new Put(ROW_4);
put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
puts.add(put);
remoteTable.put(puts);

View File

@ -144,7 +144,7 @@ public class QuotaUtil extends QuotaTableUtil {
private static void addQuotas(final Connection connection, final byte[] rowKey,
final byte[] qualifier, final Quotas data) throws IOException {
Put put = new Put(rowKey);
put.add(QUOTA_FAMILY_INFO, qualifier, quotasToData(data));
put.addColumn(QUOTA_FAMILY_INFO, qualifier, quotasToData(data));
doPut(connection, put);
}

View File

@ -410,10 +410,10 @@ public class RegionMergeTransactionImpl implements RegionMergeTransaction {
// Put for parent
Put putOfMerged = MetaTableAccessor.makePutFromRegionInfo(copyOfMerged, time);
putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
regionA.toByteArray());
putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
regionB.toByteArray());
putOfMerged.addColumn(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
regionA.toByteArray());
putOfMerged.addColumn(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
regionB.toByteArray());
mutations.add(putOfMerged);
// Deletes for merging regions
Delete deleteA = MetaTableAccessor.makeDeleteFromRegionInfo(regionA, time);
@ -426,11 +426,11 @@ public class RegionMergeTransactionImpl implements RegionMergeTransaction {
@VisibleForTesting
Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
p.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes
.toBytes(sn.getHostAndPort()));
p.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn
.getStartcode()));
p.add(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER, Bytes.toBytes(openSeqNum));
p.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes
.toBytes(sn.getHostAndPort()));
p.addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn
.getStartcode()));
p.addColumn(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER, Bytes.toBytes(openSeqNum));
return p;
}

View File

@ -329,11 +329,11 @@ public class SplitTransactionImpl implements SplitTransaction {
@VisibleForTesting
Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
p.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes
.toBytes(sn.getHostAndPort()));
p.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn
.getStartcode()));
p.add(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER, Bytes.toBytes(openSeqNum));
p.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes
.toBytes(sn.getHostAndPort()));
p.addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn
.getStartcode()));
p.addColumn(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER, Bytes.toBytes(openSeqNum));
return p;
}

View File

@ -328,9 +328,10 @@ public abstract class HBaseTestCase extends TestCase {
byte[][] split =
KeyValue.parseColumn(Bytes.toBytes(sb.toString()));
if(split.length == 1) {
put.add(split[0], new byte[0], t);
byte[] qualifier = new byte[0];
put.addColumn(split[0], qualifier, t);
} else {
put.add(split[0], split[1], t);
put.addColumn(split[0], split[1], t);
}
put.setDurability(Durability.SKIP_WAL);
updater.put(put);

View File

@ -1951,7 +1951,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
Put put = new Put(row);
put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
for (int i = 0; i < f.length; i++) {
put.add(f[i], null, value != null ? value : row);
byte[] value1 = value != null ? value : row;
put.addColumn(f[i], null, value1);
}
puts.add(put);
}
@ -2038,7 +2039,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
k[2] = b3;
Put put = new Put(k);
put.setDurability(Durability.SKIP_WAL);
put.add(f, null, k);
put.addColumn(f, null, k);
if (r.getWAL() == null) {
put.setDurability(Durability.SKIP_WAL);
}
@ -2068,7 +2069,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
for (int i = startRow; i < endRow; i++) {
byte[] data = Bytes.toBytes(String.valueOf(i));
Put put = new Put(data);
put.add(f, null, data);
put.addColumn(f, null, data);
t.put(put);
}
}
@ -3415,7 +3416,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
"_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
ts + "_random_" + rand.nextLong());
put.add(cf, qual, ts, value);
put.addColumn(cf, qual, ts, value);
} else if (rand.nextDouble() < 0.8) {
del.deleteColumn(cf, qual, ts);
} else {

View File

@ -1413,7 +1413,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
put.add(kv);
updateValueSize(kv.getValueLength());
} else {
put.add(FAMILY_NAME, qualifier, value);
put.addColumn(FAMILY_NAME, qualifier, value);
updateValueSize(value.length);
}
}
@ -1501,7 +1501,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
put.add(kv);
updateValueSize(kv.getValueLength());
} else {
put.add(FAMILY_NAME, qualifier, value);
put.addColumn(FAMILY_NAME, qualifier, value);
updateValueSize(value.length);
}
}

View File

@ -135,7 +135,7 @@ public class TestAcidGuarantees implements Tool {
for (byte[] family : targetFamilies) {
for (int i = 0; i < NUM_COLS_TO_CHECK; i++) {
byte qualifier[] = Bytes.toBytes("col" + i);
p.add(family, qualifier, data);
p.addColumn(family, qualifier, data);
}
}
table.put(p);

View File

@ -98,7 +98,7 @@ public class TestHBaseTestingUtility {
Table table2 = htu2.createTable(TABLE_NAME, FAM_NAME);
Put put = new Put(ROW);
put.add(FAM_NAME, QUAL_NAME, VALUE);
put.addColumn(FAM_NAME, QUAL_NAME, VALUE);
table1.put(put);
Get get = new Get(ROW);

View File

@ -218,8 +218,7 @@ public class TestMultiVersions {
for (int i = 0; i < startKeys.length; i++) {
for (int j = 0; j < timestamp.length; j++) {
Put put = new Put(rows[i], timestamp[j]);
put.addColumn(HConstants.CATALOG_FAMILY, null, timestamp[j],
Bytes.toBytes(timestamp[j]));
put.addColumn(HConstants.CATALOG_FAMILY, null, timestamp[j], Bytes.toBytes(timestamp[j]));
puts.add(put);
}
}

View File

@ -248,7 +248,7 @@ public class TestNamespace {
//sanity check try to write and read from table
Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
Put p = new Put(Bytes.toBytes("row1"));
p.add(Bytes.toBytes("my_cf"),Bytes.toBytes("my_col"),Bytes.toBytes("value1"));
p.addColumn(Bytes.toBytes("my_cf"), Bytes.toBytes("my_col"), Bytes.toBytes("value1"));
table.put(p);
//flush and read from disk to make sure directory changes are working
admin.flush(desc.getTableName());

View File

@ -261,8 +261,7 @@ public class TestZooKeeper {
Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
Put put = new Put(Bytes.toBytes("testrow"));
put.add(Bytes.toBytes("fam"),
Bytes.toBytes("col"), Bytes.toBytes("testdata"));
put.addColumn(Bytes.toBytes("fam"), Bytes.toBytes("col"), Bytes.toBytes("testdata"));
LOG.info("Putting table " + tableName);
table.put(put);
table.close();
@ -611,7 +610,8 @@ public class TestZooKeeper {
int numberOfPuts;
for (numberOfPuts = 0; numberOfPuts < 6; numberOfPuts++) {
p = new Put(Bytes.toBytes(numberOfPuts));
p.add(Bytes.toBytes("col"), Bytes.toBytes("ql"), Bytes.toBytes("value" + numberOfPuts));
p.addColumn(Bytes.toBytes("col"), Bytes.toBytes("ql"),
Bytes.toBytes("value" + numberOfPuts));
table.put(p);
}
m.getZooKeeper().close();

View File

@ -403,7 +403,7 @@ public class TestZooKeeperTableArchiveClient {
private void createHFileInRegion(Region region, byte[] columnFamily) throws IOException {
// put one row in the region
Put p = new Put(Bytes.toBytes("row"));
p.add(columnFamily, Bytes.toBytes("Qual"), Bytes.toBytes("v1"));
p.addColumn(columnFamily, Bytes.toBytes("Qual"), Bytes.toBytes("v1"));
region.put(p);
// flush the region to make a store file
region.flush(true);

View File

@ -241,7 +241,7 @@ public class TestAdmin1 {
final TableName table = TableName.valueOf("testDisableAndEnableTable");
Table ht = TEST_UTIL.createTable(table, HConstants.CATALOG_FAMILY);
Put put = new Put(row);
put.add(HConstants.CATALOG_FAMILY, qualifier, value);
put.addColumn(HConstants.CATALOG_FAMILY, qualifier, value);
ht.put(put);
Get get = new Get(row);
get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
@ -308,7 +308,7 @@ public class TestAdmin1 {
Table ht1 = TEST_UTIL.createTable(table1, HConstants.CATALOG_FAMILY);
Table ht2 = TEST_UTIL.createTable(table2, HConstants.CATALOG_FAMILY);
Put put = new Put(row);
put.add(HConstants.CATALOG_FAMILY, qualifier, value);
put.addColumn(HConstants.CATALOG_FAMILY, qualifier, value);
ht1.put(put);
ht2.put(put);
Get get = new Get(row);
@ -1187,13 +1187,13 @@ public class TestAdmin1 {
List<Put> puts = new ArrayList<Put>();
byte[] qualifier = "c".getBytes();
Put put = new Put(new byte[]{(byte)'1'});
put.add(cf, qualifier, "100".getBytes());
put.addColumn(cf, qualifier, "100".getBytes());
puts.add(put);
put = new Put(new byte[]{(byte)'6'});
put.add(cf, qualifier, "100".getBytes());
put.addColumn(cf, qualifier, "100".getBytes());
puts.add(put);
put = new Put(new byte[]{(byte)'8'});
put.add(cf, qualifier, "100".getBytes());
put.addColumn(cf, qualifier, "100".getBytes());
puts.add(put);
ht.put(puts);
ht.close();

View File

@ -218,7 +218,7 @@ public class TestAdmin2 {
byte[] value = Bytes.toBytes("somedata");
// This used to use an empty row... That must have been a bug
Put put = new Put(value);
put.add(HConstants.CATALOG_FAMILY, HConstants.CATALOG_FAMILY, value);
put.addColumn(HConstants.CATALOG_FAMILY, HConstants.CATALOG_FAMILY, value);
table.put(put);
table.close();
}
@ -614,7 +614,7 @@ public class TestAdmin2 {
HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(tableName);
for (int i = 1; i <= 256; i++) { // 256 writes should cause 8 log rolls
Put put = new Put(Bytes.toBytes("row" + String.format("%1$04d", i)));
put.add(HConstants.CATALOG_FAMILY, null, value);
put.addColumn(HConstants.CATALOG_FAMILY, null, value);
table.put(put);
if (i % 32 == 0) {
// After every 32 writes sleep to let the log roller run

View File

@ -183,10 +183,10 @@ public class TestBlockEvictionFromClient {
// insert data. 2 Rows are added
Put put = new Put(ROW);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
put = new Put(ROW1);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
// data was in memstore so don't expect any changes
@ -214,7 +214,7 @@ public class TestBlockEvictionFromClient {
byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
byte[] data2 = Bytes.add(data, data);
put = new Put(ROW);
put.add(FAMILY, QUALIFIER2, data2);
put.addColumn(FAMILY, QUALIFIER2, data2);
table.put(put);
Result r = table.get(new Get(ROW));
assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
@ -332,16 +332,16 @@ public class TestBlockEvictionFromClient {
BlockCache cache = cacheConf.getBlockCache();
Put put = new Put(ROW);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
region.flush(true);
put = new Put(ROW1);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
region.flush(true);
byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
put = new Put(ROW);
put.add(FAMILY, QUALIFIER2, data2);
put.addColumn(FAMILY, QUALIFIER2, data2);
table.put(put);
region.flush(true);
// flush the data
@ -389,16 +389,16 @@ public class TestBlockEvictionFromClient {
regionName);
BlockCache cache = setCacheProperties(region);
Put put = new Put(ROW);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
region.flush(true);
put = new Put(ROW1);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
region.flush(true);
for (int i = 1; i < 10; i++) {
put = new Put(ROW);
put.add(FAMILY, Bytes.toBytes("testQualifier" + i), data2);
put.addColumn(FAMILY, Bytes.toBytes("testQualifier" + i), data2);
table.put(put);
if (i % 2 == 0) {
region.flush(true);
@ -406,7 +406,7 @@ public class TestBlockEvictionFromClient {
}
byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
put = new Put(ROW);
put.add(FAMILY, QUALIFIER2, data2);
put.addColumn(FAMILY, QUALIFIER2, data2);
table.put(put);
region.flush(true);
// flush the data
@ -483,16 +483,16 @@ public class TestBlockEvictionFromClient {
BlockCache cache = setCacheProperties(region);
Put put = new Put(ROW);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
region.flush(true);
put = new Put(ROW1);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
region.flush(true);
for (int i = 1; i < 10; i++) {
put = new Put(ROW);
put.add(Bytes.toBytes("testFamily" + i), Bytes.toBytes("testQualifier" + i), data2);
put.addColumn(Bytes.toBytes("testFamily" + i), Bytes.toBytes("testQualifier" + i), data2);
table.put(put);
if (i % 2 == 0) {
region.flush(true);
@ -501,7 +501,7 @@ public class TestBlockEvictionFromClient {
region.flush(true);
byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
put = new Put(ROW);
put.add(FAMILY, QUALIFIER2, data2);
put.addColumn(FAMILY, QUALIFIER2, data2);
table.put(put);
region.flush(true);
// flush the data
@ -577,16 +577,16 @@ public class TestBlockEvictionFromClient {
BlockCache cache = cacheConf.getBlockCache();
Put put = new Put(ROW);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
region.flush(true);
put = new Put(ROW1);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
region.flush(true);
byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
put = new Put(ROW);
put.add(FAMILY, QUALIFIER2, data2);
put.addColumn(FAMILY, QUALIFIER2, data2);
table.put(put);
region.flush(true);
// flush the data
@ -656,16 +656,16 @@ public class TestBlockEvictionFromClient {
BlockCache cache = setCacheProperties(region);
Put put = new Put(ROW);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
region.flush(true);
put = new Put(ROW1);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
region.flush(true);
for (int i = 1; i < 10; i++) {
put = new Put(ROW);
put.add(Bytes.toBytes("testFamily" + i), Bytes.toBytes("testQualifier" + i), data2);
put.addColumn(Bytes.toBytes("testFamily" + i), Bytes.toBytes("testQualifier" + i), data2);
table.put(put);
if (i % 2 == 0) {
region.flush(true);
@ -674,7 +674,7 @@ public class TestBlockEvictionFromClient {
region.flush(true);
byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
put = new Put(ROW);
put.add(FAMILY, QUALIFIER2, data2);
put.addColumn(FAMILY, QUALIFIER2, data2);
table.put(put);
region.flush(true);
// flush the data
@ -827,10 +827,10 @@ public class TestBlockEvictionFromClient {
// insert data. 2 Rows are added
Put put = new Put(ROW);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
put = new Put(ROW1);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
// Should create one Hfile with 2 blocks
@ -842,7 +842,7 @@ public class TestBlockEvictionFromClient {
byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
byte[] data2 = Bytes.add(data, data);
put = new Put(ROW);
put.add(FAMILY, QUALIFIER2, data2);
put.addColumn(FAMILY, QUALIFIER2, data2);
table.put(put);
// flush, one new block
System.out.println("Flushing cache");
@ -1042,14 +1042,14 @@ public class TestBlockEvictionFromClient {
private void insertData(HTable table) throws IOException {
Put put = new Put(ROW);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
put = new Put(ROW1);
put.add(FAMILY, QUALIFIER, data);
put.addColumn(FAMILY, QUALIFIER, data);
table.put(put);
byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
put = new Put(ROW);
put.add(FAMILY, QUALIFIER2, data2);
put.addColumn(FAMILY, QUALIFIER2, data2);
table.put(put);
}

View File

@ -62,9 +62,9 @@ public class TestCheckAndMutate {
try {
// put one row
Put put = new Put(rowKey);
put.add(family, Bytes.toBytes("A"), Bytes.toBytes("a"));
put.add(family, Bytes.toBytes("B"), Bytes.toBytes("b"));
put.add(family, Bytes.toBytes("C"), Bytes.toBytes("c"));
put.addColumn(family, Bytes.toBytes("A"), Bytes.toBytes("a"));
put.addColumn(family, Bytes.toBytes("B"), Bytes.toBytes("b"));
put.addColumn(family, Bytes.toBytes("C"), Bytes.toBytes("c"));
table.put(put);
// get row back and assert the values
Get get = new Get(rowKey);
@ -102,7 +102,8 @@ public class TestCheckAndMutate {
//Test that we get a region level exception
try {
Put p = new Put(rowKey);
p.add(new byte[]{'b', 'o', 'g', 'u', 's'}, new byte[]{'A'}, new byte[0]);
byte[] value = new byte[0];
p.addColumn(new byte[]{'b', 'o', 'g', 'u', 's'}, new byte[]{'A'}, value);
rm = new RowMutations(rowKey);
rm.add(p);
table.checkAndMutate(rowKey, family, Bytes.toBytes("A"), CompareFilter.CompareOp.EQUAL,

View File

@ -85,7 +85,7 @@ public class TestClientOperationInterrupt {
Table ht = util.createTable(tableName, new byte[][]{dummy, test});
Put p = new Put(row1);
p.add(dummy, dummy, dummy);
p.addColumn(dummy, dummy, dummy);
ht.put(p);
}

View File

@ -126,7 +126,7 @@ public class TestClientScannerRPCTimeout {
private void putToTable(Table ht, byte[] rowkey) throws IOException {
Put put = new Put(rowkey);
put.add(FAMILY, QUALIFIER, VALUE);
put.addColumn(FAMILY, QUALIFIER, VALUE);
ht.put(put);
}

View File

@ -125,7 +125,7 @@ public class TestFastFail {
byte[] rowKey = longToByteArrayKey(i);
Put put = new Put(rowKey);
byte[] value = rowKey; // value is the same as the row key
put.add(FAMILY, QUALIFIER, value);
put.addColumn(FAMILY, QUALIFIER, value);
puts.add(put);
}
try (Table table = connection.getTable(TableName.valueOf(tableName))) {

View File

@ -109,7 +109,7 @@ public class TestFromClientSide3 {
for (int i = 0; i < nPuts; i++) {
byte[] qualifier = Bytes.toBytes(random.nextInt());
byte[] value = Bytes.toBytes(random.nextInt());
put.add(family, qualifier, value);
put.addColumn(family, qualifier, value);
}
table.put(put);
}
@ -274,7 +274,7 @@ public class TestFromClientSide3 {
actions.add(put1);
Put put2 = new Put(ANOTHERROW);
put2.add(FAMILY, QUALIFIER, VALUE);
put2.addColumn(FAMILY, QUALIFIER, VALUE);
actions.add(put2);
table.batch(actions, results);
@ -294,7 +294,7 @@ public class TestFromClientSide3 {
new byte[][] { FAMILY });
Put put = new Put(ROW);
put.add(FAMILY, QUALIFIER, VALUE);
put.addColumn(FAMILY, QUALIFIER, VALUE);
Get get = new Get(ROW);
@ -312,7 +312,7 @@ public class TestFromClientSide3 {
"testHTableExistsMethodSingleRegionMultipleGets"), new byte[][] { FAMILY });
Put put = new Put(ROW);
put.add(FAMILY, QUALIFIER, VALUE);
put.addColumn(FAMILY, QUALIFIER, VALUE);
table.put(put);
List<Get> gets = new ArrayList<Get>();
@ -406,7 +406,7 @@ public class TestFromClientSide3 {
TableName.valueOf("testHTableExistsMethodMultipleRegionsMultipleGets"),
new byte[][] { FAMILY }, 1, new byte[] { 0x00 }, new byte[] { (byte) 0xff }, 255);
Put put = new Put(ROW);
put.add(FAMILY, QUALIFIER, VALUE);
put.addColumn(FAMILY, QUALIFIER, VALUE);
table.put (put);
List<Get> gets = new ArrayList<Get>();
@ -424,7 +424,7 @@ public class TestFromClientSide3 {
// Test with the first region.
put = new Put(new byte[] { 0x00 });
put.add(FAMILY, QUALIFIER, VALUE);
put.addColumn(FAMILY, QUALIFIER, VALUE);
table.put(put);
gets = new ArrayList<Get>();
@ -436,7 +436,7 @@ public class TestFromClientSide3 {
// Test with the last region
put = new Put(new byte[] { (byte) 0xff, (byte) 0xff });
put.add(FAMILY, QUALIFIER, VALUE);
put.addColumn(FAMILY, QUALIFIER, VALUE);
table.put(put);
gets = new ArrayList<Get>();
@ -459,7 +459,7 @@ public class TestFromClientSide3 {
Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
Put put = new Put(ROW_BYTES);
put.add(FAMILY, COL_QUAL, VAL_BYTES);
put.addColumn(FAMILY, COL_QUAL, VAL_BYTES);
table.put(put);
//Try getting the row with an empty row key

View File

@ -69,7 +69,9 @@ public class TestFromClientSideNoCodec {
// Check put and get.
final byte [] row = Bytes.toBytes("row");
Put p = new Put(row);
for (byte [] f: fs) p.add(f, f, f);
for (byte [] f: fs) {
p.addColumn(f, f, f);
}
ht.put(p);
Result r = ht.get(new Get(row));
int i = 0;

View File

@ -248,7 +248,7 @@ public class TestHCM {
}
Put p1 = new Put(rk);
p1.add(cf, "qual".getBytes(), "val".getBytes());
p1.addColumn(cf, "qual".getBytes(), "val".getBytes());
t.put(p1);
rs.getRegionServer().abort("I'm dead");
@ -430,7 +430,7 @@ public class TestHCM {
final Table table = connection.getTable(tableName);
Put put = new Put(ROW);
put.add(FAM_NAM, ROW, ROW);
put.addColumn(FAM_NAM, ROW, ROW);
table.put(put);
ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
@ -583,7 +583,7 @@ public class TestHCM {
TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
Put put = new Put(ROW);
put.add(FAM_NAM, ROW, ROW);
put.addColumn(FAM_NAM, ROW, ROW);
table.put(put);
ConnectionImplementation conn = (ConnectionImplementation) connection;
@ -606,7 +606,7 @@ public class TestHCM {
conn.clearRegionCache(TABLE_NAME);
Assert.assertEquals(0, conn.getNumberOfCachedRegionLocations(TABLE_NAME));
Put put2 = new Put(ROW);
put2.add(FAM_NAM, ROW, ROW);
put2.addColumn(FAM_NAM, ROW, ROW);
table.put(put2);
assertNotNull(conn.getCachedLocation(TABLE_NAME, ROW));
assertNotNull(conn.getCachedLocation(TableName.valueOf(TABLE_NAME.getName()), ROW.clone()));
@ -676,7 +676,7 @@ public class TestHCM {
// We do a put and expect the cache to be updated, even if we don't retry
LOG.info("Put starting");
Put put3 = new Put(ROW);
put3.add(FAM_NAM, ROW, ROW);
put3.addColumn(FAM_NAM, ROW, ROW);
try {
table.put(put3);
Assert.fail("Unreachable point");
@ -783,7 +783,7 @@ public class TestHCM {
public void testCacheSeqNums() throws Exception{
Table table = TEST_UTIL.createMultiRegionTable(TABLE_NAME2, FAM_NAM);
Put put = new Put(ROW);
put.add(FAM_NAM, ROW, ROW);
put.addColumn(FAM_NAM, ROW, ROW);
table.put(put);
ConnectionImplementation conn = (ConnectionImplementation) TEST_UTIL.getConnection();
@ -895,44 +895,44 @@ public class TestHCM {
@Test
public void testMulti() throws Exception {
Table table = TEST_UTIL.createMultiRegionTable(TABLE_NAME3, FAM_NAM);
try {
ConnectionImplementation conn =
try {
ConnectionImplementation conn =
(ConnectionImplementation)TEST_UTIL.getConnection();
// We're now going to move the region and check that it works for the client
// First a new put to add the location in the cache
conn.clearRegionCache(TABLE_NAME3);
Assert.assertEquals(0, conn.getNumberOfCachedRegionLocations(TABLE_NAME3));
// We're now going to move the region and check that it works for the client
// First a new put to add the location in the cache
conn.clearRegionCache(TABLE_NAME3);
Assert.assertEquals(0, conn.getNumberOfCachedRegionLocations(TABLE_NAME3));
TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, false);
HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, false);
HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
// We can wait for all regions to be online, that makes log reading easier when debugging
while (master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
Thread.sleep(1);
}
// We can wait for all regions to be online, that makes log reading easier when debugging
while (master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
Thread.sleep(1);
}
Put put = new Put(ROW_X);
put.add(FAM_NAM, ROW_X, ROW_X);
table.put(put);
Put put = new Put(ROW_X);
put.addColumn(FAM_NAM, ROW_X, ROW_X);
table.put(put);
// Now moving the region to the second server
HRegionLocation toMove = conn.getCachedLocation(TABLE_NAME3, ROW_X).getRegionLocation();
byte[] regionName = toMove.getRegionInfo().getRegionName();
byte[] encodedRegionNameBytes = toMove.getRegionInfo().getEncodedNameAsBytes();
// Now moving the region to the second server
HRegionLocation toMove = conn.getCachedLocation(TABLE_NAME3, ROW_X).getRegionLocation();
byte[] regionName = toMove.getRegionInfo().getRegionName();
byte[] encodedRegionNameBytes = toMove.getRegionInfo().getEncodedNameAsBytes();
// Choose the other server.
int curServerId = TEST_UTIL.getHBaseCluster().getServerWith(regionName);
int destServerId = (curServerId == 0 ? 1 : 0);
// Choose the other server.
int curServerId = TEST_UTIL.getHBaseCluster().getServerWith(regionName);
int destServerId = (curServerId == 0 ? 1 : 0);
HRegionServer curServer = TEST_UTIL.getHBaseCluster().getRegionServer(curServerId);
HRegionServer destServer = TEST_UTIL.getHBaseCluster().getRegionServer(destServerId);
HRegionServer curServer = TEST_UTIL.getHBaseCluster().getRegionServer(curServerId);
HRegionServer destServer = TEST_UTIL.getHBaseCluster().getRegionServer(destServerId);
ServerName destServerName = destServer.getServerName();
ServerName destServerName = destServer.getServerName();
//find another row in the cur server that is less than ROW_X
List<Region> regions = curServer.getOnlineRegions(TABLE_NAME3);
byte[] otherRow = null;
List<Region> regions = curServer.getOnlineRegions(TABLE_NAME3);
byte[] otherRow = null;
for (Region region : regions) {
if (!region.getRegionInfo().getEncodedName().equals(toMove.getRegionInfo().getEncodedName())
&& Bytes.BYTES_COMPARATOR.compare(region.getRegionInfo().getStartKey(), ROW_X) < 0) {
@ -940,69 +940,71 @@ public class TestHCM {
break;
}
}
assertNotNull(otherRow);
// If empty row, set it to first row.-f
if (otherRow.length <= 0) otherRow = Bytes.toBytes("aaa");
Put put2 = new Put(otherRow);
put2.add(FAM_NAM, otherRow, otherRow);
table.put(put2); //cache put2's location
assertNotNull(otherRow);
// If empty row, set it to first row.-f
if (otherRow.length <= 0) otherRow = Bytes.toBytes("aaa");
Put put2 = new Put(otherRow);
put2.addColumn(FAM_NAM, otherRow, otherRow);
table.put(put2); //cache put2's location
// Check that we are in the expected state
Assert.assertTrue(curServer != destServer);
Assert.assertNotEquals(curServer.getServerName(), destServer.getServerName());
Assert.assertNotEquals(toMove.getPort(), destServerName.getPort());
Assert.assertNotNull(curServer.getOnlineRegion(regionName));
Assert.assertNull(destServer.getOnlineRegion(regionName));
Assert.assertFalse(TEST_UTIL.getMiniHBaseCluster().getMaster().
getAssignmentManager().getRegionStates().isRegionsInTransition());
// Check that we are in the expected state
Assert.assertTrue(curServer != destServer);
Assert.assertNotEquals(curServer.getServerName(), destServer.getServerName());
Assert.assertNotEquals(toMove.getPort(), destServerName.getPort());
Assert.assertNotNull(curServer.getOnlineRegion(regionName));
Assert.assertNull(destServer.getOnlineRegion(regionName));
Assert.assertFalse(TEST_UTIL.getMiniHBaseCluster().getMaster().
getAssignmentManager().getRegionStates().isRegionsInTransition());
// Moving. It's possible that we don't have all the regions online at this point, so
// the test must depends only on the region we're looking at.
LOG.info("Move starting region="+toMove.getRegionInfo().getRegionNameAsString());
TEST_UTIL.getHBaseAdmin().move(
toMove.getRegionInfo().getEncodedNameAsBytes(),
destServerName.getServerName().getBytes()
);
// the test must depends only on the region we're looking at.
LOG.info("Move starting region="+toMove.getRegionInfo().getRegionNameAsString());
TEST_UTIL.getHBaseAdmin().move(
toMove.getRegionInfo().getEncodedNameAsBytes(),
destServerName.getServerName().getBytes()
);
while (destServer.getOnlineRegion(regionName) == null ||
destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) ||
curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) ||
master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
// wait for the move to be finished
Thread.sleep(1);
}
while (destServer.getOnlineRegion(regionName) == null ||
destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) ||
curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) ||
master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
// wait for the move to be finished
Thread.sleep(1);
}
LOG.info("Move finished for region="+toMove.getRegionInfo().getRegionNameAsString());
LOG.info("Move finished for region="+toMove.getRegionInfo().getRegionNameAsString());
// Check our new state.
Assert.assertNull(curServer.getOnlineRegion(regionName));
Assert.assertNotNull(destServer.getOnlineRegion(regionName));
Assert.assertFalse(destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes));
Assert.assertFalse(curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes));
// Check our new state.
Assert.assertNull(curServer.getOnlineRegion(regionName));
Assert.assertNotNull(destServer.getOnlineRegion(regionName));
Assert.assertFalse(destServer.getRegionsInTransitionInRS()
.containsKey(encodedRegionNameBytes));
Assert.assertFalse(curServer.getRegionsInTransitionInRS()
.containsKey(encodedRegionNameBytes));
// Cache was NOT updated and points to the wrong server
Assert.assertFalse(
conn.getCachedLocation(TABLE_NAME3, ROW_X).getRegionLocation()
.getPort() == destServerName.getPort());
Assert.assertFalse(
conn.getCachedLocation(TABLE_NAME3, ROW_X).getRegionLocation()
.getPort() == destServerName.getPort());
// Hijack the number of retry to fail after 2 tries
final int prevNumRetriesVal = setNumTries(conn, 2);
// Hijack the number of retry to fail after 2 tries
final int prevNumRetriesVal = setNumTries(conn, 2);
Put put3 = new Put(ROW_X);
put3.add(FAM_NAM, ROW_X, ROW_X);
Put put4 = new Put(otherRow);
put4.add(FAM_NAM, otherRow, otherRow);
Put put3 = new Put(ROW_X);
put3.addColumn(FAM_NAM, ROW_X, ROW_X);
Put put4 = new Put(otherRow);
put4.addColumn(FAM_NAM, otherRow, otherRow);
// do multi
ArrayList<Put> actions = Lists.newArrayList(put4, put3);
table.batch(actions, null); // first should be a valid row,
// second we get RegionMovedException.
// second we get RegionMovedException.
setNumTries(conn, prevNumRetriesVal);
} finally {
table.close();
}
setNumTries(conn, prevNumRetriesVal);
} finally {
table.close();
}
}
@Ignore ("Test presumes RETRY_BACKOFF will never change; it has") @Test
@ -1095,4 +1097,4 @@ public class TestHCM {
table.close();
connection.close();
}
}
}

View File

@ -110,11 +110,11 @@ public class TestHTableMultiplexer {
for (int i = 0; i < NUM_REGIONS; i++) {
byte [] row = startRows[i];
if (row == null || row.length <= 0) continue;
Put put = new Put(row).add(FAMILY, QUALIFIER, VALUE1);
Put put = new Put(row).addColumn(FAMILY, QUALIFIER, VALUE1);
success = multiplexer.put(TABLE_1, put);
assertTrue("multiplexer.put returns", success);
put = new Put(row).add(FAMILY, QUALIFIER, VALUE1);
put = new Put(row).addColumn(FAMILY, QUALIFIER, VALUE1);
success = multiplexer.put(TABLE_2, put);
assertTrue("multiplexer.put failed", success);
@ -131,7 +131,7 @@ public class TestHTableMultiplexer {
byte [] row = endRows[i];
if (row == null || row.length <= 0) continue;
Put put = new Put(row);
put.add(FAMILY, QUALIFIER, VALUE2);
put.addColumn(FAMILY, QUALIFIER, VALUE2);
multiput.add(put);
}
failedPuts = multiplexer.put(TABLE_1, multiput);

View File

@ -97,7 +97,7 @@ public class TestHTableMultiplexerFlushCache {
byte[] row = startRows[1];
assertTrue("2nd region should not start with empty row", row != null && row.length > 0);
Put put = new Put(row).add(FAMILY, QUALIFIER1, VALUE1);
Put put = new Put(row).addColumn(FAMILY, QUALIFIER1, VALUE1);
assertTrue("multiplexer.put returns", multiplexer.put(TABLE, put));
checkExistence(htable, row, FAMILY, QUALIFIER1, VALUE1);

View File

@ -208,7 +208,7 @@ public class TestMetaWithReplicas {
htable = c.getTable(TABLE);
byte[] row = "test".getBytes();
Put put = new Put(row);
put.add("foo".getBytes(), row, row);
put.addColumn("foo".getBytes(), row, row);
BufferedMutator m = c.getBufferedMutator(TABLE);
m.mutate(put);
m.flush();

View File

@ -213,10 +213,10 @@ public class TestMultiParallel {
List<Row> actions = new ArrayList<Row>();
Put p = new Put(Bytes.toBytes("row1"));
p.add(Bytes.toBytes("bad_family"), Bytes.toBytes("qual"), Bytes.toBytes("value"));
p.addColumn(Bytes.toBytes("bad_family"), Bytes.toBytes("qual"), Bytes.toBytes("value"));
actions.add(p);
p = new Put(Bytes.toBytes("row2"));
p.add(BYTES_FAMILY, Bytes.toBytes("qual"), Bytes.toBytes("value"));
p.addColumn(BYTES_FAMILY, Bytes.toBytes("qual"), Bytes.toBytes("value"));
actions.add(p);
// row1 and row2 should be in the same region.
@ -423,7 +423,7 @@ public class TestMultiParallel {
for (int i = 0; i < 100; i++) {
Put put = new Put(ONE_ROW);
byte[] qual = Bytes.toBytes("column" + i);
put.add(BYTES_FAMILY, qual, VALUE);
put.addColumn(BYTES_FAMILY, qual, VALUE);
puts.add(put);
}
Object[] results = new Object[puts.size()];
@ -464,8 +464,8 @@ public class TestMultiParallel {
Delete d = new Delete(ONE_ROW);
table.delete(d);
Put put = new Put(ONE_ROW);
put.add(BYTES_FAMILY, QUAL1, Bytes.toBytes("abc"));
put.add(BYTES_FAMILY, QUAL2, Bytes.toBytes(1L));
put.addColumn(BYTES_FAMILY, QUAL1, Bytes.toBytes("abc"));
put.addColumn(BYTES_FAMILY, QUAL2, Bytes.toBytes(1L));
table.put(put);
Increment inc = new Increment(ONE_ROW);
@ -494,7 +494,7 @@ public class TestMultiParallel {
final Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
Table table = connection.getTable(TEST_TABLE);
Put put = new Put(ONE_ROW);
put.add(BYTES_FAMILY, QUALIFIER, Bytes.toBytes(0L));
put.addColumn(BYTES_FAMILY, QUALIFIER, Bytes.toBytes(0L));
// Replace nonce manager with the one that returns each nonce twice.
NonceGenerator cnm = new PerClientRandomNonceGenerator() {
@ -610,7 +610,7 @@ public class TestMultiParallel {
// 2 put of new column
Put put = new Put(KEYS[10]);
put.add(BYTES_FAMILY, qual2, val2);
put.addColumn(BYTES_FAMILY, qual2, val2);
actions.add(put);
// 3 delete
@ -629,7 +629,7 @@ public class TestMultiParallel {
// 5 put of new column
put = new Put(KEYS[40]);
put.add(BYTES_FAMILY, qual2, val2);
put.addColumn(BYTES_FAMILY, qual2, val2);
actions.add(put);
results = new Object[actions.size()];
@ -673,7 +673,7 @@ public class TestMultiParallel {
List<Put> puts = new ArrayList<>();
for (byte[] k : KEYS) {
Put put = new Put(k);
put.add(BYTES_FAMILY, QUALIFIER, VALUE);
put.addColumn(BYTES_FAMILY, QUALIFIER, VALUE);
puts.add(put);
}
return puts;

View File

@ -475,8 +475,8 @@ public class TestMultipleTimestamps {
for(int colIdx: columnIndexes) {
byte column[] = Bytes.toBytes("column:" + colIdx);
for (long version: versions) {
put.add(cf, column, version, Bytes.toBytes("value-version-" +
version));
put.addColumn(cf, column, version, Bytes.toBytes("value-version-" +
version));
}
}
ht.put(put);
@ -496,7 +496,7 @@ public class TestMultipleTimestamps {
put.setDurability(Durability.SKIP_WAL);
for (long idx = versionStart; idx <= versionEnd; idx++) {
put.add(cf, column, idx, Bytes.toBytes("value-version-" + idx));
put.addColumn(cf, column, idx, Bytes.toBytes("value-version-" + idx));
}
ht.put(put);

View File

@ -49,7 +49,7 @@ public class TestPutDeleteEtcCellIteration {
Put p = new Put(ROW);
for (int i = 0; i < COUNT; i++) {
byte [] bytes = Bytes.toBytes(i);
p.add(bytes, bytes, TIMESTAMP, bytes);
p.addColumn(bytes, bytes, TIMESTAMP, bytes);
}
int index = 0;
for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance();) {
@ -65,7 +65,7 @@ public class TestPutDeleteEtcCellIteration {
Put p = new Put(ROW);
for (int i = 0; i < COUNT; i++) {
byte [] bytes = Bytes.toBytes(i);
p.add(bytes, bytes, TIMESTAMP, bytes);
p.addColumn(bytes, bytes, TIMESTAMP, bytes);
}
int index = 0;
int trigger = 3;
@ -73,7 +73,7 @@ public class TestPutDeleteEtcCellIteration {
Cell cell = cellScanner.current();
byte [] bytes = Bytes.toBytes(index++);
// When we hit the trigger, try inserting a new KV; should trigger exception
if (trigger == 3) p.add(bytes, bytes, TIMESTAMP, bytes);
if (trigger == 3) p.addColumn(bytes, bytes, TIMESTAMP, bytes);
cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes));
}
assertEquals(COUNT, index);

View File

@ -59,10 +59,10 @@ public class TestPutWithDelete {
try {
// put one row
Put put = new Put(rowKey);
put.add(family, Bytes.toBytes("A"), Bytes.toBytes("a"));
put.add(family, Bytes.toBytes("B"), Bytes.toBytes("b"));
put.add(family, Bytes.toBytes("C"), Bytes.toBytes("c"));
put.add(family, Bytes.toBytes("D"), Bytes.toBytes("d"));
put.addColumn(family, Bytes.toBytes("A"), Bytes.toBytes("a"));
put.addColumn(family, Bytes.toBytes("B"), Bytes.toBytes("b"));
put.addColumn(family, Bytes.toBytes("C"), Bytes.toBytes("c"));
put.addColumn(family, Bytes.toBytes("D"), Bytes.toBytes("d"));
table.put(put);
// get row back and assert the values
Get get = new Get(rowKey);
@ -77,11 +77,11 @@ public class TestPutWithDelete {
Bytes.toString(result.getValue(family, Bytes.toBytes("D"))).equals("d"));
// put the same row again with C column deleted
put = new Put(rowKey);
put.add(family, Bytes.toBytes("A"), Bytes.toBytes("a1"));
put.add(family, Bytes.toBytes("B"), Bytes.toBytes("b1"));
put.addColumn(family, Bytes.toBytes("A"), Bytes.toBytes("a1"));
put.addColumn(family, Bytes.toBytes("B"), Bytes.toBytes("b1"));
KeyValue marker = new KeyValue(rowKey, family, Bytes.toBytes("C"),
HConstants.LATEST_TIMESTAMP, KeyValue.Type.DeleteColumn);
put.add(family, Bytes.toBytes("D"), Bytes.toBytes("d1"));
put.addColumn(family, Bytes.toBytes("D"), Bytes.toBytes("d1"));
put.add(marker);
table.put(put);
// get row back and assert the values

View File

@ -196,7 +196,7 @@ public class TestReplicaWithCluster {
bHdt.getColumnFamilies().length + 1, nHdt.getColumnFamilies().length);
p = new Put(row);
p.add(row, row, row);
p.addColumn(row, row, row);
table.put(p);
g = new Get(row);
@ -253,7 +253,7 @@ public class TestReplicaWithCluster {
admin.close();
Put p = new Put(row);
p.add(row, row, row);
p.addColumn(row, row, row);
final Table table = HTU.getConnection().getTable(hdt.getTableName());
table.put(p);

View File

@ -384,7 +384,7 @@ public class TestReplicasClient {
flushRegion(hriSecondary);
Put p = new Put(row);
p.add(f, row, row);
p.addColumn(f, row, row);
table.put(p);
flushRegion(hriPrimary);
@ -404,7 +404,7 @@ public class TestReplicasClient {
flushRegion(hriPrimary);
Put p = new Put(row);
p.add(f, row, row);
p.addColumn(f, row, row);
table.put(p);
flushRegion(hriPrimary);
@ -422,7 +422,7 @@ public class TestReplicasClient {
flushRegion(hriSecondary);
Put p = new Put(row);
p.add(f, row, row);
p.addColumn(f, row, row);
table.put(p);
flushRegion(hriSecondary);
@ -442,7 +442,7 @@ public class TestReplicasClient {
try {
// A simple put works, even if there here a second replica
Put p = new Put(b1);
p.add(f, b1, b1);
p.addColumn(f, b1, b1);
table.put(p);
LOG.info("Put done");
@ -534,12 +534,12 @@ public class TestReplicasClient {
List<Put> puts = new ArrayList<Put>(2);
byte[] b1 = Bytes.toBytes("testCancelOfMultiGet" + 0);
Put p = new Put(b1);
p.add(f, b1, b1);
p.addColumn(f, b1, b1);
puts.add(p);
byte[] b2 = Bytes.toBytes("testCancelOfMultiGet" + 1);
p = new Put(b2);
p.add(f, b2, b2);
p.addColumn(f, b2, b2);
puts.add(p);
table.put(puts);
LOG.debug("PUT done");
@ -619,7 +619,7 @@ public class TestReplicasClient {
for (int i = 0; i < NUMROWS; i++) {
byte[] b1 = Bytes.toBytes("testUseRegionWithReplica" + i);
Put p = new Put(b1);
p.add(f, b1, b1);
p.addColumn(f, b1, b1);
table.put(p);
}
LOG.debug("PUT done");

View File

@ -135,7 +135,7 @@ public class TestRpcControllerFactory {
Table table = connection.getTable(name);
byte[] row = Bytes.toBytes("row");
Put p = new Put(row);
p.add(fam1, fam1, Bytes.toBytes("val0"));
p.addColumn(fam1, fam1, Bytes.toBytes("val0"));
table.put(p);
Integer counter = 1;
@ -147,7 +147,7 @@ public class TestRpcControllerFactory {
counter = verifyCount(counter);
Put p2 = new Put(row);
p2.add(fam1, Bytes.toBytes("qual"), Bytes.toBytes("val1"));
p2.addColumn(fam1, Bytes.toBytes("qual"), Bytes.toBytes("val1"));
table.batch(Lists.newArrayList(p, p2), null);
// this only goes to a single server, so we don't need to change the count here
counter = verifyCount(counter);

View File

@ -68,9 +68,9 @@ public class TestScannerTimeout {
// We need more than one region server for this test
TEST_UTIL.startMiniCluster(2);
Table table = TEST_UTIL.createTable(TABLE_NAME, SOME_BYTES);
for (int i = 0; i < NB_ROWS; i++) {
for (int i = 0; i < NB_ROWS; i++) {
Put put = new Put(Bytes.toBytes(i));
put.add(SOME_BYTES, SOME_BYTES, SOME_BYTES);
put.addColumn(SOME_BYTES, SOME_BYTES, SOME_BYTES);
table.put(put);
}
table.close();

View File

@ -176,15 +176,15 @@ public class TestTimestampsFilter {
Table ht = TEST_UTIL.createTable(TableName.valueOf(TABLE), FAMILIES, Integer.MAX_VALUE);
Put p = new Put(Bytes.toBytes("row"));
p.add(FAMILY, Bytes.toBytes("column0"), 3, Bytes.toBytes("value0-3"));
p.add(FAMILY, Bytes.toBytes("column1"), 3, Bytes.toBytes("value1-3"));
p.add(FAMILY, Bytes.toBytes("column2"), 1, Bytes.toBytes("value2-1"));
p.add(FAMILY, Bytes.toBytes("column2"), 2, Bytes.toBytes("value2-2"));
p.add(FAMILY, Bytes.toBytes("column2"), 3, Bytes.toBytes("value2-3"));
p.add(FAMILY, Bytes.toBytes("column3"), 2, Bytes.toBytes("value3-2"));
p.add(FAMILY, Bytes.toBytes("column4"), 1, Bytes.toBytes("value4-1"));
p.add(FAMILY, Bytes.toBytes("column4"), 2, Bytes.toBytes("value4-2"));
p.add(FAMILY, Bytes.toBytes("column4"), 3, Bytes.toBytes("value4-3"));
p.addColumn(FAMILY, Bytes.toBytes("column0"), (long) 3, Bytes.toBytes("value0-3"));
p.addColumn(FAMILY, Bytes.toBytes("column1"), (long) 3, Bytes.toBytes("value1-3"));
p.addColumn(FAMILY, Bytes.toBytes("column2"), (long) 1, Bytes.toBytes("value2-1"));
p.addColumn(FAMILY, Bytes.toBytes("column2"), (long) 2, Bytes.toBytes("value2-2"));
p.addColumn(FAMILY, Bytes.toBytes("column2"), (long) 3, Bytes.toBytes("value2-3"));
p.addColumn(FAMILY, Bytes.toBytes("column3"), (long) 2, Bytes.toBytes("value3-2"));
p.addColumn(FAMILY, Bytes.toBytes("column4"), (long) 1, Bytes.toBytes("value4-1"));
p.addColumn(FAMILY, Bytes.toBytes("column4"), (long) 2, Bytes.toBytes("value4-2"));
p.addColumn(FAMILY, Bytes.toBytes("column4"), (long) 3, Bytes.toBytes("value4-3"));
ht.put(p);
ArrayList<Long> timestamps = new ArrayList<Long>();
@ -360,7 +360,7 @@ public class TestTimestampsFilter {
put.setDurability(Durability.SKIP_WAL);
for (long idx = versionStart; idx <= versionEnd; idx++) {
put.add(cf, column, idx, Bytes.toBytes("value-version-" + idx));
put.addColumn(cf, column, idx, Bytes.toBytes("value-version-" + idx));
}
ht.put(put);
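For reference, a minimal standalone sketch of the two Put#addColumn overloads exercised in the hunks above — the untimestamped form and the form that takes an explicit version timestamp (a long). This snippet is not part of the patch; the class, row, family, and qualifier names are illustrative assumptions, and the Table passed in is assumed to be already open against a table that has the family used here.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical example class, shown only to illustrate the migrated API.
public class AddColumnExample {
  // 'table' is assumed to be an open Table whose schema includes family "f".
  static void putTwoVersions(Table table) throws IOException {
    Put put = new Put(Bytes.toBytes("row1"));
    // No timestamp argument: the cell's version is assigned at write time.
    put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v1"));
    // Timestamped overload: the long argument becomes the cell's version.
    put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), 42L, Bytes.toBytes("v2"));
    table.put(put);
  }
}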

View File

@ -86,7 +86,8 @@ public class TestConstraint {
// test that we don't fail on a valid put
Put put = new Put(row1);
byte[] value = Integer.toString(10).getBytes();
put.add(dummy, new byte[0], value);
byte[] qualifier = new byte[0];
put.addColumn(dummy, qualifier, value);
table.put(put);
} finally {
table.close();
@ -117,7 +118,8 @@ public class TestConstraint {
// test that we do fail on violation
Put put = new Put(row1);
put.add(dummy, new byte[0], "fail".getBytes());
byte[] qualifier = new byte[0];
put.addColumn(dummy, qualifier, "fail".getBytes());
LOG.warn("Doing put in table");
try {
table.put(put);
@ -160,7 +162,8 @@ public class TestConstraint {
try {
// test that we don't fail because its disabled
Put put = new Put(row1);
put.add(dummy, new byte[0], "pass".getBytes());
byte[] qualifier = new byte[0];
put.addColumn(dummy, qualifier, "pass".getBytes());
table.put(put);
} finally {
table.close();
@ -192,7 +195,8 @@ public class TestConstraint {
try {
// test that we do fail on violation
Put put = new Put(row1);
put.add(dummy, new byte[0], "pass".getBytes());
byte[] qualifier = new byte[0];
put.addColumn(dummy, qualifier, "pass".getBytes());
LOG.warn("Doing put in table");
table.put(put);
} finally {
@ -224,8 +228,9 @@ public class TestConstraint {
// test that we do fail on violation
Put put = new Put(row1);
put.add(dummy, new byte[0], "pass".getBytes());
byte[] qualifier = new byte[0];
put.addColumn(dummy, qualifier, "pass".getBytes());
try{
table.put(put);
fail("RuntimeFailConstraint wasn't triggered - this put shouldn't work!");

View File

@ -93,12 +93,12 @@ public class TestAggregateProtocol {
Put put = new Put(ROWS[i]);
put.setDurability(Durability.SKIP_WAL);
Long l = new Long(i);
put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(l));
put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(l));
table.put(put);
Put p2 = new Put(ROWS[i]);
put.setDurability(Durability.SKIP_WAL);
p2.add(TEST_FAMILY, Bytes.add(TEST_MULTI_CQ, Bytes.toBytes(l)), Bytes
.toBytes(l * 10));
p2.addColumn(TEST_FAMILY, Bytes.add(TEST_MULTI_CQ, Bytes.toBytes(l)), Bytes
.toBytes(l * 10));
table.put(p2);
}
table.close();

View File

@ -94,7 +94,7 @@ public class TestBatchCoprocessorEndpoint {
Table table = util.getConnection().getTable(TEST_TABLE);
for (int i = 0; i < ROWSIZE; i++) {
Put put = new Put(ROWS[i]);
put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
table.put(put);
}
table.close();

View File

@ -90,11 +90,12 @@ public class TestDoubleColumnInterpreter {
Put put = new Put(ROWS[i]);
put.setDurability(Durability.SKIP_WAL);
Double d = new Double(i);
put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(d));
put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(d));
table.put(put);
Put p2 = new Put(ROWS[i]);
put.setDurability(Durability.SKIP_WAL);
p2.add(TEST_FAMILY, Bytes.add(TEST_MULTI_CQ, Bytes.toBytes(d)), Bytes.toBytes(d * 0.10));
p2.addColumn(TEST_FAMILY, Bytes.add(TEST_MULTI_CQ, Bytes.toBytes(d)),
Bytes.toBytes(d * 0.10));
table.put(p2);
}
table.close();

View File

@ -108,15 +108,15 @@ public class TestHTableWrapper {
table = util.createTable(TEST_TABLE, TEST_FAMILY);
Put puta = new Put(ROW_A);
puta.add(TEST_FAMILY, qualifierCol1, bytes1);
puta.addColumn(TEST_FAMILY, qualifierCol1, bytes1);
table.put(puta);
Put putb = new Put(ROW_B);
putb.add(TEST_FAMILY, qualifierCol1, bytes2);
putb.addColumn(TEST_FAMILY, qualifierCol1, bytes2);
table.put(putb);
Put putc = new Put(ROW_C);
putc.add(TEST_FAMILY, qualifierCol1, bytes3);
putc.addColumn(TEST_FAMILY, qualifierCol1, bytes3);
table.put(putc);
}
@ -204,7 +204,7 @@ public class TestHTableWrapper {
private void checkPutsAndDeletes() throws IOException {
// put:
Put putD = new Put(ROW_D).add(TEST_FAMILY, qualifierCol1, bytes2);
Put putD = new Put(ROW_D).addColumn(TEST_FAMILY, qualifierCol1, bytes2);
hTableInterface.put(putD);
checkRowValue(ROW_D, bytes2);
@ -214,8 +214,8 @@ public class TestHTableWrapper {
checkRowValue(ROW_D, null);
// multiple puts:
Put[] puts = new Put[] { new Put(ROW_D).add(TEST_FAMILY, qualifierCol1, bytes2),
new Put(ROW_E).add(TEST_FAMILY, qualifierCol1, bytes3) };
Put[] puts = new Put[] {new Put(ROW_D).addColumn(TEST_FAMILY, qualifierCol1, bytes2),
new Put(ROW_E).addColumn(TEST_FAMILY, qualifierCol1, bytes3)};
hTableInterface.put(Arrays.asList(puts));
checkRowsValues(new byte[][] { ROW_D, ROW_E }, new byte[][] { bytes2, bytes3 });
@ -226,7 +226,7 @@ public class TestHTableWrapper {
}
private void checkCheckAndPut() throws IOException {
Put putC = new Put(ROW_C).add(TEST_FAMILY, qualifierCol1, bytes5);
Put putC = new Put(ROW_C).addColumn(TEST_FAMILY, qualifierCol1, bytes5);
assertFalse(hTableInterface.checkAndPut(ROW_C, TEST_FAMILY, qualifierCol1, /* expect */bytes4,
putC/* newValue */));
assertTrue(hTableInterface.checkAndPut(ROW_C, TEST_FAMILY, qualifierCol1, /* expect */bytes3,
@ -242,7 +242,7 @@ public class TestHTableWrapper {
}
private void checkIncrementColumnValue() throws IOException {
hTableInterface.put(new Put(ROW_A).add(TEST_FAMILY, qualifierCol1, Bytes.toBytes(1L)));
hTableInterface.put(new Put(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, Bytes.toBytes(1L)));
checkRowValue(ROW_A, Bytes.toBytes(1L));
final long newVal = hTableInterface
@ -319,7 +319,7 @@ public class TestHTableWrapper {
}
private void checkMutateRow() throws IOException {
Put put = new Put(ROW_A).add(TEST_FAMILY, qualifierCol1, bytes1);
Put put = new Put(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, bytes1);
RowMutations rowMutations = new RowMutations(ROW_A);
rowMutations.add(put);
hTableInterface.mutateRow(rowMutations);
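Since addColumn returns the Put it was invoked on — the fluent new Put(ROW_D).addColumn(...) usages in the hunks above rely on this — calls can be chained. A minimal fragment under the same assumptions as the earlier sketch (same imports, an already-open Table, illustrative names only):

// Hypothetical fluent-style usage, not part of this patch.
Put chained = new Put(Bytes.toBytes("row-d"))
    .addColumn(Bytes.toBytes("fam"), Bytes.toBytes("col1"), Bytes.toBytes("v1"))
    .addColumn(Bytes.toBytes("fam"), Bytes.toBytes("col2"), Bytes.toBytes("v2"));
table.put(chained);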

View File

@ -102,7 +102,7 @@ public class TestOpenTableInCoprocessor {
final WALEdit edit, final Durability durability) throws IOException {
Table table = e.getEnvironment().getTable(otherTable, getPool());
Put p = new Put(new byte[] { 'a' });
p.add(family, null, new byte[] { 'a' });
p.addColumn(family, null, new byte[]{'a'});
try {
table.batch(Collections.singletonList(put), null);
} catch (InterruptedException e1) {
@ -162,7 +162,7 @@ public class TestOpenTableInCoprocessor {
Table table = UTIL.getConnection().getTable(TableName.valueOf("primary"));
Put p = new Put(new byte[] { 'a' });
p.add(family, null, new byte[] { 'a' });
p.addColumn(family, null, new byte[]{'a'});
table.put(p);
table.close();

View File

@ -94,7 +94,7 @@ public class TestRegionObserverBypass {
public void testSimple() throws Exception {
Table t = util.getConnection().getTable(tableName);
Put p = new Put(row1);
p.add(test,dummy,dummy);
p.addColumn(test, dummy, dummy);
// before HBASE-4331, this would throw an exception
t.put(p);
checkRowAndDelete(t,row1,0);
@ -114,13 +114,13 @@ public class TestRegionObserverBypass {
Table t = util.getConnection().getTable(tableName);
List<Put> puts = new ArrayList<Put>();
Put p = new Put(row1);
p.add(dummy,dummy,dummy);
p.addColumn(dummy, dummy, dummy);
puts.add(p);
p = new Put(row2);
p.add(test,dummy,dummy);
p.addColumn(test, dummy, dummy);
puts.add(p);
p = new Put(row3);
p.add(test,dummy,dummy);
p.addColumn(test, dummy, dummy);
puts.add(p);
// before HBASE-4331, this would throw an exception
t.put(puts);
@ -130,13 +130,13 @@ public class TestRegionObserverBypass {
puts.clear();
p = new Put(row1);
p.add(test,dummy,dummy);
p.addColumn(test, dummy, dummy);
puts.add(p);
p = new Put(row2);
p.add(test,dummy,dummy);
p.addColumn(test, dummy, dummy);
puts.add(p);
p = new Put(row3);
p.add(test,dummy,dummy);
p.addColumn(test, dummy, dummy);
puts.add(p);
// before HBASE-4331, this would throw an exception
t.put(puts);
@ -146,13 +146,13 @@ public class TestRegionObserverBypass {
puts.clear();
p = new Put(row1);
p.add(test,dummy,dummy);
p.addColumn(test, dummy, dummy);
puts.add(p);
p = new Put(row2);
p.add(test,dummy,dummy);
p.addColumn(test, dummy, dummy);
puts.add(p);
p = new Put(row3);
p.add(dummy,dummy,dummy);
p.addColumn(dummy, dummy, dummy);
puts.add(p);
// this worked fine even before HBASE-4331
t.put(puts);
@ -162,13 +162,13 @@ public class TestRegionObserverBypass {
puts.clear();
p = new Put(row1);
p.add(dummy,dummy,dummy);
p.addColumn(dummy, dummy, dummy);
puts.add(p);
p = new Put(row2);
p.add(test,dummy,dummy);
p.addColumn(test, dummy, dummy);
puts.add(p);
p = new Put(row3);
p.add(dummy,dummy,dummy);
p.addColumn(dummy, dummy, dummy);
puts.add(p);
// this worked fine even before HBASE-4331
t.put(puts);
@ -178,13 +178,13 @@ public class TestRegionObserverBypass {
puts.clear();
p = new Put(row1);
p.add(test,dummy,dummy);
p.addColumn(test, dummy, dummy);
puts.add(p);
p = new Put(row2);
p.add(dummy,dummy,dummy);
p.addColumn(dummy, dummy, dummy);
puts.add(p);
p = new Put(row3);
p.add(test,dummy,dummy);
p.addColumn(test, dummy, dummy);
puts.add(p);
// before HBASE-4331, this would throw an exception
t.put(puts);

View File

@ -130,9 +130,9 @@ public class TestRegionObserverInterface {
new Boolean[] { false, false, false, false, false, false, false, false });
Put put = new Put(ROW);
put.add(A, A, A);
put.add(B, B, B);
put.add(C, C, C);
put.addColumn(A, A, A);
put.addColumn(B, B, B);
put.addColumn(C, C, C);
table.put(put);
verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet",
@ -189,9 +189,9 @@ public class TestRegionObserverInterface {
tableName,
new Boolean[] {false, false, false, false, false});
Put put = new Put(ROW);
put.add(A, A, A);
put.add(B, B, B);
put.add(C, C, C);
put.addColumn(A, A, A);
put.addColumn(B, B, B);
put.addColumn(C, C, C);
Delete delete = new Delete(ROW);
delete.deleteColumn(A, A);
@ -248,10 +248,10 @@ public class TestRegionObserverInterface {
TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndPutHooks");
try (Table table = util.createTable(tableName, new byte[][] {A, B, C})) {
Put p = new Put(Bytes.toBytes(0));
p.add(A, A, A);
p.addColumn(A, A, A);
table.put(p);
p = new Put(Bytes.toBytes(0));
p.add(A, A, A);
p.addColumn(A, A, A);
verifyMethodResult(SimpleRegionObserver.class,
new String[] { "hadPreCheckAndPut", "hadPreCheckAndPutAfterRowLock",
"hadPostCheckAndPut" }, tableName, new Boolean[] { false, false, false });
@ -274,7 +274,7 @@ public class TestRegionObserverInterface {
Table table = util.createTable(tableName, new byte[][] {A, B, C});
try {
Put p = new Put(Bytes.toBytes(0));
p.add(A, A, A);
p.addColumn(A, A, A);
table.put(p);
Delete d = new Delete(Bytes.toBytes(0));
table.delete(d);
@ -338,7 +338,7 @@ public class TestRegionObserverInterface {
Table table = util.getConnection().getTable(tableName);
Put put = new Put(ROW);
put.add(A, A, A);
put.addColumn(A, A, A);
table.put(put);
Get get = new Get(ROW);
@ -412,7 +412,7 @@ public class TestRegionObserverInterface {
Table table = util.getConnection().getTable(tableName);
Put put = new Put(ROW);
put.add(A, A, A);
put.addColumn(A, A, A);
table.put(put);
Delete delete = new Delete(ROW);
@ -522,7 +522,7 @@ public class TestRegionObserverInterface {
byte[] iBytes = Bytes.toBytes(i);
Put put = new Put(iBytes);
put.setDurability(Durability.SKIP_WAL);
put.add(A, A, iBytes);
put.addColumn(A, A, iBytes);
table.put(put);
}
@ -677,9 +677,9 @@ public class TestRegionObserverInterface {
}
Put put = new Put(ROW);
put.add(A, A, A);
put.add(B, B, B);
put.add(C, C, C);
put.addColumn(A, A, A);
put.addColumn(B, B, B);
put.addColumn(C, C, C);
table.put(put);
verifyMethodResult(SimpleRegionObserver.Legacy.class,
@ -729,9 +729,9 @@ public class TestRegionObserverInterface {
}
Put put = new Put(ROW);
put.add(A, A, A);
put.add(B, B, B);
put.add(C, C, C);
put.addColumn(A, A, A);
put.addColumn(B, B, B);
put.addColumn(C, C, C);
table.put(put);
cluster.killRegionServer(rs1.getRegionServer().getServerName());

View File

@ -178,7 +178,7 @@ public class TestRegionObserverScannerOpenHook {
h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
Put put = new Put(ROW);
put.add(A, A, A);
put.addColumn(A, A, A);
region.put(put);
Get get = new Get(ROW);
@ -204,7 +204,7 @@ public class TestRegionObserverScannerOpenHook {
// put a row and flush it to disk
Put put = new Put(ROW);
put.add(A, A, A);
put.addColumn(A, A, A);
region.put(put);
region.flush(true);
Get get = new Get(ROW);
@ -278,7 +278,7 @@ public class TestRegionObserverScannerOpenHook {
// put a row and flush it to disk
Put put = new Put(ROW);
put.add(A, A, A);
put.addColumn(A, A, A);
table.put(put);
HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getTableName());
@ -291,7 +291,7 @@ public class TestRegionObserverScannerOpenHook {
// put another row and flush that too
put = new Put(Bytes.toBytes("anotherrow"));
put.add(A, A, A);
put.addColumn(A, A, A);
table.put(put);
admin.flushRegion(region.getRegionInfo().getRegionName());

View File

@ -127,7 +127,7 @@ public class TestRegionObserverStacking extends TestCase {
h.load(ObserverC.class, Coprocessor.PRIORITY_LOWEST, conf);
Put put = new Put(ROW);
put.add(A, A, A);
put.addColumn(A, A, A);
region.put(put);
Coprocessor c = h.findCoprocessor(ObserverA.class.getName());

View File

@ -107,7 +107,7 @@ public class TestRegionServerCoprocessorExceptionWithAbort {
try {
final byte[] ROW = Bytes.toBytes("aaa");
Put put = new Put(ROW);
put.add(TEST_FAMILY, ROW, ROW);
put.addColumn(TEST_FAMILY, ROW, ROW);
table.put(put);
} catch (IOException e) {
// The region server is going to be aborted.

View File

@ -138,15 +138,15 @@ public class TestRowProcessorEndpoint {
table = util.createTable(TABLE, FAM);
{
Put put = new Put(ROW);
put.add(FAM, A, Bytes.add(B, C)); // B, C are friends of A
put.add(FAM, B, Bytes.add(D, E, F)); // D, E, F are friends of B
put.add(FAM, C, G); // G is a friend of C
put.addColumn(FAM, A, Bytes.add(B, C)); // B, C are friends of A
put.addColumn(FAM, B, Bytes.add(D, E, F)); // D, E, F are friends of B
put.addColumn(FAM, C, G); // G is a friend of C
table.put(put);
rowSize = put.size();
}
Put put = new Put(ROW2);
put.add(FAM, D, E);
put.add(FAM, F, G);
put.addColumn(FAM, D, E);
put.addColumn(FAM, F, G);
table.put(put);
row2Size = put.size();
}

View File

@ -489,7 +489,7 @@ public class TestWALObserver {
private Put creatPutWith2Families(byte[] row) throws IOException {
Put p = new Put(row);
for (int i = 0; i < TEST_FAMILY.length - 1; i++) {
p.add(TEST_FAMILY[i], TEST_QUALIFIER[i], TEST_VALUE[i]);
p.addColumn(TEST_FAMILY[i], TEST_QUALIFIER[i], TEST_VALUE[i]);
}
return p;
}

View File

@ -100,26 +100,26 @@ public class TestDependentColumnFilter {
private void addData() throws IOException {
Put put = new Put(ROWS[0]);
// add in an entry for each stamp, with 2 as a "good" value
put.add(FAMILIES[0], QUALIFIER, STAMPS[0], BAD_VALS[0]);
put.add(FAMILIES[0], QUALIFIER, STAMPS[1], BAD_VALS[1]);
put.add(FAMILIES[0], QUALIFIER, STAMPS[2], MATCH_VAL);
put.addColumn(FAMILIES[0], QUALIFIER, STAMPS[0], BAD_VALS[0]);
put.addColumn(FAMILIES[0], QUALIFIER, STAMPS[1], BAD_VALS[1]);
put.addColumn(FAMILIES[0], QUALIFIER, STAMPS[2], MATCH_VAL);
// add in entries for stamps 0 and 2.
// without a value check both will be "accepted"
// with one 2 will be accepted(since the corresponding ts entry
// has a matching value
put.add(FAMILIES[1], QUALIFIER, STAMPS[0], BAD_VALS[0]);
put.add(FAMILIES[1], QUALIFIER, STAMPS[2], BAD_VALS[2]);
put.addColumn(FAMILIES[1], QUALIFIER, STAMPS[0], BAD_VALS[0]);
put.addColumn(FAMILIES[1], QUALIFIER, STAMPS[2], BAD_VALS[2]);
this.region.put(put);
put = new Put(ROWS[1]);
put.add(FAMILIES[0], QUALIFIER, STAMPS[0], BAD_VALS[0]);
put.addColumn(FAMILIES[0], QUALIFIER, STAMPS[0], BAD_VALS[0]);
// there is no corresponding timestamp for this so it should never pass
put.add(FAMILIES[0], QUALIFIER, STAMPS[2], MATCH_VAL);
put.addColumn(FAMILIES[0], QUALIFIER, STAMPS[2], MATCH_VAL);
// if we reverse the qualifiers this one should pass
put.add(FAMILIES[1], QUALIFIER, STAMPS[0], MATCH_VAL);
put.addColumn(FAMILIES[1], QUALIFIER, STAMPS[0], MATCH_VAL);
// should pass
put.add(FAMILIES[1], QUALIFIER, STAMPS[1], BAD_VALS[2]);
put.addColumn(FAMILIES[1], QUALIFIER, STAMPS[1], BAD_VALS[2]);
this.region.put(put);
}

View File

@ -152,7 +152,7 @@ public class TestFilter {
Put p = new Put(ROW);
p.setDurability(Durability.SKIP_WAL);
for(byte [] QUALIFIER : QUALIFIERS_ONE) {
p.add(FAMILIES[0], QUALIFIER, VALUES[0]);
p.addColumn(FAMILIES[0], QUALIFIER, VALUES[0]);
}
this.region.put(p);
}
@ -160,7 +160,7 @@ public class TestFilter {
Put p = new Put(ROW);
p.setDurability(Durability.SKIP_WAL);
for(byte [] QUALIFIER : QUALIFIERS_TWO) {
p.add(FAMILIES[1], QUALIFIER, VALUES[1]);
p.addColumn(FAMILIES[1], QUALIFIER, VALUES[1]);
}
this.region.put(p);
}
@ -173,7 +173,7 @@ public class TestFilter {
Put p = new Put(ROW);
p.setDurability(Durability.SKIP_WAL);
for(byte [] QUALIFIER : QUALIFIERS_ONE) {
p.add(FAMILIES[1], QUALIFIER, VALUES[0]);
p.addColumn(FAMILIES[1], QUALIFIER, VALUES[0]);
}
this.region.put(p);
}
@ -181,7 +181,7 @@ public class TestFilter {
Put p = new Put(ROW);
p.setDurability(Durability.SKIP_WAL);
for(byte [] QUALIFIER : QUALIFIERS_TWO) {
p.add(FAMILIES[0], QUALIFIER, VALUES[1]);
p.addColumn(FAMILIES[0], QUALIFIER, VALUES[1]);
}
this.region.put(p);
}
@ -229,7 +229,7 @@ public class TestFilter {
Put p = new Put(ROW);
p.setDurability(Durability.SKIP_WAL);
for (byte[] QUALIFIER : QUALIFIERS_THREE) {
p.add(FAMILIES[0], QUALIFIER, VALUES[0]);
p.addColumn(FAMILIES[0], QUALIFIER, VALUES[0]);
}
this.region.put(p);
@ -238,7 +238,7 @@ public class TestFilter {
Put p = new Put(ROW);
p.setDurability(Durability.SKIP_WAL);
for (byte[] QUALIFIER : QUALIFIERS_FOUR) {
p.add(FAMILIES[1], QUALIFIER, VALUES[1]);
p.addColumn(FAMILIES[1], QUALIFIER, VALUES[1]);
}
this.region.put(p);
}
@ -250,7 +250,7 @@ public class TestFilter {
Put p = new Put(ROW);
p.setDurability(Durability.SKIP_WAL);
for (byte[] QUALIFIER : QUALIFIERS_THREE) {
p.add(FAMILIES[1], QUALIFIER, VALUES[0]);
p.addColumn(FAMILIES[1], QUALIFIER, VALUES[0]);
}
this.region.put(p);
}
@ -258,7 +258,7 @@ public class TestFilter {
Put p = new Put(ROW);
p.setDurability(Durability.SKIP_WAL);
for (byte[] QUALIFIER : QUALIFIERS_FOUR) {
p.add(FAMILIES[0], QUALIFIER, VALUES[1]);
p.addColumn(FAMILIES[0], QUALIFIER, VALUES[1]);
}
this.region.put(p);
}
@ -1458,7 +1458,7 @@ public class TestFilter {
for(int i=0; i<5; i++) {
Put p = new Put(Bytes.toBytes((char)('a'+i) + "row"));
p.setDurability(Durability.SKIP_WAL);
p.add(family, qualifier, Bytes.toBytes(String.valueOf(111+i)));
p.addColumn(family, qualifier, Bytes.toBytes(String.valueOf(111 + i)));
testRegion.put(p);
}
testRegion.flush(true);
@ -1501,7 +1501,7 @@ public class TestFilter {
// Need to change one of the group one columns to use group two value
Put p = new Put(ROWS_ONE[2]);
p.add(FAMILIES[0], QUALIFIERS_ONE[2], VALUES[1]);
p.addColumn(FAMILIES[0], QUALIFIERS_ONE[2], VALUES[1]);
this.region.put(p);
// Now let's grab rows that have Q_ONE[0](VALUES[0]) and Q_ONE[2](VALUES[1])
@ -1816,11 +1816,11 @@ public class TestFilter {
@Test
public void testColumnPaginationFilter() throws Exception {
// Test that the filter skips multiple column versions.
Put p = new Put(ROWS_ONE[0]);
p.setDurability(Durability.SKIP_WAL);
p.add(FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]);
this.region.put(p);
// Test that the filter skips multiple column versions.
Put p = new Put(ROWS_ONE[0]);
p.setDurability(Durability.SKIP_WAL);
p.addColumn(FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]);
this.region.put(p);
this.region.flush(true);
// Set of KVs (page: 1; pageSize: 1) - the first set of 1 column per row
@ -2017,7 +2017,7 @@ public class TestFilter {
for(int i=0; i<10; i++) {
Put p = new Put(Bytes.toBytes("row" + i));
p.setDurability(Durability.SKIP_WAL);
p.add(FAMILIES[0], columnStatus, Bytes.toBytes(i%2));
p.addColumn(FAMILIES[0], columnStatus, Bytes.toBytes(i % 2));
testRegion.put(p);
}
testRegion.flush(true);

View File

@ -105,8 +105,7 @@ public class TestFilterWithScanLimits extends FilterTestingCluster {
for (int i = 1; i < 4; i++) {
Put put = new Put(Bytes.toBytes("row" + i));
for (int j = 1; j < 6; j++) {
put.add(Bytes.toBytes("f1"), Bytes.toBytes("c" + j),
Bytes.toBytes(i + "_c" + j));
put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("c" + j), Bytes.toBytes(i + "_c" + j));
}
puts.add(put);
}

View File

@ -129,8 +129,8 @@ public class TestFilterWrapper {
long timestamp = j;
if (i != 1)
timestamp = i;
put.add(Bytes.toBytes("f1"), Bytes.toBytes("c" + j), timestamp,
Bytes.toBytes(i + "_c" + j));
put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("c" + j), timestamp,
Bytes.toBytes(i + "_c" + j));
}
puts.add(put);
}

View File

@ -116,7 +116,7 @@ public class TestFuzzyRowAndColumnRangeFilter {
Put p = new Put(rk);
p.setDurability(Durability.SKIP_WAL);
p.add(cf.getBytes(), cq, Bytes.toBytes(c));
p.addColumn(cf.getBytes(), cq, Bytes.toBytes(c));
ht.put(p);
LOG.info("Inserting: rk: " + Bytes.toStringBinary(rk) + " cq: "
+ Bytes.toStringBinary(cq));

View File

@ -139,7 +139,7 @@ public class TestFuzzyRowFilterEndToEnd {
Put p = new Put(rk);
p.setDurability(Durability.SKIP_WAL);
p.add(cf.getBytes(), cq, Bytes.toBytes(c));
p.addColumn(cf.getBytes(), cq, Bytes.toBytes(c));
ht.put(p);
}
}
@ -277,7 +277,7 @@ public class TestFuzzyRowFilterEndToEnd {
Put p = new Put(rk);
p.setDurability(Durability.SKIP_WAL);
p.add(cf.getBytes(), cq, Bytes.toBytes(c));
p.addColumn(cf.getBytes(), cq, Bytes.toBytes(c));
ht.put(p);
LOG.info("Inserting: rk: " + Bytes.toStringBinary(rk) + " cq: "
+ Bytes.toStringBinary(cq));

View File

@ -75,8 +75,8 @@ public class TestInvocationRecordFilter {
Put put = new Put(ROW_BYTES);
for (int i = 0; i < 10; i += 2) {
// puts 0, 2, 4, 6 and 8
put.add(FAMILY_NAME_BYTES, Bytes.toBytes(QUALIFIER_PREFIX + i), i,
Bytes.toBytes(VALUE_PREFIX + i));
put.addColumn(FAMILY_NAME_BYTES, Bytes.toBytes(QUALIFIER_PREFIX + i), (long) i,
Bytes.toBytes(VALUE_PREFIX + i));
}
this.region.put(put);
this.region.flush(true);

View File

@ -75,7 +75,7 @@ public class TestScanRowPrefix extends FilterTestingCluster {
for (byte[] rowId: rowIds) {
Put p = new Put(rowId);
// Use the rowId as the column qualifier
p.add("F".getBytes(), rowId, "Dummy value".getBytes());
p.addColumn("F".getBytes(), rowId, "Dummy value".getBytes());
table.put(p);
}

View File

@ -318,7 +318,7 @@ public class TestBlockReorder {
// insert one put to ensure a minimal size
Put p = new Put(sb);
p.add(sb, sb, sb);
p.addColumn(sb, sb, sb);
h.put(p);
DirectoryListing dl = dfs.getClient().listPaths(rootDir, HdfsFileStatus.EMPTY_NAME);

View File

@ -133,8 +133,7 @@ public class TestChangingEncoding {
for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) {
Put put = new Put(getRowKey(batchId, i));
for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
put.add(CF_BYTES, getQualifier(j),
getValue(batchId, i, j));
put.addColumn(CF_BYTES, getQualifier(j), getValue(batchId, i, j));
}
put.setDurability(Durability.SKIP_WAL);
puts.add(put);

View File

@ -152,7 +152,7 @@ public class TestEncodedSeekers {
KeyValue kv = new KeyValue(key, CF_BYTES, col, HConstants.LATEST_TIMESTAMP, value, tag);
put.add(kv);
} else {
put.add(CF_BYTES, col, value);
put.addColumn(CF_BYTES, col, value);
}
if(VERBOSE){
KeyValue kvPut = new KeyValue(key, CF_BYTES, col, value);

View File

@ -131,8 +131,8 @@ public class TestForceCacheImportantBlocks {
Put put = new Put(Bytes.toBytes("row" + i));
for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
for (long ts = 1; ts < NUM_TIMESTAMPS_PER_COL; ++ts) {
put.add(CF_BYTES, Bytes.toBytes("col" + j), ts,
Bytes.toBytes("value" + i + "_" + j + "_" + ts));
put.addColumn(CF_BYTES, Bytes.toBytes("col" + j), ts,
Bytes.toBytes("value" + i + "_" + j + "_" + ts));
}
}
region.put(put);

View File

@ -106,8 +106,8 @@ public class TestScannerSelectionUsingKeyRange {
for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
Put put = new Put(Bytes.toBytes("row" + iRow));
for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
put.addColumn(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
}
region.put(put);
}

View File

@ -120,8 +120,8 @@ public class TestScannerSelectionUsingTTL {
for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
Put put = new Put(Bytes.toBytes("row" + iRow));
for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
ts + version, Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
put.addColumn(FAMILY_BYTES, Bytes.toBytes("col" + iCol), ts + version,
Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
}
region.put(put);
}

View File

@ -125,12 +125,12 @@ public class TestTableInputFormat {
Table table = UTIL.createTable(TableName.valueOf(tableName), families);
Put p = new Put("aaa".getBytes());
for (byte[] family : families) {
p.add(family, null, "value aaa".getBytes());
p.addColumn(family, null, "value aaa".getBytes());
}
table.put(p);
p = new Put("bbb".getBytes());
for (byte[] family : families) {
p.add(family, null, "value bbb".getBytes());
p.addColumn(family, null, "value bbb".getBytes());
}
table.put(p);
return table;

View File

@ -114,8 +114,7 @@ public class TestTableMapReduceUtil {
for (String president : presidentsRowKeys) {
if (presidentNames.hasNext()) {
Put p = new Put(Bytes.toBytes(president));
p.add(COLUMN_FAMILY, COLUMN_QUALIFIER,
Bytes.toBytes(presidentNames.next()));
p.addColumn(COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(presidentNames.next()));
table.put(p);
}
}
@ -123,7 +122,7 @@ public class TestTableMapReduceUtil {
for (String actor : actorsRowKeys) {
if (actorNames.hasNext()) {
Put p = new Put(Bytes.toBytes(actor));
p.add(COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(actorNames.next()));
p.addColumn(COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(actorNames.next()));
table.put(p);
}
}
@ -265,8 +264,9 @@ public class TestTableMapReduceUtil {
String name = Bytes.toString(result.getValue(COLUMN_FAMILY,
COLUMN_QUALIFIER));
outCollector.collect(outKey, new Put(Bytes.toBytes("rowKey2")).add(
COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(name)));
outCollector.collect(outKey,
new Put(Bytes.toBytes("rowKey2"))
.addColumn(COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(name)));
}
}
}

View File

@ -83,14 +83,14 @@ public class TestCellCounter {
Table t = UTIL.createTable(sourceTable, families);
try{
Put p = new Put(ROW1);
p.add(FAMILY_A, QUALIFIER, now, Bytes.toBytes("Data11"));
p.add(FAMILY_B, QUALIFIER, now + 1, Bytes.toBytes("Data12"));
p.add(FAMILY_A, QUALIFIER, now + 2, Bytes.toBytes("Data13"));
p.addColumn(FAMILY_A, QUALIFIER, now, Bytes.toBytes("Data11"));
p.addColumn(FAMILY_B, QUALIFIER, now + 1, Bytes.toBytes("Data12"));
p.addColumn(FAMILY_A, QUALIFIER, now + 2, Bytes.toBytes("Data13"));
t.put(p);
p = new Put(ROW2);
p.add(FAMILY_B, QUALIFIER, now, Bytes.toBytes("Dat21"));
p.add(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22"));
p.add(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23"));
p.addColumn(FAMILY_B, QUALIFIER, now, Bytes.toBytes("Dat21"));
p.addColumn(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22"));
p.addColumn(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23"));
t.put(p);
String[] args = { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "^row1" };
runCount(args);
@ -120,31 +120,32 @@ public class TestCellCounter {
byte[][] families = { FAMILY_A, FAMILY_B };
Table t = UTIL.createTable(sourceTable, families);
try{
Put p = new Put(ROW1);
p.add(FAMILY_A, QUALIFIER, now, Bytes.toBytes("Data11"));
p.add(FAMILY_B, QUALIFIER, now + 1, Bytes.toBytes("Data12"));
p.add(FAMILY_A, QUALIFIER, now + 2, Bytes.toBytes("Data13"));
t.put(p);
p = new Put(ROW2);
p.add(FAMILY_B, QUALIFIER, now, Bytes.toBytes("Dat21"));
p.add(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22"));
p.add(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23"));
t.put(p);
String[] args = {
sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "^row1", "--starttime=" + now,
"--endtime=" + now + 2 };
runCount(args);
FileInputStream inputStream = new FileInputStream(OUTPUT_DIR + File.separator +
"part-r-00000");
String data = IOUtils.toString(inputStream);
inputStream.close();
assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2"));
assertTrue(data.contains("Total Qualifiers across all Rows" + "\t" + "2"));
assertTrue(data.contains("Total ROWS" + "\t" + "1"));
assertTrue(data.contains("b;q" + "\t" + "1"));
assertTrue(data.contains("a;q" + "\t" + "1"));
assertTrue(data.contains("row1;a;q_Versions" + "\t" + "1"));
assertTrue(data.contains("row1;b;q_Versions" + "\t" + "1"));
Put p = new Put(ROW1);
p.addColumn(FAMILY_A, QUALIFIER, now, Bytes.toBytes("Data11"));
p.addColumn(FAMILY_B, QUALIFIER, now + 1, Bytes.toBytes("Data12"));
p.addColumn(FAMILY_A, QUALIFIER, now + 2, Bytes.toBytes("Data13"));
t.put(p);
p = new Put(ROW2);
p.addColumn(FAMILY_B, QUALIFIER, now, Bytes.toBytes("Dat21"));
p.addColumn(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22"));
p.addColumn(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23"));
t.put(p);
String[] args = {
sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "^row1",
"--starttime=" + now,
"--endtime=" + now + 2 };
runCount(args);
FileInputStream inputStream = new FileInputStream(OUTPUT_DIR + File.separator +
"part-r-00000");
String data = IOUtils.toString(inputStream);
inputStream.close();
assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2"));
assertTrue(data.contains("Total Qualifiers across all Rows" + "\t" + "2"));
assertTrue(data.contains("Total ROWS" + "\t" + "1"));
assertTrue(data.contains("b;q" + "\t" + "1"));
assertTrue(data.contains("a;q" + "\t" + "1"));
assertTrue(data.contains("row1;a;q_Versions" + "\t" + "1"));
assertTrue(data.contains("row1;b;q_Versions" + "\t" + "1"));
}finally{
t.close();
FileUtil.fullyDelete(new File(OUTPUT_DIR));
@ -160,38 +161,38 @@ public class TestCellCounter {
byte[][] families = { FAMILY_A, FAMILY_B };
Table t = UTIL.createTable(sourceTable, families);
try{
Put p = new Put(ROW1);
p.add(FAMILY_A, QUALIFIER, now, Bytes.toBytes("Data11"));
p.add(FAMILY_B, QUALIFIER, now + 1, Bytes.toBytes("Data12"));
p.add(FAMILY_A, QUALIFIER, now + 2, Bytes.toBytes("Data13"));
t.put(p);
p = new Put(ROW2);
p.add(FAMILY_B, QUALIFIER, now, Bytes.toBytes("Dat21"));
p.add(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22"));
p.add(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23"));
t.put(p);
String[] args = {
sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "^row1",
"--endtime=" + now + 1 };
runCount(args);
FileInputStream inputStream = new FileInputStream(OUTPUT_DIR + File.separator +
"part-r-00000");
String data = IOUtils.toString(inputStream);
inputStream.close();
assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2"));
assertTrue(data.contains("Total Qualifiers across all Rows" + "\t" + "2"));
assertTrue(data.contains("Total ROWS" + "\t" + "1"));
assertTrue(data.contains("b;q" + "\t" + "1"));
assertTrue(data.contains("a;q" + "\t" + "1"));
assertTrue(data.contains("row1;a;q_Versions" + "\t" + "1"));
assertTrue(data.contains("row1;b;q_Versions" + "\t" + "1"));
Put p = new Put(ROW1);
p.addColumn(FAMILY_A, QUALIFIER, now, Bytes.toBytes("Data11"));
p.addColumn(FAMILY_B, QUALIFIER, now + 1, Bytes.toBytes("Data12"));
p.addColumn(FAMILY_A, QUALIFIER, now + 2, Bytes.toBytes("Data13"));
t.put(p);
p = new Put(ROW2);
p.addColumn(FAMILY_B, QUALIFIER, now, Bytes.toBytes("Dat21"));
p.addColumn(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22"));
p.addColumn(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23"));
t.put(p);
String[] args = {
sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "^row1",
"--endtime=" + now + 1 };
runCount(args);
FileInputStream inputStream = new FileInputStream(OUTPUT_DIR + File.separator +
"part-r-00000");
String data = IOUtils.toString(inputStream);
inputStream.close();
assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2"));
assertTrue(data.contains("Total Qualifiers across all Rows" + "\t" + "2"));
assertTrue(data.contains("Total ROWS" + "\t" + "1"));
assertTrue(data.contains("b;q" + "\t" + "1"));
assertTrue(data.contains("a;q" + "\t" + "1"));
assertTrue(data.contains("row1;a;q_Versions" + "\t" + "1"));
assertTrue(data.contains("row1;b;q_Versions" + "\t" + "1"));
}finally{
t.close();
FileUtil.fullyDelete(new File(OUTPUT_DIR));
}
}
/**
/**
* Test CellCounter with time range all data should print to output
*/
@Test (timeout=300000)
@ -200,27 +201,27 @@ public class TestCellCounter {
byte[][] families = { FAMILY_A, FAMILY_B };
Table t = UTIL.createTable(sourceTable, families);
try{
Put p = new Put(ROW1);
p.add(FAMILY_A, QUALIFIER, now, Bytes.toBytes("Data11"));
p.add(FAMILY_B, QUALIFIER, now + 1, Bytes.toBytes("Data12"));
p.add(FAMILY_A, QUALIFIER, now + 2, Bytes.toBytes("Data13"));
t.put(p);
p = new Put(ROW2);
p.add(FAMILY_B, QUALIFIER, now, Bytes.toBytes("Dat21"));
p.add(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22"));
p.add(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23"));
t.put(p);
String[] args = {
Put p = new Put(ROW1);
p.addColumn(FAMILY_A, QUALIFIER, now, Bytes.toBytes("Data11"));
p.addColumn(FAMILY_B, QUALIFIER, now + 1, Bytes.toBytes("Data12"));
p.addColumn(FAMILY_A, QUALIFIER, now + 2, Bytes.toBytes("Data13"));
t.put(p);
p = new Put(ROW2);
p.addColumn(FAMILY_B, QUALIFIER, now, Bytes.toBytes("Dat21"));
p.addColumn(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22"));
p.addColumn(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23"));
t.put(p);
String[] args = {
sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "--starttime=" + now + 1,
"--endtime=" + now + 2 };
"--endtime=" + now + 2 };
runCount(args);
FileInputStream inputStream = new FileInputStream(OUTPUT_DIR + File.separator +
"part-r-00000");
String data = IOUtils.toString(inputStream);
runCount(args);
FileInputStream inputStream = new FileInputStream(OUTPUT_DIR + File.separator +
"part-r-00000");
String data = IOUtils.toString(inputStream);
inputStream.close();
// nothing should have been emitted to the reducer
assertTrue(data.isEmpty());
// nothing should have been emitted to the reducer
assertTrue(data.isEmpty());
}finally{
t.close();
FileUtil.fullyDelete(new File(OUTPUT_DIR));
@ -283,14 +284,14 @@ public class TestCellCounter {
Table t = UTIL.createTable(sourceTable, families);
try {
Put p = new Put(ROW1);
p.add(FAMILY_A, QUALIFIER, now, Bytes.toBytes("Data11"));
p.add(FAMILY_B, QUALIFIER, now + 1, Bytes.toBytes("Data12"));
p.add(FAMILY_A, QUALIFIER, now + 2, Bytes.toBytes("Data13"));
p.addColumn(FAMILY_A, QUALIFIER, now, Bytes.toBytes("Data11"));
p.addColumn(FAMILY_B, QUALIFIER, now + 1, Bytes.toBytes("Data12"));
p.addColumn(FAMILY_A, QUALIFIER, now + 2, Bytes.toBytes("Data13"));
t.put(p);
p = new Put(ROW2);
p.add(FAMILY_B, QUALIFIER, now, Bytes.toBytes("Dat21"));
p.add(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22"));
p.add(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23"));
p.addColumn(FAMILY_B, QUALIFIER, now, Bytes.toBytes("Dat21"));
p.addColumn(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22"));
p.addColumn(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23"));
t.put(p);
String[] args = { sourceTable.getNameAsString(), outputDir.toString(), ";" };
runCount(args);

View File

@ -80,7 +80,7 @@ public class TestCopyTable {
// put rows into the first table
for (int i = 0; i < 10; i++) {
Put p = new Put(Bytes.toBytes("row" + i));
p.add(FAMILY, COLUMN1, COLUMN1);
p.addColumn(FAMILY, COLUMN1, COLUMN1);
t1.put(p);
}
@ -143,13 +143,13 @@ public class TestCopyTable {
// put rows into the first table
Put p = new Put(ROW0);
p.add(FAMILY, COLUMN1, COLUMN1);
p.addColumn(FAMILY, COLUMN1, COLUMN1);
t1.put(p);
p = new Put(ROW1);
p.add(FAMILY, COLUMN1, COLUMN1);
p.addColumn(FAMILY, COLUMN1, COLUMN1);
t1.put(p);
p = new Put(ROW2);
p.add(FAMILY, COLUMN1, COLUMN1);
p.addColumn(FAMILY, COLUMN1, COLUMN1);
t1.put(p);
CopyTable copy = new CopyTable();
@ -193,14 +193,14 @@ public class TestCopyTable {
Table t = TEST_UTIL.createTable(sourceTable, families);
Table t2 = TEST_UTIL.createTable(targetTable, families);
Put p = new Put(ROW1);
p.add(FAMILY_A, QUALIFIER, Bytes.toBytes("Data11"));
p.add(FAMILY_B, QUALIFIER, Bytes.toBytes("Data12"));
p.add(FAMILY_A, QUALIFIER, Bytes.toBytes("Data13"));
p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data11"));
p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Data12"));
p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data13"));
t.put(p);
p = new Put(ROW2);
p.add(FAMILY_B, QUALIFIER, Bytes.toBytes("Dat21"));
p.add(FAMILY_A, QUALIFIER, Bytes.toBytes("Data22"));
p.add(FAMILY_B, QUALIFIER, Bytes.toBytes("Data23"));
p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Dat21"));
p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data22"));
p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Data23"));
t.put(p);
long currentTime = System.currentTimeMillis();

View File

@ -1006,7 +1006,7 @@ public class TestHFileOutputFormat {
// put some data in it and flush to create a storefile
Put p = new Put(Bytes.toBytes("test"));
p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
p.addColumn(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
table.put(p);
admin.flush(TABLE_NAME);
assertEquals(1, util.countRows(table));

View File

@ -1047,7 +1047,7 @@ public class TestHFileOutputFormat2 {
// put some data in it and flush to create a storefile
Put p = new Put(Bytes.toBytes("test"));
p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
p.addColumn(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
table.put(p);
admin.flush(TABLE_NAME);
assertEquals(1, util.countRows(table));

View File

@ -71,7 +71,6 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.LauncherSecurityManager;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
@ -172,14 +171,14 @@ public class TestImportExport {
String EXPORT_TABLE = "exportSimpleCase";
Table t = UTIL.createTable(TableName.valueOf(EXPORT_TABLE), FAMILYA, 3);
Put p = new Put(ROW1);
p.add(FAMILYA, QUAL, now, QUAL);
p.add(FAMILYA, QUAL, now+1, QUAL);
p.add(FAMILYA, QUAL, now+2, QUAL);
p.addColumn(FAMILYA, QUAL, now, QUAL);
p.addColumn(FAMILYA, QUAL, now + 1, QUAL);
p.addColumn(FAMILYA, QUAL, now + 2, QUAL);
t.put(p);
p = new Put(ROW2);
p.add(FAMILYA, QUAL, now, QUAL);
p.add(FAMILYA, QUAL, now+1, QUAL);
p.add(FAMILYA, QUAL, now+2, QUAL);
p.addColumn(FAMILYA, QUAL, now, QUAL);
p.addColumn(FAMILYA, QUAL, now + 1, QUAL);
p.addColumn(FAMILYA, QUAL, now + 2, QUAL);
t.put(p);
String[] args = new String[] {
@ -272,12 +271,12 @@ public class TestImportExport {
Table t = UTIL.getConnection().getTable(desc.getTableName());
Put p = new Put(ROW1);
p.add(FAMILYA, QUAL, now, QUAL);
p.add(FAMILYA, QUAL, now+1, QUAL);
p.add(FAMILYA, QUAL, now+2, QUAL);
p.add(FAMILYA, QUAL, now+3, QUAL);
p.add(FAMILYA, QUAL, now+4, QUAL);
t.put(p);
p.addColumn(FAMILYA, QUAL, now, QUAL);
p.addColumn(FAMILYA, QUAL, now + 1, QUAL);
p.addColumn(FAMILYA, QUAL, now + 2, QUAL);
p.addColumn(FAMILYA, QUAL, now + 3, QUAL);
p.addColumn(FAMILYA, QUAL, now + 4, QUAL);
t.put(p);
String[] args = new String[] {
"-D" + Export.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE, // added scanner batching arg.
@ -303,11 +302,11 @@ public class TestImportExport {
Table t = UTIL.getConnection().getTable(desc.getTableName());
Put p = new Put(ROW1);
p.add(FAMILYA, QUAL, now, QUAL);
p.add(FAMILYA, QUAL, now+1, QUAL);
p.add(FAMILYA, QUAL, now+2, QUAL);
p.add(FAMILYA, QUAL, now+3, QUAL);
p.add(FAMILYA, QUAL, now+4, QUAL);
p.addColumn(FAMILYA, QUAL, now, QUAL);
p.addColumn(FAMILYA, QUAL, now + 1, QUAL);
p.addColumn(FAMILYA, QUAL, now + 2, QUAL);
p.addColumn(FAMILYA, QUAL, now + 3, QUAL);
p.addColumn(FAMILYA, QUAL, now + 4, QUAL);
t.put(p);
Delete d = new Delete(ROW1, now+3);
@ -371,7 +370,7 @@ public class TestImportExport {
//Add first version of QUAL
Put p = new Put(ROW1);
p.add(FAMILYA, QUAL, now, QUAL);
p.addColumn(FAMILYA, QUAL, now, QUAL);
exportT.put(p);
//Add Delete family marker
@ -380,7 +379,7 @@ public class TestImportExport {
//Add second version of QUAL
p = new Put(ROW1);
p.add(FAMILYA, QUAL, now+5, "s".getBytes());
p.addColumn(FAMILYA, QUAL, now + 5, "s".getBytes());
exportT.put(p);
//Add second Delete family marker
@ -447,15 +446,15 @@ public class TestImportExport {
Table exportTable = UTIL.getConnection().getTable(desc.getTableName());
Put p1 = new Put(ROW1);
p1.add(FAMILYA, QUAL, now, QUAL);
p1.add(FAMILYA, QUAL, now + 1, QUAL);
p1.add(FAMILYA, QUAL, now + 2, QUAL);
p1.add(FAMILYA, QUAL, now + 3, QUAL);
p1.add(FAMILYA, QUAL, now + 4, QUAL);
p1.addColumn(FAMILYA, QUAL, now, QUAL);
p1.addColumn(FAMILYA, QUAL, now + 1, QUAL);
p1.addColumn(FAMILYA, QUAL, now + 2, QUAL);
p1.addColumn(FAMILYA, QUAL, now + 3, QUAL);
p1.addColumn(FAMILYA, QUAL, now + 4, QUAL);
// Having another row would actually test the filter.
Put p2 = new Put(ROW2);
p2.add(FAMILYA, QUAL, now, QUAL);
p2.addColumn(FAMILYA, QUAL, now, QUAL);
exportTable.put(Arrays.asList(p1, p2));
@ -639,15 +638,15 @@ public class TestImportExport {
// Insert some data
Put put = new Put(ROW1);
put.add(FAMILYA, QUAL, now, QUAL);
put.add(FAMILYA, QUAL, now + 1, QUAL);
put.add(FAMILYA, QUAL, now + 2, QUAL);
put.addColumn(FAMILYA, QUAL, now, QUAL);
put.addColumn(FAMILYA, QUAL, now + 1, QUAL);
put.addColumn(FAMILYA, QUAL, now + 2, QUAL);
exportTable.put(put);
put = new Put(ROW2);
put.add(FAMILYA, QUAL, now, QUAL);
put.add(FAMILYA, QUAL, now + 1, QUAL);
put.add(FAMILYA, QUAL, now + 2, QUAL);
put.addColumn(FAMILYA, QUAL, now, QUAL);
put.addColumn(FAMILYA, QUAL, now + 1, QUAL);
put.addColumn(FAMILYA, QUAL, now + 2, QUAL);
exportTable.put(put);
// Run the export

View File

@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@ -116,7 +115,7 @@ public class TestMultithreadedTableMapper {
newValue.reverse();
// Now set the value to be collected
Put outval = new Put(key.get());
outval.add(OUTPUT_FAMILY, null, Bytes.toBytes(newValue.toString()));
outval.addColumn(OUTPUT_FAMILY, null, Bytes.toBytes(newValue.toString()));
context.write(key, outval);
}
}

View File

@ -155,13 +155,13 @@ public class TestRowCounter {
// clean up content of TABLE_NAME
Table table = TEST_UTIL.deleteTableData(TableName.valueOf(TABLE_NAME));
ts = System.currentTimeMillis();
put1.add(family, col1, ts, Bytes.toBytes("val1"));
put1.addColumn(family, col1, ts, Bytes.toBytes("val1"));
table.put(put1);
Thread.sleep(100);
ts = System.currentTimeMillis();
put2.add(family, col1, ts, Bytes.toBytes("val2"));
put3.add(family, col1, ts, Bytes.toBytes("val3"));
put2.addColumn(family, col1, ts, Bytes.toBytes("val2"));
put3.addColumn(family, col1, ts, Bytes.toBytes("val3"));
table.put(put2);
table.put(put3);
table.close();
@ -227,9 +227,9 @@ public class TestRowCounter {
for (; i < TOTAL_ROWS - ROWS_WITH_ONE_COL; i++) {
byte[] row = Bytes.toBytes("row" + i);
Put put = new Put(row);
put.add(family, col1, value);
put.add(family, col2, value);
put.add(family, col3, value);
put.addColumn(family, col1, value);
put.addColumn(family, col2, value);
put.addColumn(family, col3, value);
rowsUpdate.add(put);
}
@ -237,7 +237,7 @@ public class TestRowCounter {
for (; i < TOTAL_ROWS; i++) {
byte[] row = Bytes.toBytes("row" + i);
Put put = new Put(row);
put.add(family, col2, value);
put.addColumn(family, col2, value);
rowsUpdate.add(put);
}
table.put(rowsUpdate);

View File

@ -56,7 +56,6 @@ import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.junit.AfterClass;
import org.junit.Before;
@ -120,12 +119,12 @@ public class TestTableInputFormat {
Table table = UTIL.createTable(TableName.valueOf(tableName), families);
Put p = new Put("aaa".getBytes());
for (byte[] family : families) {
p.add(family, null, "value aaa".getBytes());
p.addColumn(family, null, "value aaa".getBytes());
}
table.put(p);
p = new Put("bbb".getBytes());
for (byte[] family : families) {
p.add(family, null, "value bbb".getBytes());
p.addColumn(family, null, "value bbb".getBytes());
}
table.put(p);
return table;

View File

@ -85,7 +85,7 @@ public class TestTableMapReduce extends TestTableMapReduceBase {
newValue.reverse();
// Now set the value to be collected
Put outval = new Put(key.get());
outval.add(OUTPUT_FAMILY, null, Bytes.toBytes(newValue.toString()));
outval.addColumn(OUTPUT_FAMILY, null, Bytes.toBytes(newValue.toString()));
context.write(key, outval);
}
}

View File

@ -130,7 +130,7 @@ public abstract class TestTableMapReduceBase {
// Now set the value to be collected
Put outval = new Put(key.get());
outval.add(OUTPUT_FAMILY, null, Bytes.toBytes(newValue.toString()));
outval.addColumn(OUTPUT_FAMILY, null, Bytes.toBytes(newValue.toString()));
return outval;
}

View File

@ -121,7 +121,7 @@ public class TestTimeRangeMapRed {
for (Long ts : tsList) {
Put put = new Put(key.get());
put.setDurability(Durability.SKIP_WAL);
put.add(FAMILY_NAME, COLUMN_NAME, ts, Bytes.toBytes(true));
put.addColumn(FAMILY_NAME, COLUMN_NAME, ts, Bytes.toBytes(true));
puts.add(put);
}
table.put(puts);
@ -156,7 +156,7 @@ public class TestTimeRangeMapRed {
for (Map.Entry<Long, Boolean> entry : TIMESTAMP.entrySet()) {
Put put = new Put(KEY);
put.setDurability(Durability.SKIP_WAL);
put.add(FAMILY_NAME, COLUMN_NAME, entry.getKey(), Bytes.toBytes(false));
put.addColumn(FAMILY_NAME, COLUMN_NAME, entry.getKey(), Bytes.toBytes(false));
puts.add(put);
}
Table table = UTIL.getConnection().getTable(desc.getTableName());

View File

@ -97,8 +97,8 @@ public class TestWALPlayer {
// put a row into the first table
Put p = new Put(ROW);
p.add(FAMILY, COLUMN1, COLUMN1);
p.add(FAMILY, COLUMN2, COLUMN2);
p.addColumn(FAMILY, COLUMN1, COLUMN1);
p.addColumn(FAMILY, COLUMN2, COLUMN2);
t1.put(p);
// delete one column
Delete d = new Delete(ROW);

View File

@ -208,7 +208,7 @@ public class TestAssignmentListener {
for (int i = 0; i < 10; ++i) {
byte[] key = Bytes.toBytes("row-" + i);
Put put = new Put(key);
put.add(FAMILY, null, key);
put.addColumn(FAMILY, null, key);
table.put(put);
}
} finally {

Some files were not shown because too many files have changed in this diff.