HBASE-12802 Remove unnecessary Table.flushCommits() (Solomon Duskis)

tedyu 2015-01-05 15:45:52 -08:00
parent 5b53a187d2
commit 2452d3861f
26 changed files with 425 additions and 643 deletions
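The change applied across these files follows one pattern: instead of writing each Put individually and then calling the buffered-write flush Table.flushCommits(), callers now collect their Puts into a List and hand them to Table.put(List<Put>) in a single batched call, usually inside try-with-resources so the Table (and the Connection, where one is created) is closed automatically. A minimal sketch of the new-style write path; the table name, family, and qualifier below are placeholders, and conf is assumed to point at a working cluster:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchedPutSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    byte[] family = Bytes.toBytes("f");     // placeholder column family
    byte[] qualifier = Bytes.toBytes("q");  // placeholder qualifier
    // Collect all Puts first instead of writing them one at a time.
    List<Put> puts = new ArrayList<Put>();
    for (int i = 0; i < 100; i++) {
      Put put = new Put(Bytes.toBytes("row" + i));
      put.add(family, qualifier, Bytes.toBytes(i)); // Put.add, as used throughout this diff
      puts.add(put);
    }
    // One batched call replaces the per-put writes plus flushCommits().
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("example"))) { // placeholder table
      table.put(puts);
    }
  }
}

This is the same shape as the rewrites of TestScannerResource.insertData and TestChangingEncoding.writeTestDataBatch below.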

View File

@@ -486,7 +486,6 @@ public class IntegrationTestMTTR {
Put p = new Put(Bytes.toBytes(RandomStringUtils.randomAlphanumeric(5)));
p.add(FAMILY, Bytes.toBytes("\0"), Bytes.toBytes(RandomStringUtils.randomAscii(5)));
table.put(p);
table.flushCommits();
return true;
}

View File

@@ -927,9 +927,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
= new org.apache.hadoop.hbase.client.Delete(val);
Table table = new HTable(getConf(), getTableName(getConf()));
table.delete(delete);
table.flushCommits();
table.close();
System.out.println("Delete successful");

View File

@@ -19,10 +19,17 @@
package org.apache.hadoop.hbase.rest;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import javax.xml.bind.JAXBContext;
@@ -32,11 +39,16 @@ import javax.xml.bind.Unmarshaller;
import org.apache.commons.httpclient.Header;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
@@ -48,9 +60,6 @@ import org.apache.hadoop.hbase.rest.model.ScannerModel;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RestTests;
import org.apache.hadoop.hbase.util.Bytes;
import static org.junit.Assert.*;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -79,10 +88,9 @@ public class TestScannerResource {
static int insertData(Configuration conf, TableName tableName, String column, double prob)
throws IOException {
Random rng = new Random();
int count = 0;
Table table = new HTable(conf, tableName);
byte[] k = new byte[3];
byte [][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));
List<Put> puts = new ArrayList<>();
for (byte b1 = 'a'; b1 < 'z'; b1++) {
for (byte b2 = 'a'; b2 < 'z'; b2++) {
for (byte b3 = 'a'; b3 < 'z'; b3++) {
@@ -93,15 +101,16 @@ public class TestScannerResource {
Put put = new Put(k);
put.setDurability(Durability.SKIP_WAL);
put.add(famAndQf[0], famAndQf[1], k);
table.put(put);
count++;
puts.add(put);
}
}
}
}
table.flushCommits();
table.close();
return count;
try (Connection conn = ConnectionFactory.createConnection(conf);
Table table = conn.getTable(tableName)) {
table.put(puts);
}
return puts.size();
}
static int countCellSet(CellSetModel model) {

View File

@@ -24,7 +24,9 @@ import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.StringWriter;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import javax.xml.bind.JAXBContext;
@@ -90,23 +92,22 @@ public class TestScannersWithLabels {
private static Configuration conf;
private static int insertData(TableName tableName, String column, double prob) throws IOException {
Random rng = new Random();
int count = 0;
Table table = new HTable(TEST_UTIL.getConfiguration(), tableName);
byte[] k = new byte[3];
byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));
List<Put> puts = new ArrayList<>();
for (int i = 0; i < 9; i++) {
Put put = new Put(Bytes.toBytes("row" + i));
put.setDurability(Durability.SKIP_WAL);
put.add(famAndQf[0], famAndQf[1], k);
put.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!"
+ TOPSECRET));
table.put(put);
count++;
puts.add(put);
}
table.flushCommits();
return count;
try (Table table = new HTable(TEST_UTIL.getConfiguration(), tableName)) {
table.put(puts);
}
return puts.size();
}
private static int countCellSet(CellSetModel model) {

View File

@@ -1739,7 +1739,6 @@ public class HBaseFsck extends Configured implements Closeable {
private void deleteMetaRegion(byte[] metaKey) throws IOException {
Delete d = new Delete(metaKey);
meta.delete(d);
meta.flushCommits();
LOG.info("Deleted " + Bytes.toString(metaKey) + " from META" );
}
@@ -1760,7 +1759,6 @@
mutations.add(p);
meta.mutateRow(mutations);
meta.flushCommits();
LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
}

View File

@@ -1878,7 +1878,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @return Count of rows loaded.
* @throws IOException
*/
public int loadTable(final HTable t, final byte[] f) throws IOException {
public int loadTable(final Table t, final byte[] f) throws IOException {
return loadTable(t, new byte[][] {f});
}
@@ -1889,7 +1889,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @return Count of rows loaded.
* @throws IOException
*/
public int loadTable(final HTable t, final byte[] f, boolean writeToWAL) throws IOException {
public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
return loadTable(t, new byte[][] {f}, null, writeToWAL);
}
@@ -1900,7 +1900,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @return Count of rows loaded.
* @throws IOException
*/
public int loadTable(final HTable t, final byte[][] f) throws IOException {
public int loadTable(final Table t, final byte[][] f) throws IOException {
return loadTable(t, f, null);
}
@@ -1912,7 +1912,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @return Count of rows loaded.
* @throws IOException
*/
public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
return loadTable(t, f, value, true);
}
@@ -1924,20 +1924,18 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @return Count of rows loaded.
* @throws IOException
*/
public int loadTable(final HTable t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException {
t.setAutoFlushTo(false);
int rowCount = 0;
public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException {
List<Put> puts = new ArrayList<>();
for (byte[] row : HBaseTestingUtility.ROWS) {
Put put = new Put(row);
put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
for (int i = 0; i < f.length; i++) {
put.add(f[i], null, value != null ? value : row);
}
t.put(put);
rowCount++;
puts.add(put);
}
t.flushCommits();
return rowCount;
t.put(puts);
return puts.size();
}
/** A tracker for tracking and validating table rows
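The loadTable overloads above now take the Table interface instead of the concrete HTable, so they accept any Table implementation. A hypothetical usage sketch, assuming an HBaseTestingUtility instance util with a running mini-cluster, an existing table "example" with family "f", and the client imports used elsewhere in this diff:

try (Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
     Table t = conn.getTable(TableName.valueOf("example"))) {
  int rows = util.loadTable(t, Bytes.toBytes("f")); // any Table works, not just HTable
}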

View File

@@ -17,7 +17,8 @@
*/
package org.apache.hadoop.hbase.client;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
@@ -119,8 +120,17 @@ public class TestFastFail {
/**
* Write numRows worth of data, so that the workers can arbitrarily read.
*/
try (Table table = connection.getTable(TableName.valueOf(tableName));) {
writeData(table, numRows);
List<Put> puts = new ArrayList<>();
for (long i = 0; i < numRows; i++) {
byte[] rowKey = longToByteArrayKey(i);
Put put = new Put(rowKey);
byte[] value = rowKey; // value is the same as the row key
put.add(FAMILY, QUALIFIER, value);
puts.add(put);
}
try (Table table = connection.getTable(TableName.valueOf(tableName))) {
table.put(puts);
LOG.info("Written all puts.");
}
/**
@@ -297,17 +307,4 @@ public class TestFastFail {
private byte[] longToByteArrayKey(long rowKey) {
return LoadTestKVGenerator.md5PrefixedKey(rowKey).getBytes();
}
public void writeData(Table table, long numRows) throws IOException,
InterruptedException {
table.flushCommits();
for (long i = 0; i < numRows; i++) {
byte[] rowKey = longToByteArrayKey(i);
Put put = new Put(rowKey);
byte[] value = rowKey; // value is the same as the row key
put.add(FAMILY, QUALIFIER, value);
table.put(put);
}
LOG.info("Written all puts.");
}
}

View File

@@ -5506,22 +5506,17 @@ public class TestFromClientSide {
Put p = new Put(row);
p.add(FAMILY, QUALIFIER, 10, VALUE);
table.put(p);
table.flushCommits();
p = new Put(row);
p.add(FAMILY, QUALIFIER, 11, ArrayUtils.add(VALUE, (byte) 2));
table.put(p);
table.flushCommits();
p = new Put(row);
p.add(FAMILY, QUALIFIER, 12, ArrayUtils.add(VALUE, (byte) 3));
table.put(p);
table.flushCommits();
p = new Put(row);
p.add(FAMILY, QUALIFIER, 13, ArrayUtils.add(VALUE, (byte) 4));
table.put(p);
table.flushCommits();
int versions = 4;
Scan s = new Scan(row);
@@ -5642,7 +5637,6 @@ public class TestFromClientSide {
put = new Put(Bytes.toBytes("0-b22222-0000000000000000009"));
put.add(FAMILY, QUALIFIER, VALUE);
ht.put(put);
ht.flushCommits();
Scan scan = new Scan(Bytes.toBytes("0-b11111-9223372036854775807"),
Bytes.toBytes("0-b11111-0000000000000000000"));
scan.setReversed(true);

View File

@@ -413,7 +413,6 @@ public class TestFromClientSide3 {
Put put = new Put(ROW_BYTES);
put.add(FAMILY, COL_QUAL, VAL_BYTES);
table.put(put);
table.flushCommits();
//Try getting the row with an empty row key
Result res = null;

View File

@@ -173,7 +173,7 @@ public class TestSnapshotCloneIndependence {
// Restore the interrupted status
Thread.currentThread().interrupt();
}
if (t.getRegionLocations().size() > originalCount) {
if (t.getAllRegionLocations().size() > originalCount) {
return;
}
}
@@ -194,9 +194,7 @@
final TableName localTableName =
TableName.valueOf(STRING_TABLE_NAME + startTime);
HTable original = UTIL.createTable(localTableName, TEST_FAM);
try {
try (Table original = UTIL.createTable(localTableName, TEST_FAM)) {
UTIL.loadTable(original, TEST_FAM);
final int origTableRowCount = UTIL.countRows(original);
@@ -213,9 +211,7 @@
TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
admin.cloneSnapshot(snapshotName, cloneTableName);
Table clonedTable = new HTable(UTIL.getConfiguration(), cloneTableName);
try {
try (Table clonedTable = new HTable(UTIL.getConfiguration(), cloneTableName)){
final int clonedTableRowCount = UTIL.countRows(clonedTable);
Assert.assertEquals(
@@ -228,7 +224,6 @@
Put p = new Put(Bytes.toBytes(rowKey));
p.add(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
original.put(p);
original.flushCommits();
// Verify that it is not present in the original table
Assert.assertEquals("The row count of the original table was not modified by the put",
@@ -240,7 +235,6 @@
p = new Put(Bytes.toBytes(rowKey));
p.add(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
clonedTable.put(p);
clonedTable.flushCommits();
// Verify that the new family is not in the restored table's description
Assert.assertEquals(
@@ -248,13 +242,7 @@
origTableRowCount + 1, UTIL.countRows(original));
Assert.assertEquals("The row count of the cloned table was not modified by the put",
clonedTableRowCount + 1, UTIL.countRows(clonedTable));
} finally {
clonedTable.close();
}
} finally {
original.close();
}
}

View File

@@ -71,7 +71,6 @@ public class TestOpenTableInCoprocessor {
final WALEdit edit, final Durability durability) throws IOException {
Table table = e.getEnvironment().getTable(otherTable);
table.put(put);
table.flushCommits();
completed[0] = true;
table.close();
}
@@ -166,7 +165,6 @@
Put p = new Put(new byte[] { 'a' });
p.add(family, null, new byte[] { 'a' });
table.put(p);
table.flushCommits();
table.close();
Table target = new HTable(UTIL.getConfiguration(), otherTable);

View File

@@ -244,12 +244,10 @@ public class TestRegionObserverInterface {
public void testCheckAndPutHooks() throws IOException {
TableName tableName =
TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndPutHooks");
Table table = util.createTable(tableName, new byte[][] {A, B, C});
try {
try (Table table = util.createTable(tableName, new byte[][] {A, B, C})) {
Put p = new Put(Bytes.toBytes(0));
p.add(A, A, A);
table.put(p);
table.flushCommits();
p = new Put(Bytes.toBytes(0));
p.add(A, A, A);
verifyMethodResult(SimpleRegionObserver.class,
@@ -267,7 +265,6 @@
);
} finally {
util.deleteTable(tableName);
table.close();
}
}
@@ -280,7 +277,6 @@
Put p = new Put(Bytes.toBytes(0));
p.add(A, A, A);
table.put(p);
table.flushCommits();
Delete d = new Delete(Bytes.toBytes(0));
table.delete(d);
verifyMethodResult(SimpleRegionObserver.class,

View File

@@ -265,7 +265,6 @@ public class TestRegionObserverScannerOpenHook {
Put put = new Put(ROW);
put.add(A, A, A);
table.put(put);
table.flushCommits();
HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getTableName());
List<HRegion> regions = rs.getOnlineRegions(desc.getTableName());
@@ -279,7 +278,6 @@
put = new Put(Bytes.toBytes("anotherrow"));
put.add(A, A, A);
table.put(put);
table.flushCommits();
admin.flushRegion(region.getRegionName());
// run a compaction, which normally should get rid of the data

View File

@@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -128,8 +130,7 @@ public class TestChangingEncoding {
static void writeTestDataBatch(Configuration conf, TableName tableName,
int batchId) throws Exception {
LOG.debug("Writing test data batch " + batchId);
Table table = new HTable(conf, tableName);
table.setAutoFlushTo(false);
List<Put> puts = new ArrayList<>();
for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) {
Put put = new Put(getRowKey(batchId, i));
for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
@@ -137,10 +138,12 @@
getValue(batchId, i, j));
}
put.setDurability(Durability.SKIP_WAL);
table.put(put);
puts.add(put);
}
try (Connection conn = ConnectionFactory.createConnection(conf);
Table table = conn.getTable(tableName)) {
table.put(puts);
}
table.flushCommits();
table.close();
}
static void verifyTestDataBatch(Configuration conf, TableName tableName,

View File

@@ -94,23 +94,18 @@ public class TestPrefixTree {
Put put = new Put(Bytes.toBytes("a-b-0-0"));
put.add(fam, qual1, Bytes.toBytes("c1-value"));
table.put(put);
table.flushCommits();
put = new Put(row1_bytes);
put.add(fam, qual1, Bytes.toBytes("c1-value"));
table.put(put);
table.flushCommits();
put = new Put(row2_bytes);
put.add(fam, qual2, Bytes.toBytes("c2-value"));
table.put(put);
table.flushCommits();
put = new Put(row3_bytes);
put.add(fam, qual2, Bytes.toBytes("c2-value-2"));
table.put(put);
table.flushCommits();
put = new Put(row4_bytes);
put.add(fam, qual2, Bytes.toBytes("c2-value-3"));
table.put(put);
table.flushCommits();
hBaseAdmin.flush(tableName);
String[] rows = new String[3];
rows[0] = row1;

View File

@@ -31,6 +31,7 @@ import java.io.IOException;
import java.io.PrintStream;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
@@ -430,20 +431,18 @@ public class TestImportExport {
UTIL.getHBaseAdmin().createTable(desc);
Table exportTable = new HTable(UTIL.getConfiguration(), desc.getTableName());
Put p = new Put(ROW1);
p.add(FAMILYA, QUAL, now, QUAL);
p.add(FAMILYA, QUAL, now + 1, QUAL);
p.add(FAMILYA, QUAL, now + 2, QUAL);
p.add(FAMILYA, QUAL, now + 3, QUAL);
p.add(FAMILYA, QUAL, now + 4, QUAL);
exportTable.put(p);
Put p1 = new Put(ROW1);
p1.add(FAMILYA, QUAL, now, QUAL);
p1.add(FAMILYA, QUAL, now + 1, QUAL);
p1.add(FAMILYA, QUAL, now + 2, QUAL);
p1.add(FAMILYA, QUAL, now + 3, QUAL);
p1.add(FAMILYA, QUAL, now + 4, QUAL);
// Having another row would actually test the filter.
p = new Put(ROW2);
p.add(FAMILYA, QUAL, now, QUAL);
exportTable.put(p);
// Flush the commits.
exportTable.flushCommits();
Put p2 = new Put(ROW2);
p2.add(FAMILYA, QUAL, now, QUAL);
exportTable.put(Arrays.asList(p1, p2));
// Export the simple table
String[] args = new String[] { EXPORT_TABLE, FQ_OUTPUT_DIR, "1000" };

View File

@@ -118,13 +118,14 @@ public class TestTimeRangeMapRed {
tsList.add(kv.getTimestamp());
}
List<Put> puts = new ArrayList<>();
for (Long ts : tsList) {
Put put = new Put(key.get());
put.setDurability(Durability.SKIP_WAL);
put.add(FAMILY_NAME, COLUMN_NAME, ts, Bytes.toBytes(true));
table.put(put);
puts.add(put);
}
table.flushCommits();
table.put(puts);
}
@Override
@@ -151,20 +152,18 @@
col.setMaxVersions(Integer.MAX_VALUE);
desc.addFamily(col);
admin.createTable(desc);
Table table = new HTable(UTIL.getConfiguration(), desc.getTableName());
prepareTest(table);
runTestOnTable();
verify(table);
}
private void prepareTest(final Table table) throws IOException {
List<Put> puts = new ArrayList<Put>();
for (Map.Entry<Long, Boolean> entry : TIMESTAMP.entrySet()) {
Put put = new Put(KEY);
put.setDurability(Durability.SKIP_WAL);
put.add(FAMILY_NAME, COLUMN_NAME, entry.getKey(), Bytes.toBytes(false));
table.put(put);
puts.add(put);
}
table.flushCommits();
Table table = new HTable(UTIL.getConfiguration(), desc.getTableName());
table.put(puts);
runTestOnTable();
verify(table);
table.close();
}
private void runTestOnTable()

View File

@@ -227,7 +227,6 @@ public class TestCompactionState {
puts.add(p);
}
ht.put(puts);
ht.flushCommits();
TEST_UTIL.flush();
puts.clear();
}

View File

@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
@@ -284,13 +285,13 @@ public class TestEndToEndSplitTransaction {
}
void addData(int start) throws IOException {
List<Put> puts = new ArrayList<>();
for (int i=start; i< start + 100; i++) {
Put put = new Put(Bytes.toBytes(i));
put.add(family, family, Bytes.toBytes(i));
table.put(put);
puts.add(put);
}
table.flushCommits();
table.put(puts);
}
}

View File

@@ -198,7 +198,6 @@ public class TestRegionServerMetrics {
p.setDurability(Durability.SKIP_WAL);
t.put(p);
t.flushCommits();
metricsRegionServer.getRegionServerWrapper().forceRecompute();
metricsHelper.assertGauge("mutationsWithoutWALCount", 1, serverSource);
@@ -227,7 +226,6 @@
Put p = new Put(row);
p.add(cf, qualifier, val);
t.put(p);
t.flushCommits();
TEST_UTIL.getHBaseAdmin().flush(tableName);
metricsRegionServer.getRegionServerWrapper().forceRecompute();
@@ -253,18 +251,14 @@
Put p = new Put(row);
p.add(cf, qualifier, valOne);
t.put(p);
t.flushCommits();
Put pTwo = new Put(row);
pTwo.add(cf, qualifier, valTwo);
t.checkAndPut(row, cf, qualifier, valOne, pTwo);
t.flushCommits();
Put pThree = new Put(row);
pThree.add(cf, qualifier, valThree);
t.checkAndPut(row, cf, qualifier, valOne, pThree);
t.flushCommits();
metricsRegionServer.getRegionServerWrapper().forceRecompute();
metricsHelper.assertCounter("checkMutateFailedCount", 1, serverSource);
@@ -289,7 +283,6 @@
Put p = new Put(row);
p.add(cf, qualifier, val);
t.put(p);
t.flushCommits();
for(int count = 0; count< 13; count++) {
Increment inc = new Increment(row);
@@ -297,8 +290,6 @@
t.increment(inc);
}
t.flushCommits();
metricsRegionServer.getRegionServerWrapper().forceRecompute();
metricsHelper.assertCounter("incrementNumOps", 13, serverSource);
@@ -321,7 +312,6 @@
Put p = new Put(row);
p.add(cf, qualifier, val);
t.put(p);
t.flushCommits();
for(int count = 0; count< 73; count++) {
Append append = new Append(row);
@@ -329,8 +319,6 @@
t.append(append);
}
t.flushCommits();
metricsRegionServer.getRegionServerWrapper().forceRecompute();
metricsHelper.assertCounter("appendNumOps", 73, serverSource);

View File

@@ -23,6 +23,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -245,15 +246,14 @@ public class TestReplicationSmallTests extends TestReplicationBase {
@Test(timeout=300000)
public void testSmallBatch() throws Exception {
LOG.info("testSmallBatch");
Put put;
// normal Batch tests
htable1.setAutoFlushTo(false);
List<Put> puts = new ArrayList<>();
for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
put = new Put(Bytes.toBytes(i));
Put put = new Put(Bytes.toBytes(i));
put.add(famName, row, row);
htable1.put(put);
puts.add(put);
}
htable1.flushCommits();
htable1.put(puts);
Scan scan = new Scan();
@@ -386,14 +386,16 @@
@Test(timeout=300000)
public void testLoading() throws Exception {
LOG.info("Writing out rows to table1 in testLoading");
htable1.setWriteBufferSize(1024);
((HTable)htable1).setAutoFlushTo(false);
List<Put> puts = new ArrayList<Put>();
for (int i = 0; i < NB_ROWS_IN_BIG_BATCH; i++) {
Put put = new Put(Bytes.toBytes(i));
put.add(famName, row, row);
htable1.put(put);
puts.add(put);
}
htable1.flushCommits();
htable1.setWriteBufferSize(1024);
// The puts will be iterated through and flushed only when the buffer
// size is reached.
htable1.put(puts);
Scan scan = new Scan();

View File

@@ -895,24 +895,15 @@ public abstract class TestVisibilityLabels {
static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps)
throws Exception {
Table table = null;
try {
table = TEST_UTIL.createTable(tableName, fam);
int i = 1;
List<Put> puts = new ArrayList<Put>();
for (String labelExp : labelExps) {
Put put = new Put(Bytes.toBytes("row" + i));
put.add(fam, qual, HConstants.LATEST_TIMESTAMP, value);
put.setCellVisibility(new CellVisibility(labelExp));
puts.add(put);
i++;
}
table.put(puts);
} finally {
if (table != null) {
table.flushCommits();
}
List<Put> puts = new ArrayList<Put>();
for (int i = 0; i < labelExps.length; i++) {
Put put = new Put(Bytes.toBytes("row" + (i+1)));
put.add(fam, qual, HConstants.LATEST_TIMESTAMP, value);
put.setCellVisibility(new CellVisibility(labelExps[i]));
puts.add(put);
}
Table table = TEST_UTIL.createTable(tableName, fam);
table.put(puts);
return table;
}

View File

@@ -699,7 +699,6 @@ public class TestHBaseFsck {
//add a location with replicaId as 2 (since we already have replicas with replicaid 0 and 1)
MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), 2);
meta.put(put);
meta.flushCommits();
// assign the new replica
HBaseFsckRepair.fixUnassigned(admin, newHri);
HBaseFsckRepair.waitUntilAssigned(admin, newHri);
@@ -709,7 +708,6 @@
delete.addColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(2));
delete.addColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(2));
meta.delete(delete);
meta.flushCommits();
meta.close();
// check that problem exists
HBaseFsck hbck = doFsck(conf, false);
@@ -1529,7 +1527,6 @@
hri.setSplit(true);
MetaTableAccessor.addRegionToMeta(meta, hri, a, b);
meta.flushCommits();
meta.close();
admin.flush(TableName.META_TABLE_NAME);
@@ -1655,9 +1652,10 @@
undeployRegion(connection, hris.get(daughters.getFirst()), daughters.getFirst());
undeployRegion(connection, hris.get(daughters.getSecond()), daughters.getSecond());
meta.delete(new Delete(daughters.getFirst().getRegionName()));
meta.delete(new Delete(daughters.getSecond().getRegionName()));
meta.flushCommits();
List<Delete> deletes = new ArrayList<>();
deletes.add(new Delete(daughters.getFirst().getRegionName()));
deletes.add(new Delete(daughters.getSecond().getRegionName()));
meta.delete(deletes);
// Remove daughters from regionStates
RegionStates regionStates = TEST_UTIL.getMiniHBaseCluster().getMaster().

View File

@@ -31,14 +31,14 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -147,15 +147,16 @@ public class OfflineMetaRebuildTestCore {
private void populateTable(Table tbl) throws IOException {
byte[] values = { 'A', 'B', 'C', 'D' };
List<Put> puts = new ArrayList<>();
for (int i = 0; i < values.length; i++) {
for (int j = 0; j < values.length; j++) {
Put put = new Put(new byte[] { values[i], values[j] });
put.add(Bytes.toBytes("fam"), new byte[] {}, new byte[] { values[i],
values[j] });
tbl.put(put);
puts.add(put);
}
}
tbl.flushCommits();
tbl.put(puts);
}
/**
@@ -253,7 +254,6 @@
}
}
meta.delete(dels);
meta.flushCommits();
scanner.close();
meta.close();
}

View File

@@ -91,14 +91,13 @@ public class TestWALFiltering {
ts + "_random_" + rand.nextLong());
put.add(cf, qual, ts, value);
} else if (rand.nextDouble() < 0.8) {
del.deleteColumn(cf, qual, ts);
del.addColumn(cf, qual, ts);
} else {
del.deleteColumns(cf, qual, ts);
del.addColumns(cf, qual, ts);
}
}
table.put(put);
table.delete(del);
table.flushCommits();
}
}
TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);
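Besides dropping flushCommits(), the TestWALFiltering hunk above replaces the deprecated Delete.deleteColumn/deleteColumns calls with their HBase 1.0 successors, and the distinction is worth keeping straight: addColumn deletes the single version written at exactly the given timestamp, while addColumns deletes every version up to and including it. A short sketch, assuming cf, qual, and ts are in scope as in the test:

Delete del = new Delete(Bytes.toBytes("some-row")); // placeholder row key
del.addColumn(cf, qual, ts);   // delete only the version at exactly ts
del.addColumns(cf, qual, ts);  // delete all versions with timestamp <= ts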