HBASE-19751 Use RegionInfo directly instead of an identifier and a namespace when getting WAL
parent 62a8188946
commit 71a1192d67
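
Note on the change as a whole: callers used to pass an encoded region name plus a table namespace (with a separate getMetaWAL() path for hbase:meta); after this patch they hand the RegionInfo itself to WALFactory, which routes meta regions internally. A minimal before/after sketch, where walFactory and region stand for whatever the caller already holds:

    // Before: the caller unpacks the region and must know about the meta special case.
    WAL wal = walFactory.getWAL(region.getEncodedNameAsBytes(),
        region.getTable().getNamespace());

    // After: pass the RegionInfo directly; null means "no particular region".
    WAL wal = walFactory.getWAL(region);
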
@@ -24,30 +24,28 @@ import static org.junit.Assert.assertTrue;
 import java.util.List;
 import java.util.NavigableMap;
 import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.mapreduce.WALInputFormat.WALKeyRecordReader;
 import org.apache.hadoop.hbase.mapreduce.WALInputFormat.WALRecordReader;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.wal.WAL;
-import org.apache.hadoop.hbase.wal.WALFactory;
-import org.apache.hadoop.hbase.wal.WALKeyImpl;
 import org.apache.hadoop.hbase.testclassification.MapReduceTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.hadoop.hbase.wal.WALKeyImpl;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.MapReduceTestUtil;
 import org.junit.AfterClass;
@@ -61,7 +59,7 @@ import org.slf4j.LoggerFactory;
 /**
  * JUnit tests for the WALRecordReader
  */
-@Category({MapReduceTests.class, MediumTests.class})
+@Category({ MapReduceTests.class, MediumTests.class })
 public class TestWALRecordReader {
   private static final Logger LOG = LoggerFactory.getLogger(TestWALRecordReader.class);
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@@ -74,11 +72,9 @@ public class TestWALRecordReader {
   static final TableName tableName = TableName.valueOf(getName());
   private static final byte [] rowName = tableName.getName();
   // visible for TestHLogRecordReader
-  static final HRegionInfo info = new HRegionInfo(tableName,
-      Bytes.toBytes(""), Bytes.toBytes(""), false);
-  private static final byte [] family = Bytes.toBytes("column");
-  private static final byte [] value = Bytes.toBytes("value");
-  private static HTableDescriptor htd;
+  static final RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build();
+  private static final byte[] family = Bytes.toBytes("column");
+  private static final byte[] value = Bytes.toBytes("value");
   private static Path logDir;
   protected MultiVersionConcurrencyControl mvcc;
   protected static NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
@@ -93,6 +89,7 @@ public class TestWALRecordReader {
     walFs.delete(walRootDir, true);
     mvcc = new MultiVersionConcurrencyControl();
   }
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     // Make block sizes small.
@@ -108,9 +105,6 @@ public class TestWALRecordReader {
     walRootDir = TEST_UTIL.createWALRootDir();
     walFs = FSUtils.getWALFileSystem(conf);
     logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
-
-    htd = new HTableDescriptor(tableName);
-    htd.addFamily(new HColumnDescriptor(family));
   }
 
   @AfterClass
@@ -127,7 +121,7 @@ public class TestWALRecordReader {
   @Test
   public void testPartialRead() throws Exception {
     final WALFactory walfactory = new WALFactory(conf, null, getName());
-    WAL log = walfactory.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace());
+    WAL log = walfactory.getWAL(info);
     // This test depends on timestamp being millisecond based and the filename of the WAL also
     // being millisecond based.
     long ts = System.currentTimeMillis();
@@ -186,9 +180,8 @@ public class TestWALRecordReader {
   @Test
   public void testWALRecordReader() throws Exception {
     final WALFactory walfactory = new WALFactory(conf, null, getName());
-    WAL log = walfactory.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace());
+    WAL log = walfactory.getWAL(info);
     byte [] value = Bytes.toBytes("value");
-    final AtomicLong sequenceId = new AtomicLong(0);
     WALEdit edit = new WALEdit();
     edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"),
         System.currentTimeMillis(), value));
@@ -245,7 +238,7 @@ public class TestWALRecordReader {
     return new WALKeyImpl(info.getEncodedNameAsBytes(), tableName, time, mvcc, scopes);
   }
 
-  protected WALRecordReader getReader() {
+  private WALRecordReader<WALKey> getReader() {
     return new WALKeyRecordReader();
   }
 
@@ -253,7 +246,7 @@ public class TestWALRecordReader {
    * Create a new reader from the split, and match the edits against the passed columns.
    */
   private void testSplit(InputSplit split, byte[]... columns) throws Exception {
-    final WALRecordReader reader = getReader();
+    WALRecordReader<WALKey> reader = getReader();
     reader.initialize(split, MapReduceTestUtil.createDummyMapTaskAttemptContext(conf));
 
     for (byte[] column : columns) {
@@ -262,15 +255,12 @@ public class TestWALRecordReader {
       if (!Bytes.equals(column, 0, column.length, cell.getQualifierArray(),
         cell.getQualifierOffset(), cell.getQualifierLength())) {
         assertTrue(
-          "expected ["
-              + Bytes.toString(column)
-              + "], actual ["
-              + Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(),
-                cell.getQualifierLength()) + "]", false);
+          "expected [" + Bytes.toString(column) + "], actual [" + Bytes.toString(
+            cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) + "]",
+          false);
       }
     }
     assertFalse(reader.nextKeyValue());
     reader.close();
   }
-
 }
@@ -2116,8 +2116,6 @@ public class HRegionServer extends HasThread implements
     return healthy;
   }
 
-  private static final byte[] UNSPECIFIED_REGION = new byte[]{};
-
   @Override
   public List<WAL> getWALs() throws IOException {
     return walFactory.getWALs();
@@ -2125,17 +2123,7 @@ public class HRegionServer extends HasThread implements
 
   @Override
   public WAL getWAL(RegionInfo regionInfo) throws IOException {
-    WAL wal;
-    // _ROOT_ and hbase:meta regions have separate WAL.
-    if (regionInfo != null && regionInfo.isMetaRegion()
-        && regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-      wal = walFactory.getMetaWAL(regionInfo.getEncodedNameAsBytes());
-    } else if (regionInfo == null) {
-      wal = walFactory.getWAL(UNSPECIFIED_REGION, null);
-    } else {
-      byte[] namespace = regionInfo.getTable().getNamespace();
-      wal = walFactory.getWAL(regionInfo.getEncodedNameAsBytes(), namespace);
-    }
+    WAL wal = walFactory.getWAL(regionInfo);
     if (this.walRoller != null) {
       this.walRoller.addWAL(wal);
     }
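
The UNSPECIFIED_REGION sentinel deleted above was the old spelling for "no particular region"; a null RegionInfo now carries that meaning end to end (the TestWALObserver hunks below make the same substitution on the test side). Sketch of the two spellings:

    WAL w1 = walFactory.getWAL(UNSPECIFIED_REGION, null); // before: empty byte[] sentinel
    WAL w2 = walFactory.getWAL(null);                     // after: null RegionInfo
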
@@ -1482,9 +1482,9 @@ public class HBaseFsck extends Configured implements Closeable {
     // unless I pass along via the conf.
     Configuration confForWAL = new Configuration(c);
     confForWAL.set(HConstants.HBASE_DIR, rootdir.toString());
-    WAL wal = (new WALFactory(confForWAL,
-        Collections.<WALActionsListener> singletonList(new MetricsWAL()), walFactoryID))
-        .getWAL(metaHRI.getEncodedNameAsBytes(), metaHRI.getTable().getNamespace());
+    WAL wal =
+      new WALFactory(confForWAL, Collections.<WALActionsListener> singletonList(new MetricsWAL()),
+          walFactoryID).getWAL(metaHRI);
     HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c, metaDescriptor, wal);
     MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, true);
     return meta;
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import org.slf4j.Logger;
@@ -128,7 +129,7 @@ public abstract class AbstractFSWALProvider<T extends AbstractFSWAL<?>> implemen
   }
 
   @Override
-  public T getWAL(byte[] identifier, byte[] namespace) throws IOException {
+  public T getWAL(RegionInfo region) throws IOException {
     T walCopy = wal;
     if (walCopy == null) {
       // only lock when need to create wal, and need to lock since
@@ -74,7 +74,7 @@ class DisabledWALProvider implements WALProvider {
   }
 
   @Override
-  public WAL getWAL(final byte[] identifier, byte[] namespace) throws IOException {
+  public WAL getWAL(RegionInfo region) throws IOException {
     return disabled;
   }
 
@@ -27,15 +27,17 @@ import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
-
+import java.util.concurrent.locks.Lock;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.RegionInfo;
 // imports for classes still in regionserver.wal
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.IdLock;
+import org.apache.hadoop.hbase.util.KeyLocker;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A WAL Provider that returns a WAL per group of regions.
@@ -131,7 +133,7 @@ public class RegionGroupingProvider implements WALProvider {
   /** A group-provider mapping, make sure one-one rather than many-one mapping */
   private final ConcurrentMap<String, WALProvider> cached = new ConcurrentHashMap<>();
 
-  private final IdLock createLock = new IdLock();
+  private final KeyLocker<String> createLock = new KeyLocker<>();
 
   private RegionGroupingStrategy strategy = null;
   private WALFactory factory = null;
@@ -177,33 +179,39 @@ public class RegionGroupingProvider implements WALProvider {
     return wals;
   }
 
-  private WAL getWAL(final String group) throws IOException {
+  private WAL getWAL(String group) throws IOException {
     WALProvider provider = cached.get(group);
     if (provider == null) {
-      IdLock.Entry lockEntry = null;
+      Lock lock = createLock.acquireLock(group);
       try {
-        lockEntry = createLock.getLockEntry(group.hashCode());
         provider = cached.get(group);
         if (provider == null) {
           provider = createProvider(group);
           cached.put(group, provider);
         }
       } finally {
-        if (lockEntry != null) {
-          createLock.releaseLockEntry(lockEntry);
-        }
+        lock.unlock();
       }
     }
-    return provider.getWAL(null, null);
+    return provider.getWAL(null);
   }
 
   @Override
-  public WAL getWAL(final byte[] identifier, byte[] namespace) throws IOException {
-    final String group;
+  public WAL getWAL(RegionInfo region) throws IOException {
+    String group;
     if (META_WAL_PROVIDER_ID.equals(this.providerId)) {
       group = META_WAL_GROUP_NAME;
     } else {
-      group = strategy.group(identifier, namespace);
+      byte[] id;
+      byte[] namespace;
+      if (region != null) {
+        id = region.getEncodedNameAsBytes();
+        namespace = region.getTable().getNamespace();
+      } else {
+        id = HConstants.EMPTY_BYTE_ARRAY;
+        namespace = null;
+      }
+      group = strategy.group(id, namespace);
     }
     return getWAL(group);
   }
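
A note on the lock swap in this file: the old code locked on group.hashCode() through IdLock, so two distinct group names that collide on hashCode would share one lock entry; KeyLocker keys on the String itself and hands back a plain java.util.concurrent.locks.Lock. The resulting double-checked creation idiom, reduced to a sketch around the cached map and createProvider helper shown above:

    Lock lock = createLock.acquireLock(group); // one lock per distinct group name
    try {
      WALProvider provider = cached.get(group); // re-check under the lock
      if (provider == null) {
        provider = createProvider(group);
        cached.put(group, provider);
      }
    } finally {
      lock.unlock();
    }
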
@@ -1,5 +1,4 @@
 /**
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,25 +15,18 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-
 package org.apache.hadoop.hbase.wal;
 
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.Collections;
 import java.util.List;
 import java.util.OptionalLong;
 import java.util.concurrent.atomic.AtomicReference;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.client.RegionInfo;
 // imports for things that haven't moved from regionserver.wal yet.
 import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
 import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
@@ -45,6 +37,11 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
 import org.apache.hadoop.hbase.wal.WAL.Reader;
 import org.apache.hadoop.hbase.wal.WALProvider.Writer;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 
 /**
  * Entry point for users of the Write Ahead Log.
@@ -91,11 +88,11 @@ public class WALFactory implements WALFileLengthProvider {
   static final String DEFAULT_META_WAL_PROVIDER = Providers.defaultProvider.name();
 
   final String factoryId;
-  final WALProvider provider;
+  private final WALProvider provider;
   // The meta updates are written to a different wal. If this
   // regionserver holds meta regions, then this ref will be non-null.
   // lazily intialized; most RegionServers don't deal with META
-  final AtomicReference<WALProvider> metaProvider = new AtomicReference<>();
+  private final AtomicReference<WALProvider> metaProvider = new AtomicReference<>();
 
   /**
    * Configuration-specified WAL Reader used when a custom reader is requested
@@ -236,32 +233,35 @@ public class WALFactory implements WALFileLengthProvider {
     return provider.getWALs();
   }
 
-  /**
-   * @param identifier may not be null, contents will not be altered
-   * @param namespace could be null, and will use default namespace if null
-   */
-  public WAL getWAL(final byte[] identifier, final byte[] namespace) throws IOException {
-    return provider.getWAL(identifier, namespace);
+  private WALProvider getMetaProvider() throws IOException {
+    for (;;) {
+      WALProvider provider = this.metaProvider.get();
+      if (provider != null) {
+        return provider;
+      }
+      provider = getProvider(META_WAL_PROVIDER, DEFAULT_META_WAL_PROVIDER,
+        Collections.<WALActionsListener> singletonList(new MetricsWAL()),
+        AbstractFSWALProvider.META_WAL_PROVIDER_ID);
+      if (metaProvider.compareAndSet(null, provider)) {
+        return provider;
+      } else {
+        // someone is ahead of us, close and try again.
+        provider.close();
+      }
+    }
   }
 
   /**
-   * @param identifier may not be null, contents will not be altered
+   * @param region the region which we want to get a WAL for it. Could be null.
    */
-  public WAL getMetaWAL(final byte[] identifier) throws IOException {
-    WALProvider metaProvider = this.metaProvider.get();
-    if (null == metaProvider) {
-      final WALProvider temp = getProvider(META_WAL_PROVIDER, DEFAULT_META_WAL_PROVIDER,
-          Collections.<WALActionsListener>singletonList(new MetricsWAL()),
-          AbstractFSWALProvider.META_WAL_PROVIDER_ID);
-      if (this.metaProvider.compareAndSet(null, temp)) {
-        metaProvider = temp;
-      } else {
-        // reference must now be to a provider created in another thread.
-        temp.close();
-        metaProvider = this.metaProvider.get();
-      }
+  public WAL getWAL(RegionInfo region) throws IOException {
+    // use different WAL for hbase:meta
+    if (region != null && region.isMetaRegion() &&
+      region.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
+      return getMetaProvider().getWAL(region);
+    } else {
+      return provider.getWAL(region);
     }
-    return metaProvider.getWAL(identifier, null);
   }
 
   public Reader createReader(final FileSystem fs, final Path path) throws IOException {
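
getMetaProvider() replaces the old getMetaWAL() entry point with a standard lazy-initialization race on an AtomicReference: any thread may build a provider, only the compareAndSet winner keeps it, and losers close their copy and loop. The bare pattern, as a sketch with a placeholder build():

    private final AtomicReference<WALProvider> ref = new AtomicReference<>();

    private WALProvider get() throws IOException {
      for (;;) {
        WALProvider p = ref.get();
        if (p != null) {
          return p; // fast path: already initialized
        }
        p = build(); // may race with other threads
        if (ref.compareAndSet(null, p)) {
          return p; // this thread won the race
        }
        p.close(); // lost the race: discard the extra provider and retry
      }
    }
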
@@ -22,11 +22,10 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
-
 import org.apache.hadoop.conf.Configuration;
-import org.apache.yetus.audience.InterfaceAudience;
-// imports for things that haven't moved from regionserver.wal yet.
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * The Write Ahead Log (WAL) stores all durable edits to the HRegion.
@@ -48,17 +47,17 @@ public interface WALProvider {
    * @param listeners may be null
    * @param providerId differentiate between providers from one factory. may be null
    */
-  void init(final WALFactory factory, final Configuration conf,
-      final List<WALActionsListener> listeners, final String providerId) throws IOException;
+  void init(WALFactory factory, Configuration conf, List<WALActionsListener> listeners,
+      String providerId) throws IOException;
 
   /**
-   * @param identifier may not be null. contents will not be altered.
-   * @param namespace could be null, and will use default namespace if null
+   * @param region the region which we want to get a WAL for it. Could be null.
    * @return a WAL for writing entries for the given region.
    */
-  WAL getWAL(final byte[] identifier, byte[] namespace) throws IOException;
+  WAL getWAL(RegionInfo region) throws IOException;
 
-  /** @return the List of WALs that are used by this server
+  /**
+   * @return the List of WALs that are used by this server
    */
   List<WAL> getWALs();
 
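
The interface now states the whole contract in one nullable parameter: every provider must accept a null region (DisabledWALProvider ignores it; RegionGroupingProvider maps it to HConstants.EMPTY_BYTE_ARRAY, as shown above). From the caller's side the two cases are simply:

    WAL anyWal = provider.getWAL(null);          // "no particular region", e.g. in tests
    WAL regionWal = provider.getWAL(regionInfo); // the normal per-region case
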
@@ -2310,9 +2310,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
     Configuration confForWAL = new Configuration(conf);
     confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
     return (new WALFactory(confForWAL,
-        Collections.<WALActionsListener>singletonList(new MetricsWAL()),
-        "hregion-" + RandomStringUtils.randomNumeric(8))).
-        getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace());
+        Collections.<WALActionsListener> singletonList(new MetricsWAL()),
+        "hregion-" + RandomStringUtils.randomNumeric(8))).getWAL(hri);
   }
 
   /**
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.coprocessor;
 
 import static org.junit.Assert.assertEquals;
@@ -31,7 +29,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
 import java.util.TreeMap;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -40,18 +37,19 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
-import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -61,6 +59,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
 import org.apache.hadoop.hbase.wal.WALSplitter;
@@ -172,17 +171,17 @@ public class TestWALObserver {
    */
   @Test
   public void testWALObserverWriteToWAL() throws Exception {
-    final WAL log = wals.getWAL(UNSPECIFIED_REGION, null);
+    final WAL log = wals.getWAL(null);
     verifyWritesSeen(log, getCoprocessor(log, SampleRegionWALCoprocessor.class), false);
   }
 
   private void verifyWritesSeen(final WAL log, final SampleRegionWALCoprocessor cp,
       final boolean seesLegacy) throws Exception {
-    HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(TEST_TABLE));
-    final HTableDescriptor htd = createBasic3FamilyHTD(Bytes
+    RegionInfo hri = createBasicHRegionInfo(Bytes.toString(TEST_TABLE));
+    TableDescriptor htd = createBasic3FamilyHTD(Bytes
         .toString(TEST_TABLE));
     NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-    for(byte[] fam : htd.getFamiliesKeys()) {
+    for (byte[] fam : htd.getColumnFamilyNames()) {
       scopes.put(fam, 0);
     }
     Path basedir = new Path(this.hbaseRootDir, Bytes.toString(TEST_TABLE));
@@ -268,14 +267,14 @@ public class TestWALObserver {
    */
   @Test
   public void testEmptyWALEditAreNotSeen() throws Exception {
-    final HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(TEST_TABLE));
-    final HTableDescriptor htd = createBasic3FamilyHTD(Bytes.toString(TEST_TABLE));
-    final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
+    RegionInfo hri = createBasicHRegionInfo(Bytes.toString(TEST_TABLE));
+    TableDescriptor htd = createBasic3FamilyHTD(Bytes.toString(TEST_TABLE));
+    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
     NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-    for(byte[] fam : htd.getFamiliesKeys()) {
+    for(byte[] fam : htd.getColumnFamilyNames()) {
       scopes.put(fam, 0);
     }
-    WAL log = wals.getWAL(UNSPECIFIED_REGION, null);
+    WAL log = wals.getWAL(null);
     try {
       SampleRegionWALCoprocessor cp = getCoprocessor(log, SampleRegionWALCoprocessor.class);
 
@@ -304,14 +303,14 @@ public class TestWALObserver {
   public void testWALCoprocessorReplay() throws Exception {
     // WAL replay is handled at HRegion::replayRecoveredEdits(), which is
     // ultimately called by HRegion::initialize()
-    final TableName tableName = TableName.valueOf(currentTest.getMethodName());
-    final HTableDescriptor htd = getBasic3FamilyHTableDescriptor(tableName);
+    TableName tableName = TableName.valueOf(currentTest.getMethodName());
+    TableDescriptor htd = getBasic3FamilyHTableDescriptor(tableName);
     MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
     // final HRegionInfo hri =
     // createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
     // final HRegionInfo hri1 =
     // createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
-    final HRegionInfo hri = new HRegionInfo(tableName, null, null);
+    RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build();
 
     final Path basedir =
         FSUtils.getTableDir(this.hbaseRootDir, tableName);
@@ -321,21 +320,21 @@ public class TestWALObserver {
     final Configuration newConf = HBaseConfiguration.create(this.conf);
 
     // WAL wal = new WAL(this.fs, this.dir, this.oldLogDir, this.conf);
-    WAL wal = wals.getWAL(UNSPECIFIED_REGION, null);
+    WAL wal = wals.getWAL(null);
     // Put p = creatPutWith2Families(TEST_ROW);
     WALEdit edit = new WALEdit();
     long now = EnvironmentEdgeManager.currentTime();
     final int countPerFamily = 1000;
     NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-    for (HColumnDescriptor hcd : htd.getFamilies()) {
-      scopes.put(hcd.getName(), 0);
+    for (byte[] fam : htd.getColumnFamilyNames()) {
+      scopes.put(fam, 0);
     }
-    for (HColumnDescriptor hcd : htd.getFamilies()) {
-      addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily,
+    for (byte[] fam : htd.getColumnFamilyNames()) {
+      addWALEdits(tableName, hri, TEST_ROW, fam, countPerFamily,
           EnvironmentEdgeManager.getDelegate(), wal, scopes, mvcc);
     }
     wal.append(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit,
       true);
     // sync to fs.
     wal.sync();
 
@@ -345,14 +344,12 @@ public class TestWALObserver {
       public Void run() throws Exception {
         Path p = runWALSplit(newConf);
         LOG.info("WALSplit path == " + p);
-        FileSystem newFS = FileSystem.get(newConf);
         // Make a new wal for new region open.
         final WALFactory wals2 = new WALFactory(conf, null,
             ServerName.valueOf(currentTest.getMethodName()+"2", 16010, System.currentTimeMillis()).toString());
-        WAL wal2 = wals2.getWAL(UNSPECIFIED_REGION, null);
+        WAL wal2 = wals2.getWAL(null);
         HRegion region = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir,
           hri, htd, wal2, TEST_UTIL.getHBaseCluster().getRegionServer(0), null);
-        long seqid2 = region.getOpenSeqNum();
 
         SampleRegionWALCoprocessor cp2 =
           region.getCoprocessorHost().findCoprocessor(SampleRegionWALCoprocessor.class);
@@ -374,13 +371,13 @@ public class TestWALObserver {
    */
   @Test
   public void testWALObserverLoaded() throws Exception {
-    WAL log = wals.getWAL(UNSPECIFIED_REGION, null);
+    WAL log = wals.getWAL(null);
     assertNotNull(getCoprocessor(log, SampleRegionWALCoprocessor.class));
   }
 
   @Test
   public void testWALObserverRoll() throws Exception {
-    final WAL wal = wals.getWAL(UNSPECIFIED_REGION, null);
+    final WAL wal = wals.getWAL(null);
     final SampleRegionWALCoprocessor cp = getCoprocessor(wal, SampleRegionWALCoprocessor.class);
     cp.setTestValues(TEST_TABLE, null, null, null, null, null, null, null);
 
@@ -399,20 +396,12 @@ public class TestWALObserver {
     return (SampleRegionWALCoprocessor) c;
   }
 
-  /*
-   * Creates an HRI around an HTD that has <code>tableName</code> and three
-   * column families named.
-   *
-   * @param tableName Name of table to use when we create HTableDescriptor.
+  /**
+   * Creates an HRI around an HTD that has <code>tableName</code>.
+   * @param tableName Name of table to use.
    */
-  private HRegionInfo createBasic3FamilyHRegionInfo(final String tableName) {
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
-
-    for (int i = 0; i < TEST_FAMILY.length; i++) {
-      HColumnDescriptor a = new HColumnDescriptor(TEST_FAMILY[i]);
-      htd.addFamily(a);
-    }
-    return new HRegionInfo(htd.getTableName(), null, null, false);
+  private RegionInfo createBasicHRegionInfo(String tableName) {
+    return RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
   }
 
   /*
@@ -463,12 +452,10 @@ public class TestWALObserver {
     return splits.get(0);
   }
 
-  private static final byte[] UNSPECIFIED_REGION = new byte[]{};
-
-  private void addWALEdits(final TableName tableName, final HRegionInfo hri, final byte[] rowName,
+  private void addWALEdits(final TableName tableName, final RegionInfo hri, final byte[] rowName,
       final byte[] family, final int count, EnvironmentEdge ee, final WAL wal,
       final NavigableMap<byte[], Integer> scopes, final MultiVersionConcurrencyControl mvcc)
       throws IOException {
     String familyStr = Bytes.toString(family);
     long txid = -1;
     for (int j = 0; j < count; j++) {
@@ -478,33 +465,25 @@ public class TestWALObserver {
       edit.add(new KeyValue(rowName, family, qualifierBytes, ee.currentTime(), columnBytes));
       // uses WALKeyImpl instead of HLogKey on purpose. will only work for tests where we don't care
      // about legacy coprocessors
-      txid = wal.append(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName,
-          ee.currentTime(), mvcc), edit, true);
+      txid = wal.append(hri,
+        new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, ee.currentTime(), mvcc), edit, true);
     }
     if (-1 != txid) {
       wal.sync(txid);
     }
   }
 
-  private HTableDescriptor getBasic3FamilyHTableDescriptor(
-      final TableName tableName) {
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-
-    for (int i = 0; i < TEST_FAMILY.length; i++) {
-      HColumnDescriptor a = new HColumnDescriptor(TEST_FAMILY[i]);
-      htd.addFamily(a);
-    }
-    return htd;
+  private TableDescriptor getBasic3FamilyHTableDescriptor(TableName tableName) {
+    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
+    Arrays.stream(TEST_FAMILY).map(ColumnFamilyDescriptorBuilder::of)
+        .forEachOrdered(builder::addColumnFamily);
+    return builder.build();
   }
 
-  private HTableDescriptor createBasic3FamilyHTD(final String tableName) {
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
-    HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
-    htd.addFamily(a);
-    HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b"));
-    htd.addFamily(b);
-    HColumnDescriptor c = new HColumnDescriptor(Bytes.toBytes("c"));
-    htd.addFamily(c);
-    return htd;
+  private TableDescriptor createBasic3FamilyHTD(String tableName) {
+    return TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.of("a"))
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.of("b"))
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.of("c")).build();
   }
 }
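
Most of the churn in this test is the standard migration off the deprecated HTableDescriptor/HColumnDescriptor/HRegionInfo classes onto immutable descriptors. A compact sketch of the builder pattern the new code uses (the table and family names here are illustrative):

    TableName tn = TableName.valueOf("t");
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(tn)
        .addColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build();
    RegionInfo ri = RegionInfoBuilder.newBuilder(tn).build(); // one whole-table region
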
@@ -26,16 +26,18 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Random;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -106,18 +108,19 @@ public class TestCacheOnWriteInSchema {
       return blockType == blockType1 || blockType == blockType2;
     }
 
-    public void modifyFamilySchema(HColumnDescriptor family) {
+    public ColumnFamilyDescriptorBuilder modifyFamilySchema(ColumnFamilyDescriptorBuilder builder) {
       switch (this) {
       case DATA_BLOCKS:
-        family.setCacheDataOnWrite(true);
+        builder.setCacheDataOnWrite(true);
         break;
       case BLOOM_BLOCKS:
-        family.setCacheBloomsOnWrite(true);
+        builder.setCacheBloomsOnWrite(true);
         break;
       case INDEX_BLOCKS:
-        family.setCacheIndexesOnWrite(true);
+        builder.setCacheIndexesOnWrite(true);
         break;
       }
+      return builder;
     }
   }
 
@@ -158,23 +161,22 @@ public class TestCacheOnWriteInSchema {
     fs = HFileSystem.get(conf);
 
     // Create the schema
-    HColumnDescriptor hcd = new HColumnDescriptor(family);
-    hcd.setBloomFilterType(BloomType.ROWCOL);
-    cowType.modifyFamilySchema(hcd);
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
-    htd.addFamily(hcd);
+    ColumnFamilyDescriptor hcd = cowType
+        .modifyFamilySchema(
+          ColumnFamilyDescriptorBuilder.newBuilder(family).setBloomFilterType(BloomType.ROWCOL))
+        .build();
+    TableDescriptor htd =
+        TableDescriptorBuilder.newBuilder(TableName.valueOf(table)).addColumnFamily(hcd).build();
 
     // Create a store based on the schema
-    final String id = TestCacheOnWriteInSchema.class.getName();
-    final Path logdir = new Path(FSUtils.getRootDir(conf),
-        AbstractFSWALProvider.getWALDirectoryName(id));
+    String id = TestCacheOnWriteInSchema.class.getName();
+    Path logdir = new Path(FSUtils.getRootDir(conf), AbstractFSWALProvider.getWALDirectoryName(id));
     fs.delete(logdir, true);
 
-    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
+    RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
     walFactory = new WALFactory(conf, null, id);
 
-    region = TEST_UTIL.createLocalHRegion(info, htd,
-        walFactory.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()));
+    region = TEST_UTIL.createLocalHRegion(info, htd, walFactory.getWAL(info));
     store = new HStore(region, hcd, conf);
   }
 
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -29,17 +28,18 @@ import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -85,10 +85,10 @@ public class TestCompactionArchiveConcurrentClose {
     byte[] col = Bytes.toBytes("c");
     byte[] val = Bytes.toBytes("val");
 
-    final TableName tableName = TableName.valueOf(name.getMethodName());
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    htd.addFamily(new HColumnDescriptor(fam));
-    HRegionInfo info = new HRegionInfo(tableName, null, null, false);
+    TableName tableName = TableName.valueOf(name.getMethodName());
+    TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build();
+    RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build();
     HRegion region = initHRegion(htd, info);
     RegionServerServices rss = mock(RegionServerServices.class);
     List<HRegion> regions = new ArrayList<>();
@@ -157,20 +157,17 @@ public class TestCompactionArchiveConcurrentClose {
     }
   }
 
-  private HRegion initHRegion(HTableDescriptor htd, HRegionInfo info)
-      throws IOException {
+  private HRegion initHRegion(TableDescriptor htd, RegionInfo info) throws IOException {
     Configuration conf = testUtil.getConfiguration();
     Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName());
 
-    HRegionFileSystem fs = new WaitingHRegionFileSystem(conf, tableDir.getFileSystem(conf),
-        tableDir, info);
+    HRegionFileSystem fs =
+        new WaitingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir, info);
     ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
     final Configuration walConf = new Configuration(conf);
     FSUtils.setRootDir(walConf, tableDir);
     final WALFactory wals = new WALFactory(walConf, null, "log_" + info.getEncodedName());
-    HRegion region =
-        new HRegion(fs, wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()),
-            conf, htd, null);
+    HRegion region = new HRegion(fs, wals.getWAL(info), conf, htd, null);
 
     region.initialize();
 
@ -180,7 +177,7 @@ public class TestCompactionArchiveConcurrentClose {
|
||||||
private class WaitingHRegionFileSystem extends HRegionFileSystem {
|
private class WaitingHRegionFileSystem extends HRegionFileSystem {
|
||||||
|
|
||||||
public WaitingHRegionFileSystem(final Configuration conf, final FileSystem fs,
|
public WaitingHRegionFileSystem(final Configuration conf, final FileSystem fs,
|
||||||
final Path tableDir, final HRegionInfo regionInfo) {
|
final Path tableDir, final RegionInfo regionInfo) {
|
||||||
super(conf, fs, tableDir, regionInfo);
|
super(conf, fs, tableDir, regionInfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
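The same substitution recurs in every file in this commit: the old `WALFactory.getWAL(byte[], byte[])` took an encoded region name plus a namespace, while the new overload takes the `RegionInfo` itself. A minimal sketch of the call-site migration, assuming only the constructors and methods visible in the diffs above (the `walFor` helper and its configuration wiring are illustrative, not part of this commit):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class GetWalSketch {
  // Illustrative helper, not part of the commit: obtain the WAL for a region.
  static WAL walFor(Configuration conf, TableName tableName) throws IOException {
    RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build();
    WALFactory wals = new WALFactory(conf, null, "log_" + info.getEncodedName());
    // Old shape: wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace())
    // New shape: the RegionInfo is passed directly.
    return wals.getWAL(info);
  }
}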
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information

@@ -15,38 +15,36 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */

 package org.apache.hadoop.hbase.regionserver;

 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;

-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.FailedArchiveException;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;

@@ -59,6 +57,8 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.mockito.Mockito;

+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
+
 /**
  * Tests that archiving compacted files behaves correctly when encountering exceptions.
  */

@@ -93,11 +93,11 @@ public class TestCompactionArchiveIOException {
     byte[] col = Bytes.toBytes("c");
     byte[] val = Bytes.toBytes("val");

-    final TableName tableName = TableName.valueOf(name.getMethodName());
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    htd.addFamily(new HColumnDescriptor(fam));
-    HRegionInfo info = new HRegionInfo(tableName, null, null, false);
-    final HRegion region = initHRegion(htd, info);
+    TableName tableName = TableName.valueOf(name.getMethodName());
+    TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build();
+    RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build();
+    HRegion region = initHRegion(htd, info);
     RegionServerServices rss = mock(RegionServerServices.class);
     List<HRegion> regions = new ArrayList<>();
     regions.add(region);

@@ -172,29 +172,25 @@ public class TestCompactionArchiveIOException {
     }
   }

-  private HRegion initHRegion(HTableDescriptor htd, HRegionInfo info)
-      throws IOException {
+  private HRegion initHRegion(TableDescriptor htd, RegionInfo info) throws IOException {
     Configuration conf = testUtil.getConfiguration();
     ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
     Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName());
     Path regionDir = new Path(tableDir, info.getEncodedName());
     Path storeDir = new Path(regionDir, htd.getColumnFamilies()[0].getNameAsString());

     FileSystem errFS = spy(testUtil.getTestFileSystem());
     // Prior to HBASE-16964, when an exception is thrown archiving any compacted file,
     // none of the other files are cleared from the compactedfiles list.
     // Simulate this condition with a dummy file
-    doThrow(new IOException("Error for test"))
-        .when(errFS).rename(eq(new Path(storeDir, ERROR_FILE)), any());
+    doThrow(new IOException("Error for test")).when(errFS)
+        .rename(eq(new Path(storeDir, ERROR_FILE)), any());

     HRegionFileSystem fs = new HRegionFileSystem(conf, errFS, tableDir, info);
     final Configuration walConf = new Configuration(conf);
     FSUtils.setRootDir(walConf, tableDir);
     final WALFactory wals = new WALFactory(walConf, null, "log_" + info.getEncodedName());
-    HRegion region =
-        new HRegion(fs, wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()),
-            conf, htd, null);
+    HRegion region = new HRegion(fs, wals.getWAL(info), conf, htd, null);

     region.initialize();

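Alongside the WAL lookup change, these tests move off the deprecated HTableDescriptor/HColumnDescriptor/HRegionInfo classes onto the builder APIs. A self-contained sketch of the equivalent construction, using only calls that appear in the diffs (the table and family names are placeholders):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class BuilderMigrationSketch {
  public static void main(String[] args) {
    TableName tableName = TableName.valueOf("t"); // placeholder table name
    byte[] fam = Bytes.toBytes("f");              // placeholder family
    // Replaces: HTableDescriptor htd = new HTableDescriptor(tableName);
    //           htd.addFamily(new HColumnDescriptor(fam));
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build();
    // Replaces: new HRegionInfo(tableName, null, null, false)
    RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
    System.out.println(info.getEncodedName());
  }
}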
@@ -30,7 +30,6 @@ import java.util.List;
 import java.util.Objects;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;

@@ -39,18 +38,19 @@ import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeepDeletedCells;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueTestUtil;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;

@@ -68,6 +68,7 @@ import org.junit.rules.TestName;
 import org.junit.rules.TestRule;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
 import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

@@ -451,22 +452,16 @@ public class TestDefaultMemStore {

     final MultiVersionConcurrencyControl mvcc;
     final MemStore memstore;
-    final AtomicLong startSeqNum;

     AtomicReference<Throwable> caughtException;


-    public ReadOwnWritesTester(int id,
-        MemStore memstore,
-        MultiVersionConcurrencyControl mvcc,
-        AtomicReference<Throwable> caughtException,
-        AtomicLong startSeqNum)
-    {
+    public ReadOwnWritesTester(int id, MemStore memstore, MultiVersionConcurrencyControl mvcc,
+        AtomicReference<Throwable> caughtException) {
       this.mvcc = mvcc;
       this.memstore = memstore;
       this.caughtException = caughtException;
       row = Bytes.toBytes(id);
-      this.startSeqNum = startSeqNum;
     }

     @Override

@@ -505,14 +500,13 @@ public class TestDefaultMemStore {

   @Test
   public void testReadOwnWritesUnderConcurrency() throws Throwable {

     int NUM_THREADS = 8;

     ReadOwnWritesTester threads[] = new ReadOwnWritesTester[NUM_THREADS];
     AtomicReference<Throwable> caught = new AtomicReference<>();

     for (int i = 0; i < NUM_THREADS; i++) {
-      threads[i] = new ReadOwnWritesTester(i, memstore, mvcc, caught, this.startSeqNum);
+      threads[i] = new ReadOwnWritesTester(i, memstore, mvcc, caught);
       threads[i].start();
     }

@@ -921,7 +915,8 @@ public class TestDefaultMemStore {
     EnvironmentEdgeManager.injectEdge(edge);
     HBaseTestingUtility hbaseUtility = HBaseTestingUtility.createLocalHTU(conf);
     String cf = "foo";
-    HRegion region = hbaseUtility.createTestRegion("foobar", new HColumnDescriptor(cf));
+    HRegion region =
+        hbaseUtility.createTestRegion("foobar", ColumnFamilyDescriptorBuilder.of(cf));

     edge.setCurrentTimeMillis(1234);
     Put p = new Put(Bytes.toBytes("r"));

@@ -950,20 +945,16 @@ public class TestDefaultMemStore {
     EnvironmentEdgeManager.injectEdge(edge);
     edge.setCurrentTimeMillis(1234);
     WALFactory wFactory = new WALFactory(conf, null, "1234");
-    HRegion meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir,
+    HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, testDir,
         conf, FSTableDescriptors.createMetaTableDescriptor(conf),
-        wFactory.getMetaWAL(HRegionInfo.FIRST_META_REGIONINFO.
-            getEncodedNameAsBytes()));
+        wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO));
     // parameterized tests add [#] suffix get rid of [ and ].
-    HRegionInfo hri =
-        new HRegionInfo(TableName.valueOf(name.getMethodName().replaceAll("[\\[\\]]", "_")),
-            Bytes.toBytes("row_0200"), Bytes.toBytes("row_0300"));
-    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(
-        name.getMethodName().replaceAll("[\\[\\]]", "_")));
-    desc.addFamily(new HColumnDescriptor("foo".getBytes()));
-    HRegion r =
-        HRegion.createHRegion(hri, testDir, conf, desc,
-            wFactory.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace()));
+    TableDescriptor desc = TableDescriptorBuilder
+        .newBuilder(TableName.valueOf(name.getMethodName().replaceAll("[\\[\\]]", "_")))
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.of("foo")).build();
+    RegionInfo hri = RegionInfoBuilder.newBuilder(desc.getTableName())
+        .setStartKey(Bytes.toBytes("row_0200")).setEndKey(Bytes.toBytes("row_0300")).build();
+    HRegion r = HRegion.createHRegion(hri, testDir, conf, desc, wFactory.getWAL(hri));
     addRegionToMETA(meta, r);
     edge.setCurrentTimeMillis(1234 + 100);
     StringBuilder sb = new StringBuilder();

@@ -162,8 +162,7 @@ public class TestHMobStore {
     final Configuration walConf = new Configuration(conf);
     FSUtils.setRootDir(walConf, basedir);
     final WALFactory wals = new WALFactory(walConf, null, methodName);
-    region = new HRegion(tableDir, wals.getWAL(info.getEncodedNameAsBytes(),
-        info.getTable().getNamespace()), fs, conf, info, htd, null);
+    region = new HRegion(tableDir, wals.getWAL(info), fs, conf, info, htd, null);
     store = new HMobStore(region, hcd, conf);
     if(testStore) {
       init(conf, hcd);

@@ -63,7 +63,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;

 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;

@@ -110,6 +109,7 @@ import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;

@@ -180,8 +180,10 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;

@@ -378,9 +380,8 @@ public class TestHRegion {
     final Path logDir = TEST_UTIL.getDataTestDirOnTestFS(callingMethod + ".log");
     final Configuration walConf = new Configuration(conf);
     FSUtils.setRootDir(walConf, logDir);
-    return (new WALFactory(walConf,
-        Collections.<WALActionsListener>singletonList(new MetricsWAL()), callingMethod))
-        .getWAL(tableName.toBytes(), tableName.getNamespace());
+    return new WALFactory(walConf, Collections.<WALActionsListener> singletonList(new MetricsWAL()),
+        callingMethod).getWAL(RegionInfoBuilder.newBuilder(tableName).build());
   }

   @Test

@@ -960,7 +961,7 @@ public class TestHRegion {
     final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
     FSUtils.setRootDir(walConf, logDir);
     final WALFactory wals = new WALFactory(walConf, null, method);
-    final WAL wal = wals.getWAL(tableName.getName(), tableName.getNamespace());
+    final WAL wal = wals.getWAL(RegionInfoBuilder.newBuilder(tableName).build());

     this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW,
         HConstants.EMPTY_END_ROW, false, Durability.USE_DEFAULT, wal, family);

@@ -4690,7 +4691,7 @@ public class TestHRegion {
     // deal with classes which have a field of an inner class. See discussions in HBASE-15536.
     walConf.set(WALFactory.WAL_PROVIDER, "filesystem");
     final WALFactory wals = new WALFactory(walConf, null, UUID.randomUUID().toString());
-    final WAL wal = spy(wals.getWAL(tableName.getName(), tableName.getNamespace()));
+    final WAL wal = spy(wals.getWAL(RegionInfoBuilder.newBuilder(tableName).build()));
     this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW,
         HConstants.EMPTY_END_ROW, false, tableDurability, wal,
         new byte[][] { family });

@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information

@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */

 package org.apache.hadoop.hbase.regionserver;

 import static org.apache.hadoop.hbase.regionserver.TestHRegion.assertGet;

@@ -27,8 +26,8 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;

@@ -43,7 +42,6 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.Random;
 import java.util.UUID;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;

@@ -51,7 +49,6 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;

@@ -59,6 +56,8 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.executor.ExecutorService;

@@ -89,8 +88,10 @@ import org.junit.rules.TestName;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;

@@ -132,7 +133,7 @@ public class TestHRegionReplayEvents {
   private TableDescriptor htd;
   private long time;
   private RegionServerServices rss;
-  private HRegionInfo primaryHri, secondaryHri;
+  private RegionInfo primaryHri, secondaryHri;
   private HRegion primaryRegion, secondaryRegion;
   private WALFactory wals;
   private WAL walPrimary, walSecondary;

@@ -156,18 +157,14 @@ public class TestHRegionReplayEvents {

     time = System.currentTimeMillis();
     ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
-    primaryHri = new HRegionInfo(htd.getTableName(),
-        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
-        false, time, 0);
-    secondaryHri = new HRegionInfo(htd.getTableName(),
-        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
-        false, time, 1);
+    primaryHri =
+        RegionInfoBuilder.newBuilder(htd.getTableName()).setRegionId(time).setReplicaId(0).build();
+    secondaryHri =
+        RegionInfoBuilder.newBuilder(htd.getTableName()).setRegionId(time).setReplicaId(1).build();

     wals = TestHRegion.createWALFactory(CONF, rootDir);
-    walPrimary = wals.getWAL(primaryHri.getEncodedNameAsBytes(),
-        primaryHri.getTable().getNamespace());
-    walSecondary = wals.getWAL(secondaryHri.getEncodedNameAsBytes(),
-        secondaryHri.getTable().getNamespace());
+    walPrimary = wals.getWAL(primaryHri);
+    walSecondary = wals.getWAL(secondaryHri);

     rss = mock(RegionServerServices.class);
     when(rss.getServerName()).thenReturn(ServerName.valueOf("foo", 1, 1));

@@ -1150,8 +1147,8 @@ public class TestHRegionReplayEvents {

     // test for region open and close
     secondaryRegion = HRegion.openHRegion(secondaryHri, htd, walSecondary, CONF, rss, null);
-    verify(walSecondary, times(0)).append((HRegionInfo)any(),
-        (WALKeyImpl)any(), (WALEdit)any(), anyBoolean());
+    verify(walSecondary, times(0)).append(any(RegionInfo.class), any(WALKeyImpl.class),
+        any(WALEdit.class), anyBoolean());

     // test for replay prepare flush
     putDataByReplay(secondaryRegion, 0, 10, cq, families);

@@ -1166,12 +1163,12 @@ public class TestHRegionReplayEvents {
         primaryRegion.getRegionInfo().getRegionName()))
         .build());

-    verify(walSecondary, times(0)).append((HRegionInfo)any(),
-        (WALKeyImpl)any(), (WALEdit)any(), anyBoolean());
+    verify(walSecondary, times(0)).append(any(RegionInfo.class), any(WALKeyImpl.class),
+        any(WALEdit.class), anyBoolean());

     secondaryRegion.close();
-    verify(walSecondary, times(0)).append((HRegionInfo)any(),
-        (WALKeyImpl)any(), (WALEdit)any(), anyBoolean());
+    verify(walSecondary, times(0)).append(any(RegionInfo.class), any(WALKeyImpl.class),
+        any(WALEdit.class), anyBoolean());
   }

   /**

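The `verify` rewrites above also track Mockito's move from `org.mockito.Matchers` to `org.mockito.ArgumentMatchers`: casting a bare `any()` to the expected type gives way to the typed `any(Class)` form. A hedged sketch of the new style against a generic mock; the `Appender` interface here is invented for illustration and only loosely mirrors the WAL append signature:

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

public class MatcherMigrationSketch {
  // Invented stand-in for an append-style call, for illustration only.
  interface Appender {
    long append(String regionName, String key, String edit, boolean inMemstore);
  }

  public static void main(String[] args) {
    Appender appender = mock(Appender.class);
    // Old style (deprecated): verify(...).append((String) any(), (String) any(), ...)
    // New style: typed matchers from ArgumentMatchers.
    verify(appender, times(0)).append(any(String.class), any(String.class), any(String.class),
        anyBoolean());
  }
}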
@@ -21,7 +21,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
+import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;

@@ -47,7 +47,6 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;

@@ -65,7 +64,6 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MemoryCompactionPolicy;
 import org.apache.hadoop.hbase.PrivateCellUtil;

@@ -73,6 +71,8 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

@@ -112,6 +112,7 @@ import org.junit.rules.TestName;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

 /**

@@ -205,12 +206,12 @@ public class TestHStore {
     fs.delete(logdir, true);
     ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false,
       MemStoreLABImpl.CHUNK_SIZE_DEFAULT, 1, 0, null);
-    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
+    RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
     Configuration walConf = new Configuration(conf);
     FSUtils.setRootDir(walConf, basedir);
     WALFactory wals = new WALFactory(walConf, null, methodName);
-    region = new HRegion(new HRegionFileSystem(conf, fs, tableDir, info),
-        wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()), conf, htd, null);
+    region = new HRegion(new HRegionFileSystem(conf, fs, tableDir, info), wals.getWAL(info), conf,
+        htd, null);
   }

   private HStore init(String methodName, Configuration conf, TableDescriptorBuilder builder,

@@ -1006,7 +1007,6 @@ public class TestHStore {
     assertEquals(0, this.store.getStorefilesCount());
   }

-  @SuppressWarnings("unchecked")
   @Test
   public void testRefreshStoreFilesNotChanged() throws IOException {
     init(name.getMethodName());

@@ -26,31 +26,33 @@ import static org.mockito.Mockito.when;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.wal.WALFactory;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.StoppableImplementation;
+import org.apache.hadoop.hbase.wal.WALFactory;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;

@@ -73,20 +75,20 @@ public class TestStoreFileRefresherChore {
     FSUtils.setRootDir(TEST_UTIL.getConfiguration(), testDir);
   }

-  private HTableDescriptor getTableDesc(TableName tableName, byte[]... families) {
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    for (byte[] family : families) {
-      HColumnDescriptor hcd = new HColumnDescriptor(family);
-      // Set default to be three versions.
-      hcd.setMaxVersions(Integer.MAX_VALUE);
-      htd.addFamily(hcd);
-    }
-    return htd;
+  private TableDescriptor getTableDesc(TableName tableName, int regionReplication,
+      byte[]... families) {
+    TableDescriptorBuilder builder =
+        TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(regionReplication);
+    Arrays.stream(families).map(family -> ColumnFamilyDescriptorBuilder.newBuilder(family)
+        .setMaxVersions(Integer.MAX_VALUE).build()).forEachOrdered(builder::addColumnFamily);
+    return builder.build();
   }

   static class FailingHRegionFileSystem extends HRegionFileSystem {
     boolean fail = false;
-    FailingHRegionFileSystem(Configuration conf, FileSystem fs, Path tableDir, HRegionInfo regionInfo) {
+
+    FailingHRegionFileSystem(Configuration conf, FileSystem fs, Path tableDir,
+        RegionInfo regionInfo) {
       super(conf, fs, tableDir, regionInfo);
     }

@@ -99,21 +101,21 @@ public class TestStoreFileRefresherChore {
     }
   }

-  private HRegion initHRegion(HTableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId)
+  private HRegion initHRegion(TableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId)
       throws IOException {
     Configuration conf = TEST_UTIL.getConfiguration();
     Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName());

-    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false, 0, replicaId);
-    HRegionFileSystem fs = new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir,
-        info);
+    RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(startKey)
+        .setEndKey(stopKey).setRegionId(0L).setReplicaId(replicaId).build();
+    HRegionFileSystem fs =
+        new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir, info);
     final Configuration walConf = new Configuration(conf);
     FSUtils.setRootDir(walConf, tableDir);
     final WALFactory wals = new WALFactory(walConf, null, "log_" + replicaId);
     ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
     HRegion region =
-        new HRegion(fs, wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()),
+        new HRegion(fs, wals.getWAL(info),
             conf, htd, null);

     region.initialize();

@@ -188,8 +190,7 @@ public class TestStoreFileRefresherChore {
     when(regionServer.getOnlineRegionsLocalContext()).thenReturn(regions);
     when(regionServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());

-    HTableDescriptor htd = getTableDesc(TableName.valueOf(name.getMethodName()), families);
-    htd.setRegionReplication(2);
+    TableDescriptor htd = getTableDesc(TableName.valueOf(name.getMethodName()), 2, families);
     HRegion primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0);
     HRegion replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1);
     regions.add(primary);

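The refresher-chore test exercises the full RegionInfoBuilder surface used for replica regions: start/end keys, a region id, and a replica id. A compact sketch of that construction, assuming only the setters shown in the diff above (keys and ids here are arbitrary):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class ReplicaRegionInfoSketch {
  public static void main(String[] args) {
    TableName tn = TableName.valueOf("t"); // arbitrary table
    // Primary (replica 0) and a read replica (replica 1) covering the same key range.
    RegionInfo primary = RegionInfoBuilder.newBuilder(tn)
        .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("m"))
        .setRegionId(0L).setReplicaId(0).build();
    RegionInfo replica = RegionInfoBuilder.newBuilder(tn)
        .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("m"))
        .setRegionId(0L).setReplicaId(1).build();
    System.out.println(primary.getEncodedName() + " / " + replica.getEncodedName());
  }
}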
@@ -119,7 +119,7 @@ public class TestWALMonotonicallyIncreasingSeqId {
     wals = new WALFactory(walConf, null, "log_" + replicaId);
     ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
     HRegion region = HRegion.createHRegion(info, TEST_UTIL.getDefaultRootDirPath(), conf, htd,
-        wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()));
+        wals.getWAL(info));
     return region;
   }

@@ -22,7 +22,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;

 import java.io.IOException;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HBaseTestingUtility;

@@ -160,12 +159,12 @@ public abstract class AbstractTestLogRolling {
   /**
    * Tests that log rolling doesn't hang when no data is written.
    */
-  @Test(timeout=120000)
+  @Test(timeout = 120000)
   public void testLogRollOnNothingWritten() throws Exception {
     final Configuration conf = TEST_UTIL.getConfiguration();
-    final WALFactory wals = new WALFactory(conf, null,
-        ServerName.valueOf("test.com",8080, 1).toString());
-    final WAL newLog = wals.getWAL(new byte[]{}, null);
+    final WALFactory wals =
+        new WALFactory(conf, null, ServerName.valueOf("test.com", 8080, 1).toString());
+    final WAL newLog = wals.getWAL(null);
     try {
       // Now roll the log before we write anything.
       newLog.rollWriter(true);

@ -15,7 +15,6 @@
|
||||||
* See the License for the specific language governing permissions and
|
* See the License for the specific language governing permissions and
|
||||||
* limitations under the License.
|
* limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package org.apache.hadoop.hbase.regionserver.wal;
|
package org.apache.hadoop.hbase.regionserver.wal;
|
||||||
|
|
||||||
import static org.junit.Assert.assertEquals;
|
import static org.junit.Assert.assertEquals;
|
||||||
|
@ -23,20 +22,21 @@ import static org.junit.Assert.assertTrue;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.Arrays;
|
import java.util.Arrays;
|
||||||
|
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
import org.apache.hadoop.fs.FileSystem;
|
import org.apache.hadoop.fs.FileSystem;
|
||||||
import org.apache.hadoop.fs.Path;
|
import org.apache.hadoop.fs.Path;
|
||||||
import org.apache.hadoop.hbase.HBaseTestingUtility;
|
import org.apache.hadoop.hbase.HBaseTestingUtility;
|
||||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
|
||||||
import org.apache.hadoop.hbase.HRegionInfo;
|
|
||||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
|
||||||
import org.apache.hadoop.hbase.ServerName;
|
import org.apache.hadoop.hbase.ServerName;
|
||||||
import org.apache.hadoop.hbase.TableName;
|
import org.apache.hadoop.hbase.TableName;
|
||||||
|
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
|
||||||
import org.apache.hadoop.hbase.client.Durability;
|
import org.apache.hadoop.hbase.client.Durability;
|
||||||
import org.apache.hadoop.hbase.client.Increment;
|
import org.apache.hadoop.hbase.client.Increment;
|
||||||
import org.apache.hadoop.hbase.client.Put;
|
import org.apache.hadoop.hbase.client.Put;
|
||||||
|
import org.apache.hadoop.hbase.client.RegionInfo;
|
||||||
|
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
|
||||||
import org.apache.hadoop.hbase.client.Result;
|
import org.apache.hadoop.hbase.client.Result;
|
||||||
|
import org.apache.hadoop.hbase.client.TableDescriptor;
|
||||||
|
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
|
||||||
import org.apache.hadoop.hbase.regionserver.ChunkCreator;
|
import org.apache.hadoop.hbase.regionserver.ChunkCreator;
|
||||||
import org.apache.hadoop.hbase.regionserver.HRegion;
|
import org.apache.hadoop.hbase.regionserver.HRegion;
|
||||||
import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
|
import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
|
||||||
|
@ -52,8 +52,10 @@ import org.junit.After;
|
||||||
import org.junit.AfterClass;
|
import org.junit.AfterClass;
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
import org.junit.BeforeClass;
|
import org.junit.BeforeClass;
|
||||||
|
import org.junit.Rule;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
import org.junit.experimental.categories.Category;
|
import org.junit.experimental.categories.Category;
|
||||||
|
import org.junit.rules.TestName;
|
||||||
import org.junit.runner.RunWith;
|
import org.junit.runner.RunWith;
|
||||||
import org.junit.runners.Parameterized;
|
import org.junit.runners.Parameterized;
|
||||||
import org.junit.runners.Parameterized.Parameter;
|
import org.junit.runners.Parameterized.Parameter;
|
||||||
|
@ -63,7 +65,7 @@ import org.junit.runners.Parameterized.Parameters;
|
||||||
* Tests for WAL write durability
|
* Tests for WAL write durability
|
||||||
*/
|
*/
|
||||||
@RunWith(Parameterized.class)
|
@RunWith(Parameterized.class)
|
||||||
@Category({RegionServerTests.class, MediumTests.class})
|
@Category({ RegionServerTests.class, MediumTests.class })
|
||||||
public class TestDurability {
|
public class TestDurability {
|
||||||
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
|
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
|
||||||
private static FileSystem FS;
|
private static FileSystem FS;
|
||||||
|
@ -78,6 +80,9 @@ public class TestDurability {
|
||||||
@Parameter
|
@Parameter
|
||||||
public String walProvider;
|
public String walProvider;
|
||||||
|
|
||||||
|
@Rule
|
||||||
|
public TestName name = new TestName();
|
||||||
|
|
||||||
   @Parameters(name = "{index}: provider={0}")
   public static Iterable<Object[]> data() {
     return Arrays.asList(new Object[] { "defaultProvider" }, new Object[] { "asyncfs" });
@@ -111,12 +116,12 @@ public class TestDurability

   @Test
   public void testDurability() throws Exception {
-    final WALFactory wals = new WALFactory(CONF, null, ServerName.valueOf("TestDurability",
-        16010, System.currentTimeMillis()).toString());
-    byte[] tableName = Bytes.toBytes("TestDurability");
-    final WAL wal = wals.getWAL(tableName, null);
-    HRegion region = createHRegion(tableName, "region", wal, Durability.USE_DEFAULT);
-    HRegion deferredRegion = createHRegion(tableName, "deferredRegion", wal, Durability.ASYNC_WAL);
+    WALFactory wals = new WALFactory(CONF, null,
+        ServerName.valueOf("TestDurability", 16010, System.currentTimeMillis()).toString());
+    HRegion region = createHRegion(wals, Durability.USE_DEFAULT);
+    WAL wal = region.getWAL();
+    HRegion deferredRegion = createHRegion(region.getTableDescriptor(), region.getRegionInfo(),
+        "deferredRegion", wal, Durability.ASYNC_WAL);

     region.put(newPut(null));
     verifyWALCount(wals, wal, 1);
@@ -175,11 +180,10 @@ public class TestDurability
     byte[] col3 = Bytes.toBytes("col3");

     // Setting up region
-    final WALFactory wals = new WALFactory(CONF, null,
+    WALFactory wals = new WALFactory(CONF, null,
         ServerName.valueOf("TestIncrement", 16010, System.currentTimeMillis()).toString());
-    byte[] tableName = Bytes.toBytes("TestIncrement");
-    final WAL wal = wals.getWAL(tableName, null);
-    HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);
+    HRegion region = createHRegion(wals, Durability.USE_DEFAULT);
+    WAL wal = region.getWAL();

     // col1: amount = 0, 1 write back to WAL
     Increment inc1 = new Increment(row1);
@@ -231,8 +235,8 @@ public class TestDurability
     assertEquals(3, Bytes.toLong(res.getValue(FAMILY, col3)));
     verifyWALCount(wals, wal, 4);
   }

-  /*
+  /**
    * Test when returnResults set to false in increment it should not return the result instead it
    * resturn null.
    */
@@ -242,12 +246,11 @@ public class TestDurability
     byte[] col1 = Bytes.toBytes("col1");

     // Setting up region
-    final WALFactory wals = new WALFactory(CONF, null,
-        ServerName.valueOf("testIncrementWithReturnResultsSetToFalse", 16010,
-        System.currentTimeMillis()).toString());
-    byte[] tableName = Bytes.toBytes("testIncrementWithReturnResultsSetToFalse");
-    final WAL wal = wals.getWAL(tableName, null);
-    HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);
+    WALFactory wals = new WALFactory(CONF, null,
+        ServerName
+            .valueOf("testIncrementWithReturnResultsSetToFalse", 16010, System.currentTimeMillis())
+            .toString());
+    HRegion region = createHRegion(wals, Durability.USE_DEFAULT);

     Increment inc1 = new Increment(row1);
     inc1.setReturnResults(false);
@@ -270,28 +273,38 @@ public class TestDurability
     WAL.Reader reader = wals.createReader(FS, walPath);
     int count = 0;
     WAL.Entry entry = new WAL.Entry();
-    while (reader.next(entry) != null) count++;
+    while (reader.next(entry) != null) {
+      count++;
+    }
     reader.close();
     assertEquals(expected, count);
   }

   // lifted from TestAtomicOperation
-  private HRegion createHRegion (byte [] tableName, String callingMethod,
-      WAL log, Durability durability)
-      throws IOException {
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
-    htd.setDurability(durability);
-    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
-    htd.addFamily(hcd);
-    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
-    Path path = new Path(DIR + callingMethod);
-    if (FS.exists(path)) {
-      if (!FS.delete(path, true)) {
-        throw new IOException("Failed delete of " + path);
-      }
-    }
-    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
-    return HRegion.createHRegion(info, path, CONF, htd, log);
-  }
+  private HRegion createHRegion(WALFactory wals, Durability durability) throws IOException {
+    TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[^A-Za-z0-9-_]", "_"));
+    TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build();
+    RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build();
+    Path path = new Path(DIR, tableName.getNameAsString());
+    if (FS.exists(path)) {
+      if (!FS.delete(path, true)) {
+        throw new IOException("Failed delete of " + path);
+      }
+    }
+    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
+    return HRegion.createHRegion(info, path, CONF, htd, wals.getWAL(info));
+  }
+
+  private HRegion createHRegion(TableDescriptor td, RegionInfo info, String dir, WAL wal,
+      Durability durability) throws IOException {
+    Path path = new Path(DIR, dir);
+    if (FS.exists(path)) {
+      if (!FS.delete(path, true)) {
+        throw new IOException("Failed delete of " + path);
+      }
+    }
+    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
+    return HRegion.createHRegion(info, path, CONF, td, wal);
+  }
 }

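Note: the change running through these TestDurability hunks is the core of HBASE-19751 — `WALFactory.getWAL(byte[] identifier, byte[] namespace)` becomes `WALFactory.getWAL(RegionInfo)`. A minimal before/after sketch, assuming a `WALFactory` named `wals` (variable names here are illustrative, not from the patch):

    // Before: callers handed the factory an encoded region name plus a namespace.
    // WAL wal = wals.getWAL(info.getEncodedNameAsBytes(), tn.getNamespace());

    // After: the factory receives the whole RegionInfo and derives
    // whatever it needs (encoded name, namespace, table) itself.
    TableName tn = TableName.valueOf("TestDurability");
    RegionInfo info = RegionInfoBuilder.newBuilder(tn).build();
    WAL wal = wals.getWAL(info);
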
@@ -21,22 +21,23 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.NavigableMap;
 import java.util.TreeMap;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
@@ -136,11 +137,11 @@ public class TestLogRollAbort

     // Create the test table and open it
     TableName tableName = TableName.valueOf(this.getClass().getSimpleName());
-    HTableDescriptor desc = new HTableDescriptor(tableName);
-    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
+    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build();

     admin.createTable(desc);
-    Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
+    Table table = TEST_UTIL.getConnection().getTable(tableName);
     try {
       HRegionServer server = TEST_UTIL.getRSForFirstRegionInTable(tableName);
       WAL log = server.getWAL(null);
@@ -189,32 +190,26 @@ public class TestLogRollAbort
       // put some entries in an WAL
       TableName tableName =
           TableName.valueOf(this.getClass().getName());
-      HRegionInfo regioninfo = new HRegionInfo(tableName,
-          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
-      final WAL log = wals.getWAL(regioninfo.getEncodedNameAsBytes(),
-          regioninfo.getTable().getNamespace());
+      RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
+      WAL log = wals.getWAL(regionInfo);
       MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);

-      final int total = 20;
+      int total = 20;
       for (int i = 0; i < total; i++) {
         WALEdit kvs = new WALEdit();
         kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
-        HTableDescriptor htd = new HTableDescriptor(tableName);
-        htd.addFamily(new HColumnDescriptor("column"));
         NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-        for(byte[] fam : htd.getFamiliesKeys()) {
-          scopes.put(fam, 0);
-        }
-        log.append(regioninfo, new WALKeyImpl(regioninfo.getEncodedNameAsBytes(), tableName,
-            System.currentTimeMillis(), mvcc, scopes), kvs, true);
+        scopes.put(Bytes.toBytes("column"), 0);
+        log.append(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName,
+            System.currentTimeMillis(), mvcc, scopes),
+          kvs, true);
       }
       // Send the data to HDFS datanodes and close the HDFS writer
       log.sync();
       ((AbstractFSWAL<?>) log).replaceWriter(((FSHLog)log).getOldPath(), null, null);

-      /* code taken from MasterFileSystem.getLogDirs(), which is called from MasterFileSystem.splitLog()
-       * handles RS shutdowns (as observed by the splitting process)
-       */
+      // code taken from MasterFileSystem.getLogDirs(), which is called from
+      // MasterFileSystem.splitLog() handles RS shutdowns (as observed by the splitting process)
       // rename the directory so a rogue RS doesn't create more WALs
       Path rsSplitDir = thisTestsDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
       if (!fs.rename(thisTestsDir, rsSplitDir)) {

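Note: for readers skimming the append path exercised above, here is the same sequence as a standalone sketch, using the imports shown in the hunk and mirroring the post-change code rather than adding anything new (a `WALFactory wals` and `TableName tableName` are assumed):

    RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
    WAL log = wals.getWAL(regionInfo);
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
    // Replication scope per column family; 0 means local-only (not replicated).
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    scopes.put(Bytes.toBytes("column"), 0);
    WALEdit kvs = new WALEdit();
    kvs.add(new KeyValue(Bytes.toBytes(0), tableName.getName(), tableName.getName()));
    log.append(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName,
        System.currentTimeMillis(), mvcc, scopes), kvs, true);
    log.sync(); // push the entry out to the HDFS datanodes
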
@@ -18,20 +18,21 @@
 package org.apache.hadoop.hbase.regionserver.wal;

 import static org.junit.Assert.assertFalse;

 import java.io.IOException;
 import java.util.NavigableMap;
 import java.util.TreeMap;
 import java.util.concurrent.ThreadLocalRandom;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -89,8 +90,8 @@ public class TestLogRollingNoCluster
     FSUtils.setRootDir(conf, dir);
     conf.set("hbase.regionserver.hlog.writer.impl", HighLatencySyncWriter.class.getName());
     final WALFactory wals = new WALFactory(conf, null, TestLogRollingNoCluster.class.getName());
-    final WAL wal = wals.getWAL(new byte[]{}, null);
+    final WAL wal = wals.getWAL(null);

     Appender [] appenders = null;

     final int numThreads = NUM_THREADS;
@@ -157,10 +158,10 @@ public class TestLogRollingNoCluster
         WALEdit edit = new WALEdit();
         byte[] bytes = Bytes.toBytes(i);
         edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY));
-        final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
-        final HTableDescriptor htd = TEST_UTIL.getMetaTableDescriptor();
+        RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO;
+        TableDescriptor htd = TEST_UTIL.getMetaTableDescriptorBuilder().build();
         NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-        for(byte[] fam : htd.getFamiliesKeys()) {
+        for(byte[] fam : htd.getColumnFamilyNames()) {
           scopes.put(fam, 0);
         }
         final long txid = wal.append(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(),

@@ -24,17 +24,15 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.NavigableMap;
 import java.util.TreeMap;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -100,23 +98,20 @@ public class TestWALActionsListener
     list.add(observer);
     final WALFactory wals = new WALFactory(conf, list, "testActionListener");
     DummyWALActionsListener laterobserver = new DummyWALActionsListener();
-    HRegionInfo hri = new HRegionInfo(TableName.valueOf(SOME_BYTES),
-        SOME_BYTES, SOME_BYTES, false);
-    final WAL wal = wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace());
+    RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(SOME_BYTES))
+        .setStartKey(SOME_BYTES).setEndKey(SOME_BYTES).build();
+    final WAL wal = wals.getWAL(hri);
     MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
     for (int i = 0; i < 20; i++) {
       byte[] b = Bytes.toBytes(i + "");
-      KeyValue kv = new KeyValue(b,b,b);
+      KeyValue kv = new KeyValue(b, b, b);
       WALEdit edit = new WALEdit();
       edit.add(kv);
-      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(SOME_BYTES));
-      htd.addFamily(new HColumnDescriptor(b));
       NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-      for(byte[] fam : htd.getFamiliesKeys()) {
-        scopes.put(fam, 0);
-      }
-      final long txid = wal.append(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(),
-          TableName.valueOf(b), 0, mvcc, scopes), edit, true);
+      scopes.put(b, 0);
+      long txid = wal.append(hri,
+        new WALKeyImpl(hri.getEncodedNameAsBytes(), TableName.valueOf(b), 0, mvcc, scopes), edit,
+        true);
       wal.sync(txid);
       if (i == 10) {
         wal.registerWALActionsListener(laterobserver);

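Note: the listener plumbing above reduces to a short sketch, assuming the test's `DummyWALActionsListener` helper, a prepared `Configuration conf`, and a `RegionInfo hri` as in the hunk; both registration paths shown here appear in the test itself:

    List<WALActionsListener> listeners = new ArrayList<>();
    listeners.add(new DummyWALActionsListener());          // observes from the start
    WALFactory wals = new WALFactory(conf, listeners, "testActionListener");
    WAL wal = wals.getWAL(hri);                            // listeners attach to this WAL
    wal.registerWALActionsListener(new DummyWALActionsListener()); // late registration
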
@@ -267,7 +267,7 @@ public abstract class TestReplicationSourceManager
     listeners.add(replication);
     final WALFactory wals = new WALFactory(utility.getConfiguration(), listeners,
         URLEncoder.encode("regionserver:60020", "UTF8"));
-    final WAL wal = wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace());
+    final WAL wal = wals.getWAL(hri);
     manager.init();
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("tableame"));
     htd.addFamily(new HColumnDescriptor(f1));

@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -35,16 +34,16 @@ import java.util.OptionalLong;
 import java.util.TreeMap;
 import java.util.concurrent.PriorityBlockingQueue;
 import java.util.concurrent.atomic.AtomicLong;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.replication.WALEntryFilter;
@@ -78,8 +77,8 @@ public class TestWALEntryStream
   private static final TableName tableName = TableName.valueOf("tablename");
   private static final byte[] family = Bytes.toBytes("column");
   private static final byte[] qualifier = Bytes.toBytes("qualifier");
-  private static final HRegionInfo info =
-      new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.LAST_ROW, false);
+  private static final RegionInfo info = RegionInfoBuilder.newBuilder(tableName)
+      .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.LAST_ROW).build();
   private static final NavigableMap<byte[], Integer> scopes = getScopes();

   private static NavigableMap<byte[], Integer> getScopes() {
@@ -118,7 +117,7 @@ public class TestWALEntryStream
     pathWatcher = new PathWatcher();
     listeners.add(pathWatcher);
     final WALFactory wals = new WALFactory(conf, listeners, tn.getMethodName());
-    log = wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace());
+    log = wals.getWAL(info);
   }

   @After

@@ -26,47 +26,45 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.client.RegionInfo;
 // imports for things that haven't moved from regionserver.wal yet.
 import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
 import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 /**
- * A WAL Provider that returns a single thread safe WAL that optionally can skip parts of our
- * normal interactions with HDFS.
- *
+ * A WAL Provider that returns a single thread safe WAL that optionally can skip parts of our normal
+ * interactions with HDFS.
+ * <p>
  * This implementation picks a directory in HDFS based on the same mechanisms as the
- * {@link FSHLogProvider}. Users can configure how much interaction
- * we have with HDFS with the configuration property "hbase.wal.iotestprovider.operations".
- * The value should be a comma separated list of allowed operations:
+ * {@link FSHLogProvider}. Users can configure how much interaction we have with HDFS with the
+ * configuration property "hbase.wal.iotestprovider.operations". The value should be a comma
+ * separated list of allowed operations:
  * <ul>
- * <li><em>append</em> : edits will be written to the underlying filesystem
- * <li><em>sync</em> : wal syncs will result in hflush calls
+ * <li><em>append</em> : edits will be written to the underlying filesystem</li>
+ * <li><em>sync</em> : wal syncs will result in hflush calls</li>
  * <li><em>fileroll</em> : roll requests will result in creating a new file on the underlying
- * filesystem.
+ * filesystem.</li>
  * </ul>
- * Additionally, the special cases "all" and "none" are recognized.
- * If ommited, the value defaults to "all."
- * Behavior is undefined if "all" or "none" are paired with additional values. Behavior is also
- * undefined if values not listed above are included.
- *
- * Only those operations listed will occur between the returned WAL and HDFS. All others
- * will be no-ops.
- *
+ * Additionally, the special cases "all" and "none" are recognized. If ommited, the value defaults
+ * to "all." Behavior is undefined if "all" or "none" are paired with additional values. Behavior is
+ * also undefined if values not listed above are included.
+ * <p>
+ * Only those operations listed will occur between the returned WAL and HDFS. All others will be
+ * no-ops.
+ * <p>
  * Note that in the case of allowing "append" operations but not allowing "fileroll", the returned
  * WAL will just keep writing to the same file. This won't avoid all costs associated with file
  * management over time, becaue the data set size may result in additional HDFS block allocations.
- *
  */
 @InterfaceAudience.Private
 public class IOTestProvider implements WALProvider {
@@ -114,7 +112,7 @@ public class IOTestProvider
   }

   @Override
-  public WAL getWAL(final byte[] identifier, byte[] namespace) throws IOException {
+  public WAL getWAL(RegionInfo region) throws IOException {
     return log;
   }

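Note: the Javadoc cleaned up above documents the provider's single knob. A hedged configuration sketch follows; the property name and its values come straight from that Javadoc, while selecting the provider via `WALFactory.WAL_PROVIDER` is an assumption about how a test might wire it in, not something this patch shows:

    Configuration conf = HBaseConfiguration.create();
    // Select the IOTestProvider (assumed wiring; tests may configure this differently).
    conf.set(WALFactory.WAL_PROVIDER, IOTestProvider.class.getName());
    // Allow appends and syncs to reach HDFS, but make file rolls no-ops.
    conf.set("hbase.wal.iotestprovider.operations", "append,sync");
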
@@ -20,23 +20,24 @@ package org.apache.hadoop.hbase.wal;

 import static org.apache.hadoop.hbase.wal.BoundedGroupingStrategy.DEFAULT_NUM_REGION_GROUPS;
 import static org.apache.hadoop.hbase.wal.BoundedGroupingStrategy.NUM_REGION_GROUPS;
-import static org.apache.hadoop.hbase.wal.RegionGroupingProvider.*;
+import static org.apache.hadoop.hbase.wal.RegionGroupingProvider.DELEGATE_PROVIDER;
+import static org.apache.hadoop.hbase.wal.RegionGroupingProvider.REGION_GROUPING_STRATEGY;
 import static org.apache.hadoop.hbase.wal.WALFactory.WAL_PROVIDER;
 import static org.junit.Assert.assertEquals;

 import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashSet;
-import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.junit.After;
@@ -179,12 +180,13 @@ public class TestBoundedRegionGroupingStrategy
       FSUtils.setRootDir(CONF, TEST_UTIL.getDataTestDirOnTestFS());

       wals = new WALFactory(CONF, null, "setMembershipDedups");
-      final Set<WAL> seen = new HashSet<>(temp * 4);
-      final Random random = new Random();
+      Set<WAL> seen = new HashSet<>(temp * 4);
       int count = 0;
       // we know that this should see one of the wals more than once
       for (int i = 0; i < temp * 8; i++) {
-        final WAL maybeNewWAL = wals.getWAL(Bytes.toBytes(random.nextInt()), null);
+        WAL maybeNewWAL = wals.getWAL(RegionInfoBuilder
+            .newBuilder(TableName.valueOf("Table-" + ThreadLocalRandom.current().nextInt()))
+            .build());
         LOG.info("Iteration " + i + ", checking wal " + maybeNewWAL);
         if (seen.add(maybeNewWAL)) {
           count++;

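Note: with `getWAL(RegionInfo)`, the grouping provider derives the WAL group from the region's identity rather than from caller-chosen bytes, which is why the test now builds a throwaway `RegionInfo` per iteration. The dedup behaviour the loop above relies on, sketched with the same imports (a `WALFactory wals` configured for bounded grouping is assumed):

    Set<WAL> seen = new HashSet<>();
    for (int i = 0; i < 64; i++) {
      RegionInfo ri = RegionInfoBuilder
          .newBuilder(TableName.valueOf("Table-" + ThreadLocalRandom.current().nextInt()))
          .build();
      seen.add(wals.getWAL(ri)); // bounded provider maps many regions onto few WALs
    }
    // seen.size() stays at or below the configured number of region groups.
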
@@ -26,29 +26,28 @@ import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.util.HashSet;
 import java.util.NavigableMap;
-import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.concurrent.ThreadLocalRandom;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 // imports for things that haven't moved from regionserver.wal yet.
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -63,10 +62,10 @@ import org.slf4j.LoggerFactory;
 public class TestFSHLogProvider {
   private static final Logger LOG = LoggerFactory.getLogger(TestFSHLogProvider.class);

-  protected static Configuration conf;
-  protected static FileSystem fs;
-  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  protected MultiVersionConcurrencyControl mvcc;
+  private static Configuration conf;
+  private static FileSystem fs;
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private MultiVersionConcurrencyControl mvcc;

   @Rule
   public final TestName currentTest = new TestName();
@@ -80,10 +79,6 @@ public class TestFSHLogProvider
     }
   }

-  @After
-  public void tearDown() throws Exception {
-  }
-
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     // Make block sizes small.
@@ -149,15 +144,15 @@ public class TestFSHLogProvider
   }


-  protected void addEdits(WAL log, HRegionInfo hri, HTableDescriptor htd,
-      int times, NavigableMap<byte[], Integer> scopes) throws IOException {
+  private void addEdits(WAL log, RegionInfo hri, TableDescriptor htd, int times,
+      NavigableMap<byte[], Integer> scopes) throws IOException {
     final byte[] row = Bytes.toBytes("row");
     for (int i = 0; i < times; i++) {
       long timestamp = System.currentTimeMillis();
       WALEdit cols = new WALEdit();
       cols.add(new KeyValue(row, row, row, timestamp, row));
       log.append(hri, getWalKey(hri.getEncodedNameAsBytes(), htd.getTableName(), timestamp, scopes),
         cols, true);
     }
     log.sync();
   }
@@ -181,37 +176,31 @@ public class TestFSHLogProvider
     wal.completeCacheFlush(regionEncodedName);
   }

-  private static final byte[] UNSPECIFIED_REGION = new byte[]{};
-
   @Test
   public void testLogCleaning() throws Exception {
     LOG.info(currentTest.getMethodName());
-    final HTableDescriptor htd =
-        new HTableDescriptor(TableName.valueOf(currentTest.getMethodName())).addFamily(new HColumnDescriptor(
-            "row"));
-    final HTableDescriptor htd2 =
-        new HTableDescriptor(TableName.valueOf(currentTest.getMethodName() + "2"))
-            .addFamily(new HColumnDescriptor("row"));
-    NavigableMap<byte[], Integer> scopes1 = new TreeMap<>(
-        Bytes.BYTES_COMPARATOR);
-    for(byte[] fam : htd.getFamiliesKeys()) {
+    TableDescriptor htd =
+        TableDescriptorBuilder.newBuilder(TableName.valueOf(currentTest.getMethodName()))
+            .addColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build();
+    TableDescriptor htd2 =
+        TableDescriptorBuilder.newBuilder(TableName.valueOf(currentTest.getMethodName() + "2"))
+            .addColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build();
+    NavigableMap<byte[], Integer> scopes1 = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    for (byte[] fam : htd.getColumnFamilyNames()) {
       scopes1.put(fam, 0);
     }
-    NavigableMap<byte[], Integer> scopes2 = new TreeMap<>(
-        Bytes.BYTES_COMPARATOR);
-    for(byte[] fam : htd2.getFamiliesKeys()) {
+    NavigableMap<byte[], Integer> scopes2 = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    for (byte[] fam : htd2.getColumnFamilyNames()) {
       scopes2.put(fam, 0);
     }
-    final Configuration localConf = new Configuration(conf);
+    Configuration localConf = new Configuration(conf);
     localConf.set(WALFactory.WAL_PROVIDER, FSHLogProvider.class.getName());
-    final WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
+    WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
     try {
-      HRegionInfo hri = new HRegionInfo(htd.getTableName(),
-          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
-      HRegionInfo hri2 = new HRegionInfo(htd2.getTableName(),
-          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
+      RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
+      RegionInfo hri2 = RegionInfoBuilder.newBuilder(htd2.getTableName()).build();
       // we want to mix edits from regions, so pick our own identifier.
-      final WAL log = wals.getWAL(UNSPECIFIED_REGION, null);
+      WAL log = wals.getWAL(null);

       // Add a single edit and make sure that rolling won't remove the file
       // Before HBASE-3198 it used to delete it
@@ -235,7 +224,7 @@ public class TestFSHLogProvider
       // Flush the first region, we expect to see the first two files getting
       // archived. We need to append something or writer won't be rolled.
       addEdits(log, hri2, htd2, 1, scopes2);
-      log.startCacheFlush(hri.getEncodedNameAsBytes(), htd.getFamiliesKeys());
+      log.startCacheFlush(hri.getEncodedNameAsBytes(), htd.getColumnFamilyNames());
       log.completeCacheFlush(hri.getEncodedNameAsBytes());
       log.rollWriter();
       assertEquals(2, AbstractFSWALProvider.getNumRolledLogFiles(log));
@@ -244,7 +233,7 @@ public class TestFSHLogProvider
       // since the oldest was completely flushed and the two others only contain
       // flush information
       addEdits(log, hri2, htd2, 1, scopes2);
-      log.startCacheFlush(hri2.getEncodedNameAsBytes(), htd2.getFamiliesKeys());
+      log.startCacheFlush(hri2.getEncodedNameAsBytes(), htd2.getColumnFamilyNames());
       log.completeCacheFlush(hri2.getEncodedNameAsBytes());
       log.rollWriter();
       assertEquals(0, AbstractFSWALProvider.getNumRolledLogFiles(log));
@@ -270,35 +259,28 @@ public class TestFSHLogProvider
   @Test
   public void testWALArchiving() throws IOException {
     LOG.debug(currentTest.getMethodName());
-    HTableDescriptor table1 =
-        new HTableDescriptor(TableName.valueOf(currentTest.getMethodName() + "1")).addFamily(new HColumnDescriptor("row"));
-    HTableDescriptor table2 =
-        new HTableDescriptor(TableName.valueOf(currentTest.getMethodName() + "2")).addFamily(new HColumnDescriptor("row"));
-    NavigableMap<byte[], Integer> scopes1 = new TreeMap<>(
-        Bytes.BYTES_COMPARATOR);
-    for(byte[] fam : table1.getFamiliesKeys()) {
+    TableDescriptor table1 =
+        TableDescriptorBuilder.newBuilder(TableName.valueOf(currentTest.getMethodName() + "1"))
+            .addColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build();
+    TableDescriptor table2 =
+        TableDescriptorBuilder.newBuilder(TableName.valueOf(currentTest.getMethodName() + "2"))
+            .addColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build();
+    NavigableMap<byte[], Integer> scopes1 = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    for (byte[] fam : table1.getColumnFamilyNames()) {
       scopes1.put(fam, 0);
     }
-    NavigableMap<byte[], Integer> scopes2 = new TreeMap<>(
-        Bytes.BYTES_COMPARATOR);
-    for(byte[] fam : table2.getFamiliesKeys()) {
+    NavigableMap<byte[], Integer> scopes2 = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    for (byte[] fam : table2.getColumnFamilyNames()) {
       scopes2.put(fam, 0);
     }
-    final Configuration localConf = new Configuration(conf);
+    Configuration localConf = new Configuration(conf);
     localConf.set(WALFactory.WAL_PROVIDER, FSHLogProvider.class.getName());
-    final WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
+    WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
     try {
-      final WAL wal = wals.getWAL(UNSPECIFIED_REGION, null);
+      WAL wal = wals.getWAL(null);
       assertEquals(0, AbstractFSWALProvider.getNumRolledLogFiles(wal));
-      HRegionInfo hri1 =
-          new HRegionInfo(table1.getTableName(), HConstants.EMPTY_START_ROW,
-              HConstants.EMPTY_END_ROW);
-      HRegionInfo hri2 =
-          new HRegionInfo(table2.getTableName(), HConstants.EMPTY_START_ROW,
-              HConstants.EMPTY_END_ROW);
-      // ensure that we don't split the regions.
-      hri1.setSplit(false);
-      hri2.setSplit(false);
+      RegionInfo hri1 = RegionInfoBuilder.newBuilder(table1.getTableName()).build();
+      RegionInfo hri2 = RegionInfoBuilder.newBuilder(table2.getTableName()).build();
       // variables to mock region sequenceIds.
       // start with the testing logic: insert a waledit, and roll writer
       addEdits(wal, hri1, table1, 1, scopes1);
@@ -312,7 +294,7 @@ public class TestFSHLogProvider
       assertEquals(2, AbstractFSWALProvider.getNumRolledLogFiles(wal));
       // add a waledit to table1, and flush the region.
       addEdits(wal, hri1, table1, 3, scopes1);
-      flushRegion(wal, hri1.getEncodedNameAsBytes(), table1.getFamiliesKeys());
+      flushRegion(wal, hri1.getEncodedNameAsBytes(), table1.getColumnFamilyNames());
       // roll log; all old logs should be archived.
       wal.rollWriter();
       assertEquals(0, AbstractFSWALProvider.getNumRolledLogFiles(wal));
@@ -326,7 +308,7 @@ public class TestFSHLogProvider
       assertEquals(2, AbstractFSWALProvider.getNumRolledLogFiles(wal));
       // add edits for table2, and flush hri1.
       addEdits(wal, hri2, table2, 2, scopes2);
-      flushRegion(wal, hri1.getEncodedNameAsBytes(), table2.getFamiliesKeys());
+      flushRegion(wal, hri1.getEncodedNameAsBytes(), table2.getColumnFamilyNames());
       // the log : region-sequenceId map is
       // log1: region2 (unflushed)
       // log2: region1 (flushed)
@@ -336,7 +318,7 @@ public class TestFSHLogProvider
       assertEquals(2, AbstractFSWALProvider.getNumRolledLogFiles(wal));
       // flush region2, and all logs should be archived.
       addEdits(wal, hri2, table2, 2, scopes2);
-      flushRegion(wal, hri2.getEncodedNameAsBytes(), table2.getFamiliesKeys());
+      flushRegion(wal, hri2.getEncodedNameAsBytes(), table2.getColumnFamilyNames());
       wal.rollWriter();
       assertEquals(0, AbstractFSWALProvider.getNumRolledLogFiles(wal));
     } finally {
@@ -365,18 +347,20 @@ public class TestFSHLogProvider
   */
   @Test
   public void setMembershipDedups() throws IOException {
-    final Configuration localConf = new Configuration(conf);
+    Configuration localConf = new Configuration(conf);
     localConf.set(WALFactory.WAL_PROVIDER, FSHLogProvider.class.getName());
-    final WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
+    WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
     try {
       final Set<WAL> seen = new HashSet<>(1);
-      final Random random = new Random();
       assertTrue("first attempt to add WAL from default provider should work.",
-          seen.add(wals.getWAL(Bytes.toBytes(random.nextInt()), null)));
+        seen.add(wals.getWAL(null)));
       for (int i = 0; i < 1000; i++) {
-        assertFalse("default wal provider is only supposed to return a single wal, which should "
-            + "compare as .equals itself.",
-            seen.add(wals.getWAL(Bytes.toBytes(random.nextInt()), null)));
+        assertFalse(
+          "default wal provider is only supposed to return a single wal, which should " +
+            "compare as .equals itself.",
+          seen.add(wals.getWAL(RegionInfoBuilder
+              .newBuilder(TableName.valueOf("Table-" + ThreadLocalRandom.current().nextInt()))
+              .build())));
       }
     } finally {
       wals.close();

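Note: the same descriptor migration recurs throughout these hunks; condensed into one sketch for reference, using only APIs that appear in the patch (the table and family names below are placeholders):

    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .addColumnFamily(ColumnFamilyDescriptorBuilder.of("row"))
        .build();
    RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
    // Family names now come from TableDescriptor.getColumnFamilyNames()
    // (the old HTableDescriptor API called this getFamiliesKeys()).
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getColumnFamilyNames()) {
      scopes.put(fam, 0); // replication scope per family
    }
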
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -25,7 +25,6 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.NavigableMap;
 import java.util.TreeMap;

 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -33,12 +32,11 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.regionserver.wal.SecureAsyncProtobufLogWriter;
@@ -106,14 +104,9 @@ public class TestSecureWAL
   @Test
   public void testSecureWAL() throws Exception {
     TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[^a-zA-Z0-9]", "_"));
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    htd.addFamily(new HColumnDescriptor(tableName.getName()));
     NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-    for(byte[] fam : htd.getFamiliesKeys()) {
-      scopes.put(fam, 0);
-    }
-    HRegionInfo regioninfo = new HRegionInfo(tableName,
-        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
+    scopes.put(tableName.getName(), 0);
+    RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
     final int total = 10;
     final byte[] row = Bytes.toBytes("row");
     final byte[] family = Bytes.toBytes("family");
@@ -123,15 +116,14 @@ public class TestSecureWAL
         new WALFactory(TEST_UTIL.getConfiguration(), null, tableName.getNameAsString());

     // Write the WAL
-    final WAL wal =
-        wals.getWAL(regioninfo.getEncodedNameAsBytes(), regioninfo.getTable().getNamespace());
+    final WAL wal = wals.getWAL(regionInfo);

     MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();

     for (int i = 0; i < total; i++) {
       WALEdit kvs = new WALEdit();
       kvs.add(new KeyValue(row, family, Bytes.toBytes(i), value));
-      wal.append(regioninfo, new WALKeyImpl(regioninfo.getEncodedNameAsBytes(), tableName,
+      wal.append(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName,
         System.currentTimeMillis(), mvcc, scopes), kvs, true);
     }
     wal.sync();

@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -30,7 +29,6 @@ import java.net.BindException;
 import java.util.List;
 import java.util.NavigableMap;
 import java.util.TreeMap;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -41,14 +39,15 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.SampleRegionWALCoprocessor;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
@@ -169,27 +168,24 @@ public class TestWALFactory {
     final byte [] rowName = tableName.getName();
     final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
     final int howmany = 3;
-    HRegionInfo[] infos = new HRegionInfo[3];
+    RegionInfo[] infos = new RegionInfo[3];
     Path tabledir = FSUtils.getTableDir(hbaseWALDir, tableName);
     fs.mkdirs(tabledir);
-    for(int i = 0; i < howmany; i++) {
-      infos[i] = new HRegionInfo(tableName,
-          Bytes.toBytes("" + i), Bytes.toBytes("" + (i+1)), false);
+    for (int i = 0; i < howmany; i++) {
+      infos[i] = RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes("" + i))
+          .setEndKey(Bytes.toBytes("" + (i + 1))).build();
       fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
       LOG.info("allo " + new Path(tabledir, infos[i].getEncodedName()).toString());
     }
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    htd.addFamily(new HColumnDescriptor("column"));
     NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-    for(byte[] fam : htd.getFamiliesKeys()) {
-      scopes.put(fam, 0);
-    }
+    scopes.put(Bytes.toBytes("column"), 0);

     // Add edits for three regions.
     for (int ii = 0; ii < howmany; ii++) {
       for (int i = 0; i < howmany; i++) {
         final WAL log =
-            wals.getWAL(infos[i].getEncodedNameAsBytes(), infos[i].getTable().getNamespace());
+            wals.getWAL(infos[i]);
         for (int j = 0; j < howmany; j++) {
           WALEdit edit = new WALEdit();
           byte [] family = Bytes.toBytes("column");
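Note: every `new HRegionInfo(table, startKey, endKey, split)` constructor call in these tests becomes a `RegionInfoBuilder` chain. A short sketch of the builder for a bounded region, assuming `tableName` is a `TableName` (the setter names are the ones this patch uses):

    RegionInfo info = RegionInfoBuilder.newBuilder(tableName)
        .setStartKey(Bytes.toBytes("a"))  // inclusive start key
        .setEndKey(Bytes.toBytes("b"))    // exclusive end key
        .build();

Omitting the start and end keys yields a region spanning the whole table, which is what the bare `RegionInfoBuilder.newBuilder(tableName).build()` calls elsewhere in this diff rely on.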
@@ -254,15 +250,10 @@ public class TestWALFactory {
     WAL.Reader reader = null;

     try {
-      HRegionInfo info = new HRegionInfo(tableName,
-          null,null, false);
-      HTableDescriptor htd = new HTableDescriptor(tableName);
-      htd.addFamily(new HColumnDescriptor(tableName.getName()));
+      RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build();
       NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-      for(byte[] fam : htd.getFamiliesKeys()) {
-        scopes.put(fam, 0);
-      }
-      final WAL wal = wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace());
+      scopes.put(tableName.getName(), 0);
+      final WAL wal = wals.getWAL(info);

      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
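Note: with the throwaway `HTableDescriptor` gone, the replication-scope map is seeded directly with the one family the test writes to, instead of looping over the descriptor's family keys. For a single-family test the resulting map is equivalent; a condensed sketch:

    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    // 0 is HConstants.REPLICATION_SCOPE_LOCAL, i.e. not replicated.
    scopes.put(tableName.getName(), 0);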
@@ -374,24 +365,18 @@ public class TestWALFactory {
   public void testAppendClose() throws Exception {
     TableName tableName =
         TableName.valueOf(currentTest.getMethodName());
-    HRegionInfo regioninfo = new HRegionInfo(tableName,
-        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
+    RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();

-    final WAL wal =
-        wals.getWAL(regioninfo.getEncodedNameAsBytes(), regioninfo.getTable().getNamespace());
-    final int total = 20;
+    WAL wal = wals.getWAL(regionInfo);
+    int total = 20;

-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    htd.addFamily(new HColumnDescriptor(tableName.getName()));
     NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-    for(byte[] fam : htd.getFamiliesKeys()) {
-      scopes.put(fam, 0);
-    }
+    scopes.put(tableName.getName(), 0);
     MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
     for (int i = 0; i < total; i++) {
       WALEdit kvs = new WALEdit();
       kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
-      wal.append(regioninfo, new WALKeyImpl(regioninfo.getEncodedNameAsBytes(), tableName,
+      wal.append(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis(), mvcc, scopes),
        kvs, true);
     }
@@ -496,20 +481,18 @@ public class TestWALFactory {

   /**
    * Tests that we can write out an edit, close, and then read it back in again.
-   * @throws IOException
    */
   @Test
   public void testEditAdd() throws IOException {
-    final int COL_COUNT = 10;
-    final HTableDescriptor htd =
-        new HTableDescriptor(TableName.valueOf(currentTest.getMethodName())).addFamily(new HColumnDescriptor(
-            "column"));
-    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(
-        Bytes.BYTES_COMPARATOR);
-    for(byte[] fam : htd.getFamiliesKeys()) {
+    int colCount = 10;
+    TableDescriptor htd =
+        TableDescriptorBuilder.newBuilder(TableName.valueOf(currentTest.getMethodName()))
+            .addColumnFamily(ColumnFamilyDescriptorBuilder.of("column")).build();
+    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
+    for (byte[] fam : htd.getColumnFamilyNames()) {
       scopes.put(fam, 0);
     }
-    final byte [] row = Bytes.toBytes("row");
+    byte[] row = Bytes.toBytes("row");
     WAL.Reader reader = null;
     try {
       final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
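Note: table-descriptor construction moves from the mutable `HTableDescriptor`/`HColumnDescriptor` pair to the immutable builder API. A sketch of the replacement pattern used throughout this file (the table name is illustrative):

    TableDescriptor htd = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("someTable"))
        .addColumnFamily(ColumnFamilyDescriptorBuilder.of("column"))
        .build();
    // Family names now come from the TableDescriptor interface:
    for (byte[] fam : htd.getColumnFamilyNames()) {
      // e.g. seed a replication-scope entry per family
    }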
@@ -518,21 +501,21 @@ public class TestWALFactory {
       // 1, 2, 3...
       long timestamp = System.currentTimeMillis();
       WALEdit cols = new WALEdit();
-      for (int i = 0; i < COL_COUNT; i++) {
+      for (int i = 0; i < colCount; i++) {
         cols.add(new KeyValue(row, Bytes.toBytes("column"),
             Bytes.toBytes(Integer.toString(i)),
             timestamp, new byte[] { (byte)(i + '0') }));
       }
-      HRegionInfo info = new HRegionInfo(htd.getTableName(),
-          row,Bytes.toBytes(Bytes.toString(row) + "1"), false);
-      final WAL log = wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace());
+      RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(row)
+          .setEndKey(Bytes.toBytes(Bytes.toString(row) + "1")).build();
+      final WAL log = wals.getWAL(info);

       final long txid = log.append(info,
           new WALKeyImpl(info.getEncodedNameAsBytes(), htd.getTableName(), System.currentTimeMillis(),
               mvcc, scopes),
          cols, true);
       log.sync(txid);
-      log.startCacheFlush(info.getEncodedNameAsBytes(), htd.getFamiliesKeys());
+      log.startCacheFlush(info.getEncodedNameAsBytes(), htd.getColumnFamilyNames());
       log.completeCacheFlush(info.getEncodedNameAsBytes());
       log.shutdown();
       Path filename = AbstractFSWALProvider.getCurrentFileName(log);
@@ -560,21 +543,17 @@ public class TestWALFactory {
     }
   }

-  /**
-   * @throws IOException
-   */
   @Test
   public void testAppend() throws IOException {
-    final int COL_COUNT = 10;
-    final HTableDescriptor htd =
-        new HTableDescriptor(TableName.valueOf(currentTest.getMethodName())).addFamily(new HColumnDescriptor(
-            "column"));
-    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(
-        Bytes.BYTES_COMPARATOR);
-    for(byte[] fam : htd.getFamiliesKeys()) {
+    int colCount = 10;
+    TableDescriptor htd =
+        TableDescriptorBuilder.newBuilder(TableName.valueOf(currentTest.getMethodName()))
+            .addColumnFamily(ColumnFamilyDescriptorBuilder.of("column")).build();
+    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
+    for (byte[] fam : htd.getColumnFamilyNames()) {
       scopes.put(fam, 0);
     }
-    final byte [] row = Bytes.toBytes("row");
+    byte[] row = Bytes.toBytes("row");
     WAL.Reader reader = null;
     final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
     try {
@@ -582,27 +561,26 @@ public class TestWALFactory {
       // 1, 2, 3...
       long timestamp = System.currentTimeMillis();
       WALEdit cols = new WALEdit();
-      for (int i = 0; i < COL_COUNT; i++) {
+      for (int i = 0; i < colCount; i++) {
         cols.add(new KeyValue(row, Bytes.toBytes("column"),
             Bytes.toBytes(Integer.toString(i)),
             timestamp, new byte[] { (byte)(i + '0') }));
       }
-      HRegionInfo hri = new HRegionInfo(htd.getTableName(),
-          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
-      final WAL log = wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace());
+      RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
+      final WAL log = wals.getWAL(hri);
       final long txid = log.append(hri,
           new WALKeyImpl(hri.getEncodedNameAsBytes(), htd.getTableName(), System.currentTimeMillis(),
               mvcc, scopes),
          cols, true);
       log.sync(txid);
-      log.startCacheFlush(hri.getEncodedNameAsBytes(), htd.getFamiliesKeys());
+      log.startCacheFlush(hri.getEncodedNameAsBytes(), htd.getColumnFamilyNames());
       log.completeCacheFlush(hri.getEncodedNameAsBytes());
       log.shutdown();
       Path filename = AbstractFSWALProvider.getCurrentFileName(log);
       // Now open a reader on the log and assert append worked.
       reader = wals.createReader(fs, filename);
       WAL.Entry entry = reader.next();
-      assertEquals(COL_COUNT, entry.getEdit().size());
+      assertEquals(colCount, entry.getEdit().size());
       int idx = 0;
       for (Cell val : entry.getEdit().getCells()) {
         assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
@@ -633,15 +611,11 @@ public class TestWALFactory {
     final DumbWALActionsListener visitor = new DumbWALActionsListener();
     final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
     long timestamp = System.currentTimeMillis();
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    htd.addFamily(new HColumnDescriptor("column"));
     NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-    for(byte[] fam : htd.getFamiliesKeys()) {
-      scopes.put(fam, 0);
-    }
-    HRegionInfo hri = new HRegionInfo(tableName,
-        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
-    final WAL log = wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace());
+    scopes.put(Bytes.toBytes("column"), 0);
+    RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build();
+    final WAL log = wals.getWAL(hri);
     log.registerWALActionsListener(visitor);
     for (int i = 0; i < COL_COUNT; i++) {
       WALEdit cols = new WALEdit();
@@ -670,7 +644,7 @@ public class TestWALFactory {
   @Test
   public void testWALCoprocessorLoaded() throws Exception {
     // test to see whether the coprocessor is loaded or not.
-    WALCoprocessorHost host = wals.getWAL(UNSPECIFIED_REGION, null).getCoprocessorHost();
+    WALCoprocessorHost host = wals.getWAL(null).getCoprocessorHost();
     Coprocessor c = host.findCoprocessor(SampleRegionWALCoprocessor.class);
     assertNotNull(c);
   }
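Note: callers that previously passed the sentinel `UNSPECIFIED_REGION` byte array now pass a `null` `RegionInfo`. Judging by how the tests use it, the factory treats a null region as "give me a WAL not tied to any particular region" (the provider's default WAL), which is why the sentinel constant can be deleted outright below:

    // Old sentinel form:          wals.getWAL(UNSPECIFIED_REGION, null)
    // New form, null RegionInfo:  wals.getWAL(null)
    WALCoprocessorHost host = wals.getWAL(null).getCoprocessorHost();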
@@ -690,7 +664,4 @@ public class TestWALFactory {
       increments++;
     }
   }
-
-  private static final byte[] UNSPECIFIED_REGION = new byte[]{};
-
 }

@@ -99,7 +99,7 @@ public class TestWALMethods {

     final Configuration walConf = new Configuration(util.getConfiguration());
     FSUtils.setRootDir(walConf, regiondir);
-    (new WALFactory(walConf, null, "dummyLogName")).getWAL(new byte[] {}, null);
+    (new WALFactory(walConf, null, "dummyLogName")).getWAL(null);

     NavigableSet<Path> files = WALSplitter.getSplitEditFilesSorted(fs, regiondir);
     assertEquals(7, files.size());

@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -25,22 +25,20 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.NavigableMap;
 import java.util.TreeMap;
-
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ByteBufferKeyValue;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 // imports for things that haven't moved from regionserver.wal yet.
@@ -85,7 +83,6 @@ public class TestWALReaderOnSecureWAL {
     FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
   }

-  @SuppressWarnings("deprecation")
   private Path writeWAL(final WALFactory wals, final String tblName, boolean offheap) throws IOException {
     Configuration conf = TEST_UTIL.getConfiguration();
     String clsName = conf.get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
@@ -93,22 +90,16 @@ public class TestWALReaderOnSecureWAL {
         WALCellCodec.class);
     try {
       TableName tableName = TableName.valueOf(tblName);
-      HTableDescriptor htd = new HTableDescriptor(tableName);
-      htd.addFamily(new HColumnDescriptor(tableName.getName()));
       NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-      for(byte[] fam : htd.getFamiliesKeys()) {
-        scopes.put(fam, 0);
-      }
-      HRegionInfo regioninfo = new HRegionInfo(tableName,
-          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
+      scopes.put(tableName.getName(), 0);
+      RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
       final int total = 10;
       final byte[] row = Bytes.toBytes("row");
       final byte[] family = Bytes.toBytes("family");
       final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);

       // Write the WAL
-      WAL wal =
-          wals.getWAL(regioninfo.getEncodedNameAsBytes(), regioninfo.getTable().getNamespace());
+      WAL wal = wals.getWAL(regionInfo);
       for (int i = 0; i < total; i++) {
         WALEdit kvs = new WALEdit();
         KeyValue kv = new KeyValue(row, family, Bytes.toBytes(i), value);
@@ -120,7 +111,7 @@ public class TestWALReaderOnSecureWAL {
       } else {
         kvs.add(kv);
       }
-      wal.append(regioninfo, new WALKeyImpl(regioninfo.getEncodedNameAsBytes(), tableName,
+      wal.append(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis(), mvcc, scopes), kvs, true);
     }
     wal.sync();

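Note: `writeWAL` above shows the whole write path in its post-patch form. A condensed, self-contained sketch of that sequence, assuming a configured `WALFactory wals` and a populated replication `scopes` map (every other name is local to the sketch):

    TableName tableName = TableName.valueOf("testTable");
    RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
    WAL wal = wals.getWAL(regionInfo);          // look the WAL up by RegionInfo
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
    WALEdit edit = new WALEdit();
    edit.add(new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"),
        Bytes.toBytes("qualifier"), Bytes.toBytes("value")));
    wal.append(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(),
        tableName, System.currentTimeMillis(), mvcc, scopes), edit, true);
    wal.sync();                                 // durably flush the append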
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -17,17 +17,22 @@
  */
 package org.apache.hadoop.hbase.wal;

+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -40,13 +45,6 @@ import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-
 @Category(MediumTests.class)
 public class TestWALRootDir {
   private static final Logger LOG = LoggerFactory.getLogger(TestWALRootDir.class);
@@ -54,10 +52,9 @@ public class TestWALRootDir {
   private static Configuration conf;
   private static FileSystem fs;
   private static FileSystem walFs;
-  static final TableName tableName = TableName.valueOf("TestWALWALDir");
+  private static final TableName tableName = TableName.valueOf("TestWALWALDir");
   private static final byte [] rowName = Bytes.toBytes("row");
   private static final byte [] family = Bytes.toBytes("column");
-  private static HTableDescriptor htd;
   private static Path walRootDir;
   private static Path rootDir;
   private static WALFactory wals;
@@ -75,8 +72,6 @@ public class TestWALRootDir {
     walRootDir = TEST_UTIL.createWALRootDir();
     fs = FSUtils.getRootDirFileSystem(conf);
     walFs = FSUtils.getWALFileSystem(conf);
-    htd = new HTableDescriptor(tableName);
-    htd.addFamily(new HColumnDescriptor(family));
   }

   @AfterClass
@@ -87,9 +82,9 @@ public class TestWALRootDir {

   @Test
   public void testWALRootDir() throws Exception {
-    HRegionInfo regionInfo = new HRegionInfo(tableName);
+    RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
     wals = new WALFactory(conf, null, "testWALRootDir");
-    WAL log = wals.getWAL(regionInfo.getEncodedNameAsBytes(), regionInfo.getTable().getNamespace());
+    WAL log = wals.getWAL(regionInfo);

     assertEquals(1, getWALFiles(walFs, walRootDir).size());
     byte [] value = Bytes.toBytes("value");
@@ -117,7 +112,7 @@ public class TestWALRootDir {
         new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME)).size());
   }

-  protected WALKeyImpl getWalKey(final long time, HRegionInfo hri, final long startPoint) {
+  private WALKeyImpl getWalKey(final long time, RegionInfo hri, final long startPoint) {
     return new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, time,
         new MultiVersionConcurrencyControl(startPoint));
   }

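Note: helper methods such as `getWalKey` migrate their parameter type from the concrete `HRegionInfo` class to the `RegionInfo` interface. That is the other half of this change: once `WALFactory.getWAL` accepts a `RegionInfo`, callers no longer need the concrete class anywhere. Sketch of the migrated helper (as in the hunk above, with `tableName` a field of the test):

    private WALKeyImpl getWalKey(long time, RegionInfo hri, long startPoint) {
      return new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, time,
          new MultiVersionConcurrencyControl(startPoint));
    }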
@@ -23,10 +23,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;

-import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.reflect.Method;
@@ -45,7 +41,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -58,18 +53,16 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.wal.FaultyProtobufLogReader;
 import org.apache.hadoop.hbase.regionserver.wal.InstrumentedLogWriter;
 import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
 import org.apache.hadoop.hbase.security.User;
-import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -98,6 +91,14 @@ import org.mockito.stubbing.Answer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
+
 /**
  * Testing {@link WAL} splitting code.
  */
@@ -373,10 +374,10 @@ public class TestWALSplit {
    */
   @Test (timeout=300000)
   public void testRecoveredEditsPathForMeta() throws IOException {
-    byte [] encoded = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
+    byte[] encoded = RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
     Path tdir = FSUtils.getTableDir(HBASEDIR, TableName.META_TABLE_NAME);
     Path regiondir = new Path(tdir,
-        HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
+        RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
     fs.mkdirs(regiondir);
     long now = System.currentTimeMillis();
     Entry entry =
@@ -386,7 +387,7 @@ public class TestWALSplit {
     Path p = WALSplitter.getRegionSplitEditsPath(fs, entry, HBASEDIR,
         FILENAME_BEING_SPLIT);
     String parentOfParent = p.getParent().getParent().getName();
-    assertEquals(parentOfParent, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
+    assertEquals(parentOfParent, RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
   }

   /**
@@ -395,10 +396,10 @@ public class TestWALSplit {
    */
   @Test (timeout=300000)
   public void testOldRecoveredEditsFileSidelined() throws IOException {
-    byte [] encoded = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
+    byte [] encoded = RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
     Path tdir = FSUtils.getTableDir(HBASEDIR, TableName.META_TABLE_NAME);
     Path regiondir = new Path(tdir,
-        HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
+        RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
     fs.mkdirs(regiondir);
     long now = System.currentTimeMillis();
     Entry entry =
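Note: the canonical descriptor for the meta region moves with the type: `HRegionInfo.FIRST_META_REGIONINFO` becomes `RegionInfoBuilder.FIRST_META_REGIONINFO` in both meta-path tests, e.g.:

    byte[] encoded = RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
    String name = RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName();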
@@ -412,7 +413,7 @@ public class TestWALSplit {
     Path p = WALSplitter.getRegionSplitEditsPath(fs, entry, HBASEDIR,
         FILENAME_BEING_SPLIT);
     String parentOfParent = p.getParent().getParent().getName();
-    assertEquals(parentOfParent, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
+    assertEquals(parentOfParent, RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
     WALFactory.createRecoveredEditsWriter(fs, p, conf).close();
   }

@@ -459,7 +460,7 @@ public class TestWALSplit {

   @Test (timeout=300000)
   public void testSplitLeavesCompactionEventsEdits() throws IOException{
-    HRegionInfo hri = new HRegionInfo(TABLE_NAME);
+    RegionInfo hri = RegionInfoBuilder.newBuilder(TABLE_NAME).build();
     REGIONS.clear();
     REGIONS.add(hri.getEncodedName());
     Path regionDir = new Path(FSUtils.getTableDir(HBASEDIR, TABLE_NAME), hri.getEncodedName());
@@ -1132,7 +1133,7 @@ public class TestWALSplit {
     REGIONS.add(regionName);
     generateWALs(-1);

-    wals.getWAL(Bytes.toBytes(regionName), null);
+    wals.getWAL(null);
     FileStatus[] logfiles = fs.listStatus(WALDIR);
     assertTrue("There should be some log file",
       logfiles != null && logfiles.length > 0);
@@ -1337,7 +1338,7 @@ public class TestWALSplit {
     return count;
   }

-  private static void appendCompactionEvent(Writer w, HRegionInfo hri, String[] inputs,
+  private static void appendCompactionEvent(Writer w, RegionInfo hri, String[] inputs,
       String output) throws IOException {
     WALProtos.CompactionDescriptor.Builder desc = WALProtos.CompactionDescriptor.newBuilder();
     desc.setTableName(ByteString.copyFrom(hri.getTable().toBytes()))

@@ -20,6 +20,11 @@ package org.apache.hadoop.hbase.wal;

 import static com.codahale.metrics.MetricRegistry.name;

+import com.codahale.metrics.ConsoleReporter;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Meter;
+import com.codahale.metrics.MetricFilter;
+import com.codahale.metrics.MetricRegistry;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -30,7 +35,7 @@ import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.IntStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileStatus;
@@ -39,14 +44,15 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MockRegionServerServices;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.LogRoller;
@@ -71,12 +77,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import com.codahale.metrics.ConsoleReporter;
-import com.codahale.metrics.Histogram;
-import com.codahale.metrics.Meter;
-import com.codahale.metrics.MetricFilter;
-import com.codahale.metrics.MetricRegistry;
-
 // imports for things that haven't moved from regionserver.wal yet.

 /**
@@ -131,11 +131,10 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
     private final boolean noSync;
     private final HRegion region;
     private final int syncInterval;
-    private final HTableDescriptor htd;
     private final Sampler loopSampler;
     private final NavigableMap<byte[], Integer> scopes;

-    WALPutBenchmark(final HRegion region, final HTableDescriptor htd,
+    WALPutBenchmark(final HRegion region, final TableDescriptor htd,
         final long numIterations, final boolean noSync, final int syncInterval,
         final double traceFreq) {
       this.numIterations = numIterations;
@@ -143,9 +142,8 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
       this.syncInterval = syncInterval;
       this.numFamilies = htd.getColumnFamilyCount();
       this.region = region;
-      this.htd = htd;
       scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-      for(byte[] fam : htd.getFamiliesKeys()) {
+      for(byte[] fam : htd.getColumnFamilyNames()) {
         scopes.put(fam, 0);
       }
       String spanReceivers = getConf().get("hbase.trace.spanreceiver.classes");
@@ -320,7 +318,7 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
     if (rootRegionDir == null) {
       rootRegionDir = TEST_UTIL.getDataTestDirOnTestFS("WALPerformanceEvaluation");
     }
-    rootRegionDir = rootRegionDir.makeQualified(fs);
+    rootRegionDir = rootRegionDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
     cleanRegionRootDir(fs, rootRegionDir);
     FSUtils.setRootDir(getConf(), rootRegionDir);
     final WALFactory wals = new WALFactory(getConf(), null, "wals");
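Note: the one-argument `Path.makeQualified(FileSystem)` is deprecated in Hadoop, so the benchmark switches to the two-argument overload that takes the pieces explicitly:

    // Deprecated:   rootRegionDir.makeQualified(fs);
    // Replacement:  supply the default URI and working directory yourself.
    rootRegionDir = rootRegionDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());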
@@ -334,7 +332,7 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
     for(int i = 0; i < numRegions; i++) {
       // Initialize Table Descriptor
       // a table per desired region means we can avoid carving up the key space
-      final HTableDescriptor htd = createHTableDescriptor(i, numFamilies);
+      final TableDescriptor htd = createHTableDescriptor(i, numFamilies);
       regions[i] = openRegion(fs, rootRegionDir, htd, wals, roll, roller);
       benchmarks[i] = TraceUtil.wrap(new WALPutBenchmark(regions[i], htd, numIterations, noSync,
           syncInterval, traceFreq), "");
@@ -401,14 +399,14 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
     return(0);
   }

-  private static HTableDescriptor createHTableDescriptor(final int regionNum,
+  private static TableDescriptor createHTableDescriptor(final int regionNum,
       final int numFamilies) {
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE_NAME + ":" + regionNum));
-    for (int i = 0; i < numFamilies; ++i) {
-      HColumnDescriptor colDef = new HColumnDescriptor(FAMILY_PREFIX + i);
-      htd.addFamily(colDef);
-    }
-    return htd;
+    TableDescriptorBuilder builder =
+        TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE_NAME + ":" + regionNum));
+    IntStream.range(0, numFamilies)
+        .mapToObj(i -> ColumnFamilyDescriptorBuilder.of(FAMILY_PREFIX + i))
+        .forEachOrdered(builder::addColumnFamily);
+    return builder.build();
   }

   /**
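Note: `createHTableDescriptor` now returns an immutable `TableDescriptor`, assembling the families through a stream instead of a mutation loop. A standalone sketch of the same construction, with the class constants inlined as literals for illustration:

    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("WALPerformanceEvaluation:0"));
    IntStream.range(0, 3)  // 3 families, an illustrative count
        .mapToObj(i -> ColumnFamilyDescriptorBuilder.of("family" + i))
        .forEachOrdered(builder::addColumnFamily);
    TableDescriptor htd = builder.build();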
@@ -495,13 +493,12 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {

   private final Set<WAL> walsListenedTo = new HashSet<>();

-  private HRegion openRegion(final FileSystem fs, final Path dir, final HTableDescriptor htd,
+  private HRegion openRegion(final FileSystem fs, final Path dir, final TableDescriptor htd,
       final WALFactory wals, final long whenToRoll, final LogRoller roller) throws IOException {
     // Initialize HRegion
     RegionInfo regionInfo = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
     // Initialize WAL
-    final WAL wal =
-        wals.getWAL(regionInfo.getEncodedNameAsBytes(), regionInfo.getTable().getNamespace());
+    final WAL wal = wals.getWAL(regionInfo);
     // If we haven't already, attach a listener to this wal to handle rolls and metrics.
     if (walsListenedTo.add(wal)) {
       roller.addWAL(wal);