HBASE-27274 Re-enable the disabled tests when implementing HBASE-27212 (#5178)

Signed-off-by: Duo Zhang <zhangduo@apache.org>
This commit is contained in:
Liangjun He 2023-04-18 21:50:32 +08:00 committed by Duo Zhang
parent 8fe691c705
commit 679dfcfd4f
1 changed file with 83 additions and 53 deletions

View File

 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertEquals;

import java.util.Collections;
import java.util.List;
import java.util.stream.Stream;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.replication.ReplicationGroupOffset;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
import org.apache.hadoop.hbase.replication.ReplicationQueueId;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.HbckErrorReporter.ERROR_CODE;
import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
@Category({ ReplicationTests.class, MediumTests.class }) @Category({ ReplicationTests.class, MediumTests.class })
public class TestHBaseFsckReplication { public class TestHBaseFsckReplication {
@ -36,65 +53,78 @@ public class TestHBaseFsckReplication {
HBaseClassTestRule.forClass(TestHBaseFsckReplication.class); HBaseClassTestRule.forClass(TestHBaseFsckReplication.class);
private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
@Rule
public final TestName name = new TestName();
@BeforeClass @Before
public static void setUp() throws Exception { public void setUp() throws Exception {
UTIL.getConfiguration().setBoolean("hbase.write.hbck1.lock.file", false); UTIL.getConfiguration().setBoolean("hbase.write.hbck1.lock.file", false);
UTIL.startMiniCluster(1); UTIL.startMiniCluster(1);
TableName tableName = TableName.valueOf("replication_" + name.getMethodName());
UTIL.getAdmin()
.createTable(ReplicationStorageFactory.createReplicationQueueTableDescriptor(tableName));
UTIL.getConfiguration().set(ReplicationStorageFactory.REPLICATION_QUEUE_TABLE_NAME,
tableName.getNameAsString());
} }
@AfterClass @After
public static void tearDown() throws Exception { public void tearDown() throws Exception {
UTIL.shutdownMiniCluster(); UTIL.shutdownMiniCluster();
} }
// TODO: reimplement
@Ignore
@Test @Test
public void test() throws Exception { public void test() throws Exception {
// ReplicationPeerStorage peerStorage = ReplicationStorageFactory ReplicationPeerStorage peerStorage = ReplicationStorageFactory.getReplicationPeerStorage(
// .getReplicationPeerStorage(UTIL.getZooKeeperWatcher(), UTIL.getConfiguration()); UTIL.getTestFileSystem(), UTIL.getZooKeeperWatcher(), UTIL.getConfiguration());
// ReplicationQueueStorage queueStorage = ReplicationStorageFactory ReplicationQueueStorage queueStorage = ReplicationStorageFactory
// .getReplicationQueueStorage(UTIL.getZooKeeperWatcher(), UTIL.getConfiguration()); .getReplicationQueueStorage(UTIL.getConnection(), UTIL.getConfiguration());
//
// String peerId1 = "1"; String peerId1 = "1";
// String peerId2 = "2"; String peerId2 = "2";
// peerStorage.addPeer(peerId1, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(), peerStorage.addPeer(peerId1, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
// true, SyncReplicationState.NONE); true, SyncReplicationState.NONE);
// peerStorage.addPeer(peerId2, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(), peerStorage.addPeer(peerId2, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
// true, SyncReplicationState.NONE); true, SyncReplicationState.NONE);
// for (int i = 0; i < 10; i++) { ReplicationQueueId queueId = null;
// queueStorage.addWAL(ServerName.valueOf("localhost", 10000 + i, 100000 + i), peerId1, for (int i = 0; i < 10; i++) {
// "file-" + i); queueId = new ReplicationQueueId(getServerName(i), peerId1);
// } queueStorage.setOffset(queueId, "group-" + i,
// queueStorage.addWAL(ServerName.valueOf("localhost", 10000, 100000), peerId2, "file"); new ReplicationGroupOffset("file-" + i, i * 100), Collections.emptyMap());
// HBaseFsck fsck = HbckTestingUtil.doFsck(UTIL.getConfiguration(), true); }
// HbckTestingUtil.assertNoErrors(fsck); queueId = new ReplicationQueueId(getServerName(0), peerId2);
// queueStorage.setOffset(queueId, "group-" + 0, new ReplicationGroupOffset("file-" + 0, 100),
// // should not remove anything since the replication peer is still alive Collections.emptyMap());
// assertEquals(10, queueStorage.getListOfReplicators().size()); HBaseFsck fsck = HbckTestingUtil.doFsck(UTIL.getConfiguration(), true);
// peerStorage.removePeer(peerId1); HbckTestingUtil.assertNoErrors(fsck);
// // there should be orphan queues
// assertEquals(10, queueStorage.getListOfReplicators().size()); // should not remove anything since the replication peer is still alive
// fsck = HbckTestingUtil.doFsck(UTIL.getConfiguration(), false); assertEquals(10, queueStorage.listAllReplicators().size());
// HbckTestingUtil.assertErrors(fsck, Stream.generate(() -> { peerStorage.removePeer(peerId1);
// return ERROR_CODE.UNDELETED_REPLICATION_QUEUE; // there should be orphan queues
// }).limit(10).toArray(ERROR_CODE[]::new)); assertEquals(10, queueStorage.listAllReplicators().size());
// fsck = HbckTestingUtil.doFsck(UTIL.getConfiguration(), false);
// // should not delete anything when fix is false HbckTestingUtil.assertErrors(fsck, Stream.generate(() -> {
// assertEquals(10, queueStorage.getListOfReplicators().size()); return ERROR_CODE.UNDELETED_REPLICATION_QUEUE;
// }).limit(10).toArray(ERROR_CODE[]::new));
// fsck = HbckTestingUtil.doFsck(UTIL.getConfiguration(), true);
// HbckTestingUtil.assertErrors(fsck, Stream.generate(() -> { // should not delete anything when fix is false
// return ERROR_CODE.UNDELETED_REPLICATION_QUEUE; assertEquals(10, queueStorage.listAllReplicators().size());
// }).limit(10).toArray(ERROR_CODE[]::new));
// fsck = HbckTestingUtil.doFsck(UTIL.getConfiguration(), true);
// List<ServerName> replicators = queueStorage.getListOfReplicators(); HbckTestingUtil.assertErrors(fsck, Stream.generate(() -> {
// // should not remove the server with queue for peerId2 return ERROR_CODE.UNDELETED_REPLICATION_QUEUE;
// assertEquals(1, replicators.size()); }).limit(10).toArray(HbckErrorReporter.ERROR_CODE[]::new));
// assertEquals(ServerName.valueOf("localhost", 10000, 100000), replicators.get(0));
// for (String queueId : queueStorage.getAllQueues(replicators.get(0))) { List<ServerName> replicators = queueStorage.listAllReplicators();
// assertEquals(peerId2, queueId); // should not remove the server with queue for peerId2
// } assertEquals(1, replicators.size());
assertEquals(ServerName.valueOf("localhost", 10000, 100000), replicators.get(0));
for (ReplicationQueueId qId : queueStorage.listAllQueueIds(replicators.get(0))) {
assertEquals(peerId2, qId.getPeerId());
}
}
private ServerName getServerName(int i) {
return ServerName.valueOf("localhost", 10000 + i, 100000 + i);
} }
} }