From fe0541b58d8a1aead015bb440528ec84e25ec1c9 Mon Sep 17 00:00:00 2001
From: GuoPhilipse <46367746+GuoPhilipse@users.noreply.github.com>
Date: Tue, 14 Feb 2023 15:29:38 +0800
Subject: [PATCH] HDFS-16913. Fix flaky some unit tests since they offen timeout (#5377)

Co-authored-by: gf13871
Reviewed-by: Tao Li
Reviewed-by: Shilun Fan
Signed-off-by: Shilun Fan
---
 .../apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java | 2 +-
 .../java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java    | 6 +++---
 .../hadoop/hdfs/server/balancer/TestBalancerService.java   | 2 +-
 .../hdfs/server/namenode/ha/TestPipelinesFailover.java     | 2 +-
 .../hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java | 2 +-
 5 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
index c8420ca82a8..f993db0f1fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
@@ -32,7 +32,7 @@ public class TestFileLengthOnClusterRestart {
    * Tests the fileLength when we sync the file and restart the cluster and
    * Datanodes not report to Namenode yet.
    */
-  @Test(timeout = 60000)
+  @Test(timeout = 120000)
   public void testFileLengthWithHSyncAndClusterRestartWithOutDNsRegister()
       throws Exception {
     final Configuration conf = new HdfsConfiguration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index bfa3deaa6b1..5d472439550 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -493,17 +493,17 @@ public void testSoftLeaseRecovery() throws Exception {
    *
    * @throws Exception
    */
-  @Test(timeout = 30000)
+  @Test(timeout = 60000)
   public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception {
     hardLeaseRecoveryRestartHelper(false, -1);
   }
 
-  @Test(timeout = 30000)
+  @Test(timeout = 60000)
   public void testHardLeaseRecoveryAfterNameNodeRestart2() throws Exception {
     hardLeaseRecoveryRestartHelper(false, 1535);
   }
 
-  @Test(timeout = 30000)
+  @Test(timeout = 60000)
   public void testHardLeaseRecoveryWithRenameAfterNameNodeRestart()
       throws Exception {
     hardLeaseRecoveryRestartHelper(true, -1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java
index a75b5092df7..eb0a8ef860a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java
@@ -168,7 +168,7 @@ public void testBalancerServiceBalanceTwice() throws Exception {
     }
   }
 
-  @Test(timeout = 60000)
+  @Test(timeout = 120000)
   public void testBalancerServiceOnError() throws Exception {
     Configuration conf = new HdfsConfiguration();
     // retry for every 5 seconds
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
index 260dd7049d5..cc80af6fb1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
@@ -210,7 +210,7 @@ public void testWriteOverGracefulFailoverWithDnFail() throws Exception {
     doTestWriteOverFailoverWithDnFail(TestScenario.GRACEFUL_FAILOVER);
   }
 
-  @Test(timeout=30000)
+  @Test(timeout=60000)
   public void testWriteOverCrashFailoverWithDnFail() throws Exception {
     doTestWriteOverFailoverWithDnFail(TestScenario.ORIGINAL_ACTIVE_CRASHED);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
index 0a262f899ab..9737d1d31f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
@@ -548,7 +548,7 @@ public void testSnapshotOpsOnReservedPath() throws Exception {
    * paths work and the NN can load the resulting edits. This test if for
    * snapshots at the root level.
   */
-  @Test(timeout = 60000)
+  @Test(timeout = 120000)
  public void testSnapshotOpsOnRootReservedPath() throws Exception {
    Path dir = new Path("/");
    Path sub = new Path(dir, "sub");
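
For context on the mechanism being tuned: the patch only changes the JUnit 4 per-test timeout attribute. @Test(timeout = ...) runs the annotated method under a budget given in milliseconds and fails the test (with a TestTimedOutException in recent JUnit 4 releases) if the body has not returned by then. The sketch below is illustrative only and not part of the patch; the class, method names, and doWork() helper are made up to show the before/after pattern the diff applies to the HDFS tests.

import static org.junit.Assert.assertTrue;

import org.junit.Test;

// Illustrative stand-in for the tests touched by this patch: the only change
// the diff makes is doubling the millisecond budget in the annotation
// (30s -> 60s, 60s -> 120s), so slow-but-correct runs stop failing as timeouts.
public class TimeoutExampleTest {

  // Old pattern: a tight budget that a loaded CI host can occasionally exceed.
  @Test(timeout = 30000)
  public void runsWithinThirtySeconds() throws Exception {
    assertTrue(doWork());
  }

  // New pattern after the patch: same test logic, larger budget.
  @Test(timeout = 60000)
  public void runsWithinSixtySeconds() throws Exception {
    assertTrue(doWork());
  }

  // Hypothetical work; the real tests spin up a MiniDFSCluster, restart
  // NameNodes/DataNodes, and wait for recovery, which makes runtime vary.
  private boolean doWork() throws InterruptedException {
    Thread.sleep(100);
    return true;
  }
}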