From e43882e84ae44301eabd0122b5e5492da5fe9f66 Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Mon, 9 Mar 2015 10:52:17 -0700
Subject: [PATCH] HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast.
 Contributed by Tsz Wo Nicholas Sze.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/TestAppendSnapshotTruncate.java    | 61 ++++++++++++-------
 .../server/namenode/TestFileTruncate.java   | 11 +++-
 3 files changed, 51 insertions(+), 24 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e106b1a2cd2..094abfe6aa6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -734,6 +734,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7411. Change decommission logic to throttle by blocks rather than
     nodes in each interval. (Andrew Wang via cdouglas)
 
+    HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast.
+    (Tsz Wo Nicholas Sze via jing9)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
index 5c4c7b4a279..e80e14f5655 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -41,10 +41,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -69,6 +65,9 @@ public class TestAppendSnapshotTruncate {
   private static final int BLOCK_SIZE = 1024;
   private static final int DATANODE_NUM = 3;
   private static final short REPLICATION = 3;
+  private static final int FILE_WORKER_NUM = 3;
+  private static final long TEST_TIME_SECOND = 10;
+  private static final long TEST_TIMEOUT_SECOND = TEST_TIME_SECOND + 60;
   static final int SHORT_HEARTBEAT = 1;
   static final String[] EMPTY_STRINGS = {};
 
@@ -106,7 +105,7 @@ public class TestAppendSnapshotTruncate {
 
   /** Test randomly mixing append, snapshot and truncate operations.
    */
-  @Test
+  @Test(timeout=TEST_TIMEOUT_SECOND*1000)
   public void testAST() throws Exception {
     final String dirPathString = "/dir";
     final Path dir = new Path(dirPathString);
@@ -121,12 +120,12 @@ public class TestAppendSnapshotTruncate {
     }
     localDir.mkdirs();
 
-    final DirWorker w = new DirWorker(dir, localDir, 3);
+    final DirWorker w = new DirWorker(dir, localDir, FILE_WORKER_NUM);
     w.startAllFiles();
     w.start();
-    Worker.sleep(10L*1000);
+    Worker.sleep(TEST_TIME_SECOND * 1000);
     w.stop();
-    w.stoptAllFiles();
+    w.stopAllFiles();
     w.checkEverything();
   }
 
@@ -259,7 +258,7 @@
       }
     }
 
-    void stoptAllFiles() throws InterruptedException {
+    void stopAllFiles() throws InterruptedException {
       for(FileWorker f : files) {
         f.stop();
       }
@@ -269,12 +268,12 @@
       LOG.info("checkEverything");
       for(FileWorker f : files) {
         f.checkFullFile();
-        Preconditions.checkState(f.state.get() != State.ERROR);
+        f.checkErrorState();
       }
       for(String snapshot : snapshotPaths.keySet()) {
         checkSnapshot(snapshot);
       }
-      Preconditions.checkState(state.get() != State.ERROR);
+      checkErrorState();
     }
   }
 
@@ -364,7 +363,7 @@
       b.append(", newLength=").append(newLength)
           .append(", isReady=").append(isReady);
       if (!isReady) {
-        TestFileTruncate.checkBlockRecovery(file, dfs);
+        TestFileTruncate.checkBlockRecovery(file, dfs, 100, 300L);
       }
       return isReady;
     }
@@ -407,6 +406,7 @@
       IDLE(false), RUNNING(false), STOPPED(true), ERROR(true);
 
       final boolean isTerminated;
+
       State(boolean isTerminated) {
         this.isTerminated = isTerminated;
       }
@@ -416,11 +416,29 @@
     final AtomicReference<State> state = new AtomicReference<State>(State.IDLE);
     final AtomicBoolean isCalling = new AtomicBoolean();
     final AtomicReference<Thread> thread = new AtomicReference<Thread>();
-    
+
+    private Throwable thrown = null;
+
     Worker(String name) {
       this.name = name;
     }
 
+    State checkErrorState() {
+      final State s = state.get();
+      if (s == State.ERROR) {
+        throw new IllegalStateException(name + " has " + s, thrown);
+      }
+      return s;
+    }
+
+    void setErrorState(Throwable t) {
+      checkErrorState();
+
+      LOG.error("Worker " + name + " failed.", t);
+      state.set(State.ERROR);
+      thrown = t;
+    }
+
     void start() {
       Preconditions.checkState(state.compareAndSet(State.IDLE, State.RUNNING));
 
@@ -429,14 +447,13 @@
         @Override
         public void run() {
           final Random r = DFSUtil.getRandom();
-          for(State s; (s = state.get()) == State.RUNNING || s == State.IDLE;) {
+          for(State s; !(s = checkErrorState()).isTerminated;) {
             if (s == State.RUNNING) {
               isCalling.set(true);
               try {
                 LOG.info(call());
-              } catch (Exception e) {
-                LOG.error("Worker " + name + " failed.", e);
-                state.set(State.ERROR);
+              } catch(Throwable t) {
+                setErrorState(t);
                 return;
               }
               isCalling.set(false);
@@ -451,7 +468,11 @@
     }
 
     boolean isPaused() {
-      return state.get() == State.IDLE && !isCalling.get();
+      final State s = checkErrorState();
+      if (s == State.STOPPED) {
+        throw new IllegalStateException(name + " is " + s);
+      }
+      return s == State.IDLE && !isCalling.get();
     }
 
     void pause() {
@@ -459,9 +480,7 @@
     }
 
     void stop() throws InterruptedException {
-      if (state.get() == State.ERROR) {
-        return;
-      }
+      checkErrorState();
 
       state.set(State.STOPPED);
       thread.get().join();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 19b5cdedb73..b69d3456be0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -1151,8 +1151,13 @@ public class TestFileTruncate {
 
   public static void checkBlockRecovery(Path p, DistributedFileSystem dfs)
       throws IOException {
+    checkBlockRecovery(p, dfs, SUCCESS_ATTEMPTS, SLEEP);
+  }
+
+  public static void checkBlockRecovery(Path p, DistributedFileSystem dfs,
+      int attempts, long sleepMs) throws IOException {
     boolean success = false;
-    for(int i = 0; i < SUCCESS_ATTEMPTS; i++) {
+    for(int i = 0; i < attempts; i++) {
       LocatedBlocks blocks = getLocatedBlocks(p, dfs);
       boolean noLastBlock = blocks.getLastLocatedBlock() == null;
       if(!blocks.isUnderConstruction() &&
@@ -1160,9 +1165,9 @@
         success = true;
         break;
       }
-      try { Thread.sleep(SLEEP); } catch (InterruptedException ignored) {}
+      try { Thread.sleep(sleepMs); } catch (InterruptedException ignored) {}
     }
-    assertThat("inode should complete in ~" + sleepMs * attempts + " ms.",
+    assertThat("inode should complete in ~" + sleepMs * attempts + " ms.",
         success, is(true));
   }
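
A note for readers, separate from the patch itself: the heart of the change is the fail-fast Worker state machine. A worker records the first Throwable it encounters, flips its state to ERROR, and every later call to checkErrorState() rethrows that cause, so the coordinating test thread fails immediately with the real stack trace instead of spinning until the JUnit timeout fires. Below is a minimal, self-contained Java sketch of that pattern; the class FailFastWorkerSketch and its members are simplified, hypothetical stand-ins, not the actual Hadoop test code.

// FailFastWorkerSketch.java -- illustrative sketch only, not the Hadoop test code.
import java.util.concurrent.atomic.AtomicReference;

public class FailFastWorkerSketch {

  enum State {
    IDLE(false), RUNNING(false), STOPPED(true), ERROR(true);

    final boolean isTerminated;

    State(boolean isTerminated) {
      this.isTerminated = isTerminated;
    }
  }

  static class Worker {
    final String name;
    final AtomicReference<State> state =
        new AtomicReference<State>(State.RUNNING);
    private volatile Throwable thrown = null;

    Worker(String name) {
      this.name = name;
    }

    // Fail fast: once a failure is recorded, every state query rethrows it
    // so callers stop immediately instead of waiting for a test timeout.
    State checkErrorState() {
      final State s = state.get();
      if (s == State.ERROR) {
        throw new IllegalStateException(name + " has " + s, thrown);
      }
      return s;
    }

    void setErrorState(Throwable t) {
      thrown = t;
      state.set(State.ERROR);
    }

    // Catches Throwable, not just Exception, so an AssertionError thrown on
    // a worker thread is surfaced too; a real worker would loop over a task.
    void runOnce(Runnable task) {
      if (checkErrorState().isTerminated) {
        return;
      }
      try {
        task.run();
      } catch (Throwable t) {
        setErrorState(t);
      }
    }
  }

  public static void main(String[] args) {
    final Worker w = new Worker("demo");
    w.runOnce(new Runnable() {
      @Override
      public void run() {
        throw new AssertionError("simulated worker failure");
      }
    });
    try {
      w.checkErrorState();  // rethrows with the original failure as the cause
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage() + ", cause: " + e.getCause());
    }
  }
}

The AtomicReference plus a volatile thrown field mirrors the state/thrown pair the patch adds to Worker: the ERROR transition publishes the failure across threads, and catching Throwable rather than Exception ensures failed assertions on worker threads are reported as well.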