From 16af8df41fd2a4bfaded2598efecd1b1e60dacfb Mon Sep 17 00:00:00 2001
From: Elliott Clark
Date: Thu, 15 Oct 2015 22:29:17 -0400
Subject: [PATCH] HBASE-14625 Chaos Monkey should shut down faster

---
 .../hadoop/hbase/chaos/actions/Action.java | 9 ++++++
 .../hbase/chaos/actions/AddColumnAction.java | 5 ++++
 .../chaos/actions/BatchRestartRsAction.java | 30 +++++++++++++------
 .../actions/ChangeBloomFilterAction.java | 4 +++
 .../actions/ChangeCompressionAction.java | 5 ++++
 .../chaos/actions/ChangeEncodingAction.java | 4 +++
 .../chaos/actions/ChangeVersionsAction.java | 4 +++
 .../actions/DecreaseMaxHFileSizeAction.java | 5 ++++
 .../hbase/chaos/actions/FlushTableAction.java | 5 ++++
 .../chaos/actions/ForceBalancerAction.java | 4 +++
 ...rgeRandomAdjacentRegionsOfTableAction.java | 6 ++++
 .../actions/MoveRegionsOfTableAction.java | 6 ++++
 .../chaos/actions/RemoveColumnAction.java | 4 +++
 .../actions/RestartActionBaseAction.java | 17 +++++++++++
 .../actions/RollingBatchRestartRsAction.java | 2 +-
 .../chaos/actions/SnapshotTableAction.java | 5 ++++
 .../actions/SplitAllRegionOfTableAction.java | 5 +++-
 .../SplitRandomRegionOfTableAction.java | 4 +++
 .../chaos/actions/TruncateTableAction.java | 5 ++++
 .../UnbalanceKillAndRebalanceAction.java | 15 ++++++++--
 .../hadoop/hbase/chaos/policies/Policy.java | 15 ++++++++++
 21 files changed, 146 insertions(+), 13 deletions(-)

diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
index d632ce5bb83..fe140e29a76 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
@@ -210,6 +210,11 @@ public class Action {
         + " servers to " + toServers.size() + " different servers");
     Admin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
     for (byte[] victimRegion : victimRegions) {
+      // Don't keep moving regions if we're
+      // trying to stop the monkey.
+      if (context.isStopping()) {
+        break;
+      }
       int targetIx = RandomUtils.nextInt(toServers.size());
       admin.move(victimRegion, Bytes.toBytes(toServers.get(targetIx).getServerName()));
     }
@@ -249,5 +254,9 @@ public class Action {
     public HBaseCluster getHBaseCluster() {
       return util.getHBaseClusterInterface();
     }
+
+    public boolean isStopping() {
+      return false;
+    }
   }
 }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java
index e7d2e12ab4d..27268a4632a 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java
@@ -53,6 +53,11 @@ public class AddColumnAction extends Action {
       columnDescriptor = new HColumnDescriptor(RandomStringUtils.randomAlphabetic(5));
     }
 
+    // Don't try the modify if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
+
     LOG.debug("Performing action: Adding " + columnDescriptor + " to " + tableName);
     tableDescriptor.addFamily(columnDescriptor);
 
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java
index b6a5b502f94..ce660006b27 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java
@@ -18,7 +18,9 @@
 
 package org.apache.hadoop.hbase.chaos.actions;
 
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
@@ -41,29 +43,39 @@ public class BatchRestartRsAction extends RestartActionBaseAction {
     List<ServerName> selectedServers = PolicyBasedChaosMonkey.selectRandomItems(getCurrentServers(),
         ratio);
 
-    for (ServerName server : selectedServers) {
-      LOG.info("Killing region server:" + server);
-      cluster.killRegionServer(server);
-    }
+    Set<ServerName> killedServers = new HashSet<ServerName>();
 
     for (ServerName server : selectedServers) {
+      // Don't keep killing servers if we're
+      // trying to stop the monkey.
+      if (context.isStopping()) {
+        break;
+      }
+      LOG.info("Killing region server:" + server);
+      cluster.killRegionServer(server);
+      killedServers.add(server);
+    }
+
+    for (ServerName server : killedServers) {
       cluster.waitForRegionServerToStop(server, PolicyBasedChaosMonkey.TIMEOUT);
     }
 
-    LOG.info("Killed " + selectedServers.size() + " region servers. Reported num of rs:"
+    LOG.info("Killed " + killedServers.size() + " region servers. Reported num of rs:"
         + cluster.getClusterStatus().getServersSize());
 
     sleep(sleepTime);
 
-    for (ServerName server : selectedServers) {
+    for (ServerName server : killedServers) {
       LOG.info("Starting region server:" + server.getHostname());
       cluster.startRegionServer(server.getHostname(), server.getPort());
     }
-    for (ServerName server : selectedServers) {
-      cluster.waitForRegionServerToStart(server.getHostname(), server.getPort(), PolicyBasedChaosMonkey.TIMEOUT);
+    for (ServerName server : killedServers) {
+      cluster.waitForRegionServerToStart(server.getHostname(),
+          server.getPort(),
+          PolicyBasedChaosMonkey.TIMEOUT);
     }
-    LOG.info("Started " + selectedServers.size() +" region servers. Reported num of rs:"
+    LOG.info("Started " + killedServers.size() +" region servers. Reported num of rs:"
         + cluster.getClusterStatus().getServersSize());
   }
 }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeBloomFilterAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeBloomFilterAction.java
index 5bd7444a0da..684cd629863 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeBloomFilterAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeBloomFilterAction.java
@@ -73,6 +73,10 @@ public class ChangeBloomFilterAction extends Action {
           + descriptor.getNameAsString() + " of table " + tableName);
     }
 
+    // Don't try the modify if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
     admin.modifyTable(tableName, tableDescriptor);
   }
 }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeCompressionAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeCompressionAction.java
index f048b588dbd..0d7f7aeafbf 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeCompressionAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeCompressionAction.java
@@ -74,6 +74,11 @@ public class ChangeCompressionAction extends Action {
       }
     }
 
+    // Don't try the modify if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
+
     admin.modifyTable(tableName, tableDescriptor);
   }
 }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java
index b18aba598c3..c4553f15524 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java
@@ -66,6 +66,10 @@ public class ChangeEncodingAction extends Action {
           + " to: " + descriptor.getDataBlockEncoding());
     }
 
+    // Don't try the modify if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
     admin.modifyTable(tableName, tableDescriptor);
   }
 }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeVersionsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeVersionsAction.java
index 16f1cb03da9..76e152f19f1 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeVersionsAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeVersionsAction.java
@@ -61,6 +61,10 @@ public class ChangeVersionsAction extends Action {
     for(HColumnDescriptor descriptor:columnDescriptors) {
       descriptor.setVersions(versions, versions);
     }
+    // Don't try the modify if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
     LOG.debug("Performing action: Changing versions on " + tableName.getNameAsString());
     admin.modifyTable(tableName, tableDescriptor);
   }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DecreaseMaxHFileSizeAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DecreaseMaxHFileSizeAction.java
index 55a34f482cd..eaada1693bd 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DecreaseMaxHFileSizeAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DecreaseMaxHFileSizeAction.java
@@ -68,6 +68,11 @@ public class DecreaseMaxHFileSizeAction extends Action {
     // Change the table descriptor.
     htd.setMaxFileSize(newValue);
 
+    // Don't try the modify if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
+
     // modify the table.
     admin.modifyTable(tableName, htd);
 
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushTableAction.java
index f86d2a13bdd..ddce57e8b53 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushTableAction.java
@@ -43,6 +43,11 @@ public class FlushTableAction extends Action {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
     Admin admin = util.getHBaseAdmin();
 
+    // Don't try the flush if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
+
     LOG.info("Performing action: Flush table " + tableName);
     try {
       admin.flush(tableName);
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ForceBalancerAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ForceBalancerAction.java
index 9909c6ecf1b..04f389a9519 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ForceBalancerAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ForceBalancerAction.java
@@ -24,6 +24,10 @@ package org.apache.hadoop.hbase.chaos.actions;
 public class ForceBalancerAction extends Action {
   @Override
   public void perform() throws Exception {
+    // Don't try the balance if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
     LOG.info("Balancing regions");
     forceBalancer();
   }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MergeRandomAdjacentRegionsOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MergeRandomAdjacentRegionsOfTableAction.java
index 0aaefe598c8..8645dc4c940 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MergeRandomAdjacentRegionsOfTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MergeRandomAdjacentRegionsOfTableAction.java
@@ -58,6 +58,12 @@ public class MergeRandomAdjacentRegionsOfTableAction extends Action {
     HRegionInfo a = regions.get(i++);
     HRegionInfo b = regions.get(i);
     LOG.debug("Merging " + a.getRegionNameAsString() + " and " + b.getRegionNameAsString());
+
+    // Don't try the merge if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
+
     try {
       admin.mergeRegions(a.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), false);
     } catch (Exception ex) {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
index 5e380a33a0f..d5f0e9652bf 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
@@ -69,6 +69,12 @@ public class MoveRegionsOfTableAction extends Action {
     long start = System.currentTimeMillis();
     for (HRegionInfo regionInfo:regions) {
+
+      // Don't try the move if we're stopping
+      if (context.isStopping()) {
+        return;
+      }
+
       try {
         String destServerName =
           servers[RandomUtils.nextInt(servers.length)].getServerName();
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
index c083d9c12a4..20bdaa3c1d4 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
@@ -68,6 +68,10 @@ public class RemoveColumnAction extends Action {
         + tableName.getNameAsString());
     tableDescriptor.removeFamily(colDescName);
 
+    // Don't try the modify if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
     admin.modifyTable(tableName, tableDescriptor);
   }
 }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActionBaseAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActionBaseAction.java
index 3f209da945f..63286cb1f9a 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActionBaseAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActionBaseAction.java
@@ -40,6 +40,11 @@ public class RestartActionBaseAction extends Action {
 
   void restartMaster(ServerName server, long sleepTime) throws IOException {
     sleepTime = Math.max(sleepTime, 1000);
+    // Don't try the kill if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
+
     killMaster(server);
     sleep(sleepTime);
     startMaster(server);
@@ -47,6 +52,10 @@
 
   void restartRs(ServerName server, long sleepTime) throws IOException {
     sleepTime = Math.max(sleepTime, 1000);
+    // Don't try the kill if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
     killRs(server);
     sleep(sleepTime);
     startRs(server);
@@ -54,6 +63,10 @@
 
   void restartZKNode(ServerName server, long sleepTime) throws IOException {
     sleepTime = Math.max(sleepTime, 1000);
+    // Don't try the kill if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
     killZKNode(server);
     sleep(sleepTime);
     startZKNode(server);
@@ -61,6 +74,10 @@
 
   void restartDataNode(ServerName server, long sleepTime) throws IOException {
     sleepTime = Math.max(sleepTime, 1000);
+    // Don't try the kill if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
     killDataNode(server);
     sleep(sleepTime);
     startDataNode(server);
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java
index d481b58121b..e79ff5b5739 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java
@@ -63,7 +63,7 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction {
     Queue<ServerName> deadServers = new LinkedList<ServerName>();
 
     // loop while there are servers to be killed or dead servers to be restarted
-    while (!serversToBeKilled.isEmpty() || !deadServers.isEmpty()) {
+    while ((!serversToBeKilled.isEmpty() || !deadServers.isEmpty()) && !context.isStopping()) {
       KillOrStart action = KillOrStart.KILL;
       if (serversToBeKilled.isEmpty()) { // no more servers to kill
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java
index 1c7a9d9428f..15b8e86feec 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java
@@ -44,6 +44,11 @@ public class SnapshotTableAction extends Action {
     String snapshotName = tableName + "-it-" + System.currentTimeMillis();
     Admin admin = util.getHBaseAdmin();
 
+    // Don't try the snapshot if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
+
     LOG.info("Performing action: Snapshot table " + tableName);
     admin.snapshot(snapshotName, tableName);
     if (sleepTime > 0) {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitAllRegionOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitAllRegionOfTableAction.java
index 6e7d0f91fcf..3c76ebfb662 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitAllRegionOfTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitAllRegionOfTableAction.java
@@ -34,7 +34,10 @@ public class SplitAllRegionOfTableAction extends Action {
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
     Admin admin = util.getHBaseAdmin();
-
+    // Don't try the split if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
     LOG.info("Performing action: Split all regions of " + tableName);
     admin.split(tableName);
   }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitRandomRegionOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitRandomRegionOfTableAction.java
index 209ef2475d8..25c80e918cf 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitRandomRegionOfTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitRandomRegionOfTableAction.java
@@ -53,6 +53,10 @@ public class SplitRandomRegionOfTableAction extends Action {
       LOG.info("Table " + tableName + " doesn't have regions to split");
       return;
     }
+    // Don't try the split if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
 
     HRegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(
         regions.toArray(new HRegionInfo[regions.size()]));
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/TruncateTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/TruncateTableAction.java
index 05a72cfeb11..2a4871d3e72 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/TruncateTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/TruncateTableAction.java
@@ -42,6 +42,11 @@ public class TruncateTableAction extends Action {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
     Admin admin = util.getHBaseAdmin();
 
+    // Don't try the truncate if we're stopping
+    if (context.isStopping()) {
+      return;
+    }
+
     boolean preserveSplits = random.nextBoolean();
     LOG.info("Performing action: Truncate table " + tableName.getNameAsString()
         + "preserve splits " + preserveSplits);
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java
index a97a9c49419..1ac14589f81 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java
@@ -19,8 +19,10 @@
 package org.apache.hadoop.hbase.chaos.actions;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.commons.lang.math.RandomUtils;
 import org.apache.hadoop.hbase.ClusterStatus;
@@ -51,6 +53,8 @@ public class UnbalanceKillAndRebalanceAction extends Action {
   public void perform() throws Exception {
     ClusterStatus status = this.cluster.getClusterStatus();
     List<ServerName> victimServers = new LinkedList<ServerName>(status.getServers());
+    Set<ServerName> killedServers = new HashSet<ServerName>();
+
     int liveCount = (int)Math.ceil(FRC_SERVERS_THAT_HOARD_AND_LIVE * victimServers.size());
     int deadCount = (int)Math.ceil(FRC_SERVERS_THAT_HOARD_AND_DIE * victimServers.size());
     Assert.assertTrue((liveCount + deadCount) < victimServers.size());
@@ -62,13 +66,20 @@ public class UnbalanceKillAndRebalanceAction extends Action {
     unbalanceRegions(status, victimServers, targetServers, HOARD_FRC_OF_REGIONS);
     Thread.sleep(waitForUnbalanceMilliSec);
     for (int i = 0; i < liveCount; ++i) {
+      // Don't keep killing servers if we're
+      // trying to stop the monkey.
+      if (context.isStopping()) {
+        break;
+      }
       killRs(targetServers.get(i));
+      killedServers.add(targetServers.get(i));
     }
+
     Thread.sleep(waitForKillsMilliSec);
     forceBalancer();
     Thread.sleep(waitAfterBalanceMilliSec);
-    for (int i = 0; i < liveCount; ++i) {
-      startRs(targetServers.get(i));
+    for (ServerName server:killedServers) {
+      startRs(server);
     }
   }
 }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/Policy.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/Policy.java
index e60d24e591a..6b365f81a51 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/Policy.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/Policy.java
@@ -35,14 +35,29 @@ public abstract class Policy extends StoppableImplementation implements Runnable
 
   public void init(PolicyContext context) throws Exception {
     this.context = context;
+
+    // Used to wire up stopping.
+    context.setPolicy(this);
   }
 
   /**
    * A context for a Policy
    */
   public static class PolicyContext extends Action.ActionContext {
+
+    Policy policy = null;
+
     public PolicyContext(IntegrationTestingUtility util) {
       super(util);
     }
+
+    @Override
+    public boolean isStopping() {
+      return policy.isStopped();
+    }
+
+    public void setPolicy(Policy policy) {
+      this.policy = policy;
+    }
   }
 }
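
Note (illustrative, not part of the patch): the change above adds an isStopping() hook to Action.ActionContext (false by default) and overrides it in Policy.PolicyContext to delegate to Policy.isStopped(), so actions can bail out instead of starting new destructive work once the monkey is asked to stop. Below is a minimal sketch of how a custom action might use the hook; the class name is hypothetical, and it assumes existing Action helpers (getCurrentServers(), killRs(), startRs()) that are not shown in this diff.

  package org.apache.hadoop.hbase.chaos.actions;

  import org.apache.hadoop.hbase.ServerName;

  /** Hypothetical example action that honors the new isStopping() hook. */
  public class RestartFirstRsExampleAction extends Action {
    private final long sleepTime;

    public RestartFirstRsExampleAction(long sleepTime) {
      this.sleepTime = sleepTime;
    }

    @Override
    public void perform() throws Exception {
      // Don't start new destructive work if the monkey is shutting down.
      if (context.isStopping()) {
        return;
      }
      ServerName[] servers = getCurrentServers();  // assumed helper on Action
      if (servers.length == 0) {
        return;
      }
      ServerName victim = servers[0];
      killRs(victim);                              // assumed helper on Action
      Thread.sleep(sleepTime);
      // Restart what was killed even if a stop arrived meanwhile,
      // so the cluster is left usable when the monkey exits.
      startRs(victim);                             // assumed helper on Action
    }
  }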