S3 access from EC2 seems to be more sensitive. Allow test setup deletion to try twice instead of failing

git-svn-id: http://jclouds.googlecode.com/svn/trunk@1477 3d8758e0-26b5-11de-8745-db77d3ebf521
adrian.f.cole 2009-06-27 20:37:21 +00:00
parent 9c84796aa3
commit d5f6cc6f21
1 changed file with 9 additions and 2 deletions
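
The pattern this change introduces is small enough to sketch on its own: run the cleanup once, and if it fails with an AssertionError, run it a second time before giving up. Below is a minimal, self-contained illustration of that idea; only the try/catch shape and the deleteEverything()/AssertionError names come from the diff, while the class and the other method names are hypothetical stand-ins, not the actual S3IntegrationTest code.

import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical sketch of the retry-once idea from this commit.
public class RetryOnceSketch {
   private final AtomicInteger attempts = new AtomicInteger();

   // Simulates a cleanup step that fails on its first attempt, as S3 sometimes
   // appears to do when exercised from inside EC2.
   void deleteEverything() {
      if (attempts.incrementAndGet() == 1)
         throw new AssertionError("bucket not yet deletable");
   }

   // Try twice to delete everything; a second AssertionError still fails the setup.
   void deleteEverythingTolerantly() {
      try {
         deleteEverything();
      } catch (AssertionError e) {
         deleteEverything();
      }
   }

   public static void main(String[] args) {
      new RetryOnceSketch().deleteEverythingTolerantly();
      System.out.println("cleanup succeeded on the second attempt");
   }
}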


@@ -288,11 +288,19 @@ public class S3IntegrationTest {
    private static final BlockingQueue<String> bucketNames = new ArrayBlockingQueue<String>(
             bucketCount);
+   /**
+    * There are a lot of retries here mainly from experience running inside amazon EC2.
+    */
    @BeforeGroups(dependsOnMethods = { "setUpClient" }, groups = { "integration", "live" })
    public void setUpBuckets(ITestContext context) throws Exception {
       synchronized (bucketNames) {
          if (bucketNames.peek() == null) {
-            this.deleteEverything();
+            // try twice to delete everything
+            try {
+               deleteEverything();
+            } catch (AssertionError e) {
+               deleteEverything();
+            }
             for (; bucketIndex < bucketCount; bucketIndex++) {
                String bucketName = bucketPrefix + bucketIndex;
                try {
@@ -330,7 +338,6 @@ public class S3IntegrationTest {
             if (metaDatum.getName().startsWith(bucketPrefix.toLowerCase())) {
                deleteBucket(metaDatum.getName());
             }
          }
       } catch (CancellationException e) {
          throw e;
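
The second hunk shows only context around the change: the cleanup walks the bucket listing, deletes the buckets whose names start with the test's prefix, and rethrows CancellationException rather than swallowing it. A rough standalone sketch of that shape follows, assuming hypothetical listOwnedBucketNames() and deleteBucket() helpers in place of the real jclouds connection.

import java.util.List;
import java.util.concurrent.CancellationException;

// Hypothetical sketch of the prefix-scoped cleanup visible in the second hunk;
// listOwnedBucketNames() and deleteBucket() stand in for the real jclouds calls.
abstract class PrefixCleanupSketch {
   abstract List<String> listOwnedBucketNames();

   abstract void deleteBucket(String name);

   void deleteEverything(String bucketPrefix) {
      try {
         for (String name : listOwnedBucketNames()) {
            // only remove buckets created by this test run
            if (name.startsWith(bucketPrefix.toLowerCase()))
               deleteBucket(name);
         }
      } catch (CancellationException e) {
         // propagate cancellation; the real test's remaining handlers fall below the visible cut
         throw e;
      }
   }
}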