HDFS-4366. Block Replication Policy Implementation May Skip Higher-Priority Blocks for Lower-Priority Blocks. Contributed by Derek Dagit.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1510724 13f79535-47bb-0310-9956-ffa450edef68
Kihwal Lee 2013-08-05 20:48:59 +00:00
parent c4fb095d4a
commit ccdb978603
6 changed files with 325 additions and 42 deletions
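
The bug, in short: chooseUnderReplicatedBlocks() resumed its scan of each priority queue from a stored integer index, but that index was not kept consistent when blocks were removed or changed priority between scans, so a scan could resume past blocks that had never been handed out; lower-priority blocks were then replicated ahead of the skipped higher-priority ones. The patch moves the resume point into the queue itself as a node-based bookmark that the queue repairs on every mutation. A minimal sketch of the idea, with hypothetical names (the real classes, UnderReplicatedBlocks and LightWeightLinkedSet, follow below):

    import java.util.ArrayList;
    import java.util.List;

    /**
     * Hypothetical illustration only; not the HDFS classes. A singly linked
     * list that keeps a "bookmark": a reference to the next element a scan
     * should hand out. The list repairs the bookmark on every mutation, so
     * a scan resumed later cannot skip an element it has not yet seen.
     */
    class BookmarkedList<T> {
      private static class Node<T> {
        final T value;
        Node<T> next;
        Node(T value) { this.value = value; }
      }

      private Node<T> head, tail;
      private Node<T> bookmark; // next unvisited element, or null

      public void add(T value) {
        Node<T> node = new Node<T>(value);
        if (head == null) {
          head = node;
          tail = node;
        } else {
          tail.next = node;
          tail = node;
        }
        // A fully consumed bookmark falls through to newly added elements.
        if (bookmark == null) {
          bookmark = node;
        }
      }

      public void remove(T value) {
        Node<T> prev = null;
        Node<T> cur = head;
        while (cur != null && !cur.value.equals(value)) {
          prev = cur;
          cur = cur.next;
        }
        if (cur == null) {
          return; // not present
        }
        if (cur == bookmark) {
          bookmark = cur.next; // keep the resume point valid
        }
        if (prev == null) {
          head = cur.next;
        } else {
          prev.next = cur.next;
        }
        if (cur == tail) {
          tail = prev;
        }
      }

      /** Hand out up to n elements, remembering where the scan stopped. */
      public List<T> takeFromBookmark(int n) {
        List<T> out = new ArrayList<T>();
        while (bookmark != null && out.size() < n) {
          out.add(bookmark.value);
          bookmark = bookmark.next;
        }
        return out;
      }

      /** Start the next scan from the head again. */
      public void resetBookmark() {
        bookmark = head;
      }
    }

Because the bookmark is a reference to the next unvisited node rather than a count of consumed elements, removals elsewhere in the list cannot shift an unvisited element behind the resume point.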

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -262,6 +262,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5035. getFileLinkStatus and rename do not correctly check permissions
     of symlinks. (Andrew Wang via Colin Patrick McCabe)
+    HDFS-4366. Block Replication Policy Implementation May Skip Higher-Priority
+    Blocks for Lower-Priority Blocks (Derek Dagit via kihwal)
 Release 2.1.1-beta - UNRELEASED
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -1208,7 +1208,6 @@ public class BlockManager {
           // abandoned block or block reopened for append
           if(bc == null || bc instanceof MutableBlockCollection) {
             neededReplications.remove(block, priority); // remove from neededReplications
-            neededReplications.decrementReplicationIndex(priority);
             continue;
           }
@@ -1235,7 +1234,6 @@ public class BlockManager {
           if ( (pendingReplications.getNumReplicas(block) > 0) ||
               (blockHasEnoughRacks(block)) ) {
             neededReplications.remove(block, priority); // remove from neededReplications
-            neededReplications.decrementReplicationIndex(priority);
             blockLog.info("BLOCK* Removing " + block
                 + " from neededReplications as it has enough replicas");
             continue;
@@ -1295,7 +1293,6 @@ public class BlockManager {
           if(bc == null || bc instanceof MutableBlockCollection) {
             neededReplications.remove(block, priority); // remove from neededReplications
             rw.targets = null;
-            neededReplications.decrementReplicationIndex(priority);
             continue;
           }
           requiredReplication = bc.getBlockReplication();
@@ -1309,7 +1306,6 @@ public class BlockManager {
           if ( (pendingReplications.getNumReplicas(block) > 0) ||
               (blockHasEnoughRacks(block)) ) {
             neededReplications.remove(block, priority); // remove from neededReplications
-            neededReplications.decrementReplicationIndex(priority);
             rw.targets = null;
             blockLog.info("BLOCK* Removing " + block
                 + " from neededReplications as it has enough replicas");
@@ -1346,7 +1342,6 @@ public class BlockManager {
             // remove from neededReplications
             if(numEffectiveReplicas + targets.length >= requiredReplication) {
               neededReplications.remove(block, priority); // remove from neededReplications
-              neededReplications.decrementReplicationIndex(priority);
             }
           }
         }
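
All five deletions above remove the same pattern: per-removal index bookkeeping that every call site had to remember. With the bookmark held inside the queue (see LightWeightLinkedSet below), the fix-up happens in the set's own remove(), so callers simply remove the block. Schematically (illustrative, not a further hunk of this commit):

    // Before: each caller had to keep the per-priority scan index in sync.
    neededReplications.remove(block, priority);
    neededReplications.decrementReplicationIndex(priority);

    // After: LightWeightLinkedSet.remove() repairs the bookmark itself.
    neededReplications.remove(block, priority);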

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java

@@ -18,11 +18,8 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -82,16 +79,12 @@ class UnderReplicatedBlocks implements Iterable<Block> {
   static final int QUEUE_WITH_CORRUPT_BLOCKS = 4;
   /** the queues themselves */
   private List<LightWeightLinkedSet<Block>> priorityQueues
-      = new ArrayList<LightWeightLinkedSet<Block>>();
+      = new ArrayList<LightWeightLinkedSet<Block>>(LEVEL);
-  /** Stores the replication index for each priority */
-  private Map<Integer, Integer> priorityToReplIdx = new HashMap<Integer, Integer>(LEVEL);
   /** Create an object. */
   UnderReplicatedBlocks() {
     for (int i = 0; i < LEVEL; i++) {
       priorityQueues.add(new LightWeightLinkedSet<Block>());
-      priorityToReplIdx.put(i, 0);
     }
   }
@@ -310,13 +303,15 @@ class UnderReplicatedBlocks implements Iterable<Block> {
   /**
    * Get a list of block lists to be replicated. The index of block lists
-   * represents its replication priority. Replication index will be tracked for
-   * each priority list separately in priorityToReplIdx map. Iterates through
-   * all priority lists and find the elements after replication index. Once the
-   * last priority lists reaches to end, all replication indexes will be set to
-   * 0 and start from 1st priority list to fulfill the blockToProces count.
-   *
-   * @param blocksToProcess - number of blocks to fetch from underReplicated blocks.
+   * represents its replication priority. Iterates each block list in priority
+   * order beginning with the highest priority list. Iterators use a bookmark to
+   * resume where the previous iteration stopped. Returns when the block count
+   * is met or iteration reaches the end of the lowest priority list, in which
+   * case bookmarks for each block list are reset to the heads of their
+   * respective lists.
+   *
+   * @param blocksToProcess - number of blocks to fetch from underReplicated
+   *          blocks.
    * @return Return a list of block lists to be replicated. The block list index
    * represents its replication priority.
    */
@@ -336,12 +331,8 @@ class UnderReplicatedBlocks implements Iterable<Block> {
     for (int priority = 0; priority < LEVEL; priority++) {
       // Go through all blocks that need replications with current priority.
       BlockIterator neededReplicationsIterator = iterator(priority);
-      Integer replIndex = priorityToReplIdx.get(priority);
-
-      // skip to the first unprocessed block, which is at replIndex
-      for (int i = 0; i < replIndex && neededReplicationsIterator.hasNext(); i++) {
-        neededReplicationsIterator.next();
-      }
+      // Set the iterator to the first unprocessed block at this priority level.
+      neededReplicationsIterator.setToBookmark();

       blocksToProcess = Math.min(blocksToProcess, size());
@@ -354,20 +345,18 @@ class UnderReplicatedBlocks implements Iterable<Block> {
           && neededReplicationsIterator.hasNext()) {
         Block block = neededReplicationsIterator.next();
         blocksToReplicate.get(priority).add(block);
-        replIndex++;
         blockCount++;
       }

       if (!neededReplicationsIterator.hasNext()
           && neededReplicationsIterator.getPriority() == LEVEL - 1) {
-        // reset all priorities replication index to 0 because there is no
-        // recently added blocks in any list.
+        // Reset all priorities' bookmarks to the beginning because there were
+        // no recently added blocks in any list.
         for (int i = 0; i < LEVEL; i++) {
-          priorityToReplIdx.put(i, 0);
+          this.priorityQueues.get(i).resetBookmark();
         }
         break;
       }
-      priorityToReplIdx.put(priority, replIndex);
     }
     return blocksToReplicate;
   }
@@ -450,15 +439,19 @@ class UnderReplicatedBlocks implements Iterable<Block> {
     int getPriority() {
       return level;
     }
-  }

-  /**
-   * This method is to decrement the replication index for the given priority
-   *
-   * @param priority - int priority level
-   */
-  public void decrementReplicationIndex(int priority) {
-    Integer replIdx = priorityToReplIdx.get(priority);
-    priorityToReplIdx.put(priority, --replIdx);
+    /**
+     * Sets iterator(s) to bookmarked elements.
+     */
+    private synchronized void setToBookmark() {
+      if (this.isIteratorForLevel) {
+        this.iterators.set(0, priorityQueues.get(this.level)
+            .getBookmark());
+      } else {
+        for(int i=0; i<LEVEL; i++) {
+          this.iterators.set(i, priorityQueues.get(i).getBookmark());
+        }
+      }
+    }
   }
 }
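
The practical effect of setToBookmark() is that successive calls to chooseUnderReplicatedBlocks() resume exactly where the previous call stopped in each priority queue. A sketch of that behavior, using only the API exercised by the tests further down; add() takes the block plus its current, decommissioned, and expected replica counts:

    // Runs inside org.apache.hadoop.hdfs.server.blockmanagement, as the
    // tests do, since UnderReplicatedBlocks is package-private.
    UnderReplicatedBlocks urb = new UnderReplicatedBlocks();
    urb.add(new Block(1L), 2, 0, 7); // QUEUE_VERY_UNDER_REPLICATED
    urb.add(new Block(2L), 2, 0, 7); // QUEUE_VERY_UNDER_REPLICATED

    // The first call hands out the first block and leaves the bookmark on
    // the second ...
    List<List<Block>> first = urb.chooseUnderReplicatedBlocks(1);

    // ... so the second call resumes at the second block rather than
    // rescanning from the head, even if the queue changed in between.
    List<List<Block>> second = urb.chooseUnderReplicatedBlocks(1);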

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java

@@ -56,6 +56,8 @@ public class LightWeightLinkedSet<T> extends LightWeightHashSet<T> {
   private DoubleLinkedElement<T> head;
   private DoubleLinkedElement<T> tail;

+  private LinkedSetIterator bookmark;
+
   /**
    * @param initCapacity
    *          Recommended size of the internal array.
@@ -69,6 +71,7 @@ public class LightWeightLinkedSet<T> extends LightWeightHashSet<T> {
     super(initCapacity, maxLoadFactor, minLoadFactor);
     head = null;
     tail = null;
+    bookmark = new LinkedSetIterator();
   }

   public LightWeightLinkedSet() {
@@ -111,6 +114,12 @@ public class LightWeightLinkedSet<T> extends LightWeightHashSet<T> {
     tail = le;
     if (head == null) {
       head = le;
+      bookmark.next = head;
+    }
+
+    // Update bookmark, if necessary.
+    if (bookmark.next == null) {
+      bookmark.next = le;
     }
     return true;
   }
@@ -141,6 +150,11 @@ public class LightWeightLinkedSet<T> extends LightWeightHashSet<T> {
     if (tail == found) {
       tail = tail.before;
     }
+
+    // Update bookmark, if necessary.
+    if (found == this.bookmark.next) {
+      this.bookmark.next = found.after;
+    }
     return found;
   }
@@ -262,5 +276,25 @@ public class LightWeightLinkedSet<T> extends LightWeightHashSet<T> {
     super.clear();
     this.head = null;
     this.tail = null;
+    this.resetBookmark();
+  }
+
+  /**
+   * Returns a new iterator starting at the bookmarked element.
+   *
+   * @return the iterator to the bookmarked element.
+   */
+  public Iterator<T> getBookmark() {
+    LinkedSetIterator toRet = new LinkedSetIterator();
+    toRet.next = this.bookmark.next;
+    this.bookmark = toRet;
+    return toRet;
+  }
+
+  /**
+   * Resets the bookmark to the beginning of the list.
+   */
+  public void resetBookmark() {
+    this.bookmark.next = this.head;
   }
 }
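
The bookmark contract introduced above can be read off the tests that follow, but in brief (a sketch against the methods just added, with Integer elements as in TestLightWeightLinkedSet):

    LightWeightLinkedSet<Integer> set = new LightWeightLinkedSet<Integer>();
    set.add(1);
    set.add(2);
    set.add(3);

    Iterator<Integer> it = set.getBookmark(); // starts at the head
    it.next();                                // consume 1; bookmark is now at 2

    set.remove(2);                            // removing the bookmarked element
                                              // slides the bookmark to 3
    it = set.getBookmark();
    assert it.next() == 3;                    // nothing skipped, nothing repeated

    set.resetBookmark();
    assert set.getBookmark().next() == 1;     // back to the head

Note that consuming the iterator returned by getBookmark() advances the stored bookmark itself, which is what lets the next caller resume where this one stopped.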

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java

@@ -21,8 +21,11 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 import java.io.File;
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -31,6 +34,7 @@ import java.util.Map;
 import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -40,9 +44,13 @@ import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.Time;
@@ -1004,4 +1012,185 @@ public class TestReplicationPolicy {
     exception.expect(IllegalArgumentException.class);
     blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
   }
+
+  @Test(timeout = 60000)
+  public void testUpdateDoesNotCauseSkippedReplication() {
+    UnderReplicatedBlocks underReplicatedBlocks = new UnderReplicatedBlocks();
+
+    Block block1 = new Block(random.nextLong());
+    Block block2 = new Block(random.nextLong());
+    Block block3 = new Block(random.nextLong());
+
+    // Adding QUEUE_VERY_UNDER_REPLICATED block
+    final int block1CurReplicas = 2;
+    final int block1ExpectedReplicas = 7;
+    underReplicatedBlocks.add(block1, block1CurReplicas, 0,
+        block1ExpectedReplicas);
+
+    // Adding QUEUE_VERY_UNDER_REPLICATED block
+    underReplicatedBlocks.add(block2, 2, 0, 7);
+
+    // Adding QUEUE_UNDER_REPLICATED block
+    underReplicatedBlocks.add(block3, 2, 0, 6);
+
+    List<List<Block>> chosenBlocks;
+
+    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
+    // from QUEUE_VERY_UNDER_REPLICATED.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 0, 1, 0, 0, 0);
+
+    // Increasing the replications will move the block down a
+    // priority. This simulates a replica being completed in between checks.
+    underReplicatedBlocks.update(block1, block1CurReplicas+1, 0,
+        block1ExpectedReplicas, 1, 0);
+
+    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
+    // from QUEUE_VERY_UNDER_REPLICATED.
+    // This block was moved up a priority and should not be skipped over.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 0, 1, 0, 0, 0);
+
+    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
+    // from QUEUE_UNDER_REPLICATED.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 0, 0, 1, 0, 0);
+  }
+
+  @Test(timeout = 60000)
+  public void testAddStoredBlockDoesNotCauseSkippedReplication()
+      throws IOException {
+    Namesystem mockNS = mock(Namesystem.class);
+    when(mockNS.isPopulatingReplQueues()).thenReturn(true);
+    when(mockNS.hasWriteLock()).thenReturn(true);
+    FSClusterStats mockStats = mock(FSClusterStats.class);
+    BlockManager bm =
+        new BlockManager(mockNS, mockStats, new HdfsConfiguration());
+    UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
+
+    Block block1 = new Block(random.nextLong());
+    Block block2 = new Block(random.nextLong());
+
+    // Adding QUEUE_UNDER_REPLICATED block
+    underReplicatedBlocks.add(block1, 0, 1, 1);
+
+    // Adding QUEUE_UNDER_REPLICATED block
+    underReplicatedBlocks.add(block2, 0, 1, 1);
+
+    List<List<Block>> chosenBlocks;
+
+    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
+    // from QUEUE_VERY_UNDER_REPLICATED.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
+
+    // Adding this block collection to the BlockManager, so that when we add the
+    // block under construction, the BlockManager will realize the expected
+    // replication has been achieved and remove it from the under-replicated
+    // queue.
+    BlockInfoUnderConstruction info = new BlockInfoUnderConstruction(block1, 1);
+    BlockCollection bc = mock(BlockCollection.class);
+    when(bc.getBlockReplication()).thenReturn((short)1);
+    bm.addBlockCollection(info, bc);
+
+    // Adding this block will increase its current replication, and that will
+    // remove it from the queue.
+    bm.addStoredBlockUnderConstruction(info,
+        TestReplicationPolicy.dataNodes[0], ReplicaState.FINALIZED);
+
+    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
+    // from QUEUE_VERY_UNDER_REPLICATED.
+    // This block remains and should not be skipped over.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
+  }
+
+  @Test(timeout = 60000)
+  public void
+      testConvertLastBlockToUnderConstructionDoesNotCauseSkippedReplication()
+          throws IOException {
+    Namesystem mockNS = mock(Namesystem.class);
+    when(mockNS.isPopulatingReplQueues()).thenReturn(true);
+    FSClusterStats mockStats = mock(FSClusterStats.class);
+    BlockManager bm =
+        new BlockManager(mockNS, mockStats, new HdfsConfiguration());
+    UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
+
+    Block block1 = new Block(random.nextLong());
+    Block block2 = new Block(random.nextLong());
+
+    // Adding QUEUE_UNDER_REPLICATED block
+    underReplicatedBlocks.add(block1, 0, 1, 1);
+
+    // Adding QUEUE_UNDER_REPLICATED block
+    underReplicatedBlocks.add(block2, 0, 1, 1);
+
+    List<List<Block>> chosenBlocks;
+
+    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
+    // from QUEUE_VERY_UNDER_REPLICATED.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
+
+    final BlockInfo info = new BlockInfo(block1, 1);
+    final MutableBlockCollection mbc = mock(MutableBlockCollection.class);
+    when(mbc.getLastBlock()).thenReturn(info);
+    when(mbc.getPreferredBlockSize()).thenReturn(block1.getNumBytes() + 1);
+    when(mbc.getBlockReplication()).thenReturn((short)1);
+    ContentSummary cs = mock(ContentSummary.class);
+    when(cs.getLength()).thenReturn((long)1);
+    when(mbc.computeContentSummary()).thenReturn(cs);
+    info.setBlockCollection(mbc);
+    bm.addBlockCollection(info, mbc);
+
+    DatanodeDescriptor[] dnAry = {dataNodes[0]};
+    final BlockInfoUnderConstruction ucBlock =
+        info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION,
+            dnAry);
+    when(mbc.setLastBlock((BlockInfo) any(), (DatanodeDescriptor[]) any()))
+        .thenReturn(ucBlock);
+
+    bm.convertLastBlockToUnderConstruction(mbc);
+
+    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
+    // from QUEUE_VERY_UNDER_REPLICATED.
+    // This block remains and should not be skipped over.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
+  }
+
+  @Test(timeout = 60000)
+  public void testupdateNeededReplicationsDoesNotCauseSkippedReplication()
+      throws IOException {
+    Namesystem mockNS = mock(Namesystem.class);
+    when(mockNS.isPopulatingReplQueues()).thenReturn(true);
+    FSClusterStats mockStats = mock(FSClusterStats.class);
+    BlockManager bm =
+        new BlockManager(mockNS, mockStats, new HdfsConfiguration());
+    UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
+
+    Block block1 = new Block(random.nextLong());
+    Block block2 = new Block(random.nextLong());
+
+    // Adding QUEUE_UNDER_REPLICATED block
+    underReplicatedBlocks.add(block1, 0, 1, 1);
+
+    // Adding QUEUE_UNDER_REPLICATED block
+    underReplicatedBlocks.add(block2, 0, 1, 1);
+
+    List<List<Block>> chosenBlocks;
+
+    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
+    // from QUEUE_VERY_UNDER_REPLICATED.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
+
+    bm.setReplication((short)0, (short)1, "", block1);
+
+    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
+    // from QUEUE_VERY_UNDER_REPLICATED.
+    // This block remains and should not be skipped over.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
+  }
 }

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java

@@ -325,10 +325,19 @@ public class TestLightWeightLinkedSet {
     assertEquals(NUM, set.size());
     assertFalse(set.isEmpty());

+    // Advance the bookmark.
+    Iterator<Integer> bkmrkIt = set.getBookmark();
+    for (int i=0; i<set.size()/2+1; i++) {
+      bkmrkIt.next();
+    }
+    assertTrue(bkmrkIt.hasNext());
+
     // clear the set
     set.clear();
     assertEquals(0, set.size());
     assertTrue(set.isEmpty());
+    bkmrkIt = set.getBookmark();
+    assertFalse(bkmrkIt.hasNext());

     // poll should return an empty list
     assertEquals(0, set.pollAll().size());
@@ -363,4 +372,64 @@ public class TestLightWeightLinkedSet {
     LOG.info("Test capacity - DONE");
   }
+
+  @Test(timeout=60000)
+  public void testGetBookmarkReturnsBookmarkIterator() {
+    LOG.info("Test getBookmark returns proper iterator");
+    assertTrue(set.addAll(list));
+
+    Iterator<Integer> bookmark = set.getBookmark();
+    assertEquals(bookmark.next(), list.get(0));
+
+    final int numAdvance = list.size()/2;
+    for(int i=1; i<numAdvance; i++) {
+      bookmark.next();
+    }
+
+    Iterator<Integer> bookmark2 = set.getBookmark();
+    assertEquals(bookmark2.next(), list.get(numAdvance));
+  }
+
+  @Test(timeout=60000)
+  public void testBookmarkAdvancesOnRemoveOfSameElement() {
+    LOG.info("Test that the bookmark advances if we remove its element.");
+    assertTrue(set.add(list.get(0)));
+    assertTrue(set.add(list.get(1)));
+    assertTrue(set.add(list.get(2)));
+
+    Iterator<Integer> it = set.getBookmark();
+    assertEquals(it.next(), list.get(0));
+
+    set.remove(list.get(1));
+    it = set.getBookmark();
+    assertEquals(it.next(), list.get(2));
+  }
+
+  @Test(timeout=60000)
+  public void testBookmarkSetToHeadOnAddToEmpty() {
+    LOG.info("Test bookmark is set after adding to previously empty set.");
+    Iterator<Integer> it = set.getBookmark();
+    assertFalse(it.hasNext());
+    set.add(list.get(0));
+    set.add(list.get(1));
+
+    it = set.getBookmark();
+    assertTrue(it.hasNext());
+    assertEquals(it.next(), list.get(0));
+    assertEquals(it.next(), list.get(1));
+    assertFalse(it.hasNext());
+  }
+
+  @Test(timeout=60000)
+  public void testResetBookmarkPlacesBookmarkAtHead() {
+    set.addAll(list);
+    Iterator<Integer> it = set.getBookmark();
+    final int numAdvance = set.size()/2;
+    for (int i=0; i<numAdvance; i++) {
+      it.next();
+    }
+    assertEquals(it.next(), list.get(numAdvance));
+
+    set.resetBookmark();
+    it = set.getBookmark();
+    assertEquals(it.next(), list.get(0));
+  }
 }