diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5866237e007..1452e4cc815 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -807,6 +807,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-7433. Optimize performance of DatanodeManager's node map.
     (daryn via kihwal)
 
+    HDFS-8792. BlockManager#postponedMisreplicatedBlocks should use a
+    LightWeightHashSet to save memory (Yi Liu via Colin P. McCabe)
+
   BUG FIXES
 
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index fa514914a1c..cde6588a2b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -85,6 +85,7 @@
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.Node;
@@ -95,7 +96,6 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Sets;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -196,7 +196,8 @@ public int getPendingDataNodeMessageCount() {
    * notified of all block deletions that might have been pending
    * when the failover happened.
    */
-  private final Set<Block> postponedMisreplicatedBlocks = Sets.newHashSet();
+  private final LightWeightHashSet<Block> postponedMisreplicatedBlocks =
+      new LightWeightHashSet<>();
 
   /**
    * Maps a StorageID to the set of blocks that are "extra" for this
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
index 55655ecfc5f..c7bf9a67f9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
@@ -527,12 +527,13 @@ public void printDetails(final PrintStream out) {
   }
 
   private class LinkedSetIterator implements Iterator<T> {
-    /** The starting modification for fail-fast. */
-    private final int startModification = modification;
+    /** The current modification epoch. */
+    private int expectedModification = modification;
     /** The current index of the entry array. */
     private int index = -1;
     /** The next element to return. */
     private LinkedElement next = nextNonemptyEntry();
+    private LinkedElement current;
 
     private LinkedElement nextNonemptyEntry() {
       for (index++; index < entries.length && entries[index] == null; index++);
@@ -546,13 +547,14 @@ public boolean hasNext() {
 
     @Override
     public T next() {
-      if (modification != startModification) {
+      if (modification != expectedModification) {
         throw new ConcurrentModificationException("modification="
-            + modification + " != startModification = " + startModification);
+            + modification + " != expectedModification = " + expectedModification);
       }
       if (next == null) {
         throw new NoSuchElementException();
       }
+      current = next;
       final T e = next.element;
       // find the next element
       final LinkedElement n = next.next;
@@ -562,7 +564,16 @@ public T next() {
 
     @Override
     public void remove() {
-      throw new UnsupportedOperationException("Remove is not supported.");
+      if (current == null) {
+        throw new NoSuchElementException();
+      }
+      if (modification != expectedModification) {
+        throw new ConcurrentModificationException("modification="
+            + modification + " != expectedModification = " + expectedModification);
+      }
+      LightWeightHashSet.this.removeElem(current.element);
+      current = null;
+      expectedModification = modification;
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
index bb274834c3f..50af25582a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
@@ -190,6 +190,33 @@ public void testRemoveAll() {
     LOG.info("Test remove all - DONE");
   }
 
+  @Test
+  public void testRemoveAllViaIterator() {
+    LOG.info("Test remove all via iterator");
+    for (Integer i : list) {
+      assertTrue(set.add(i));
+    }
+    for (Iterator<Integer> iter = set.iterator(); iter.hasNext(); ) {
+      int e = iter.next();
+      // element should be there before removing
+      assertTrue(set.contains(e));
+      iter.remove();
+      // element should not be there now
+      assertFalse(set.contains(e));
+    }
+
+    // the deleted elements should not be there
+    for (int i = 0; i < NUM; i++) {
+      assertFalse(set.contains(list.get(i)));
+    }
+
+    // iterator should not have next
+    Iterator<Integer> iter = set.iterator();
+    assertFalse(iter.hasNext());
+    assertTrue(set.isEmpty());
+    LOG.info("Test remove all via iterator - DONE");
+  }
+
   @Test
   public void testPollAll() {
     LOG.info("Test poll all");
@@ -470,4 +497,4 @@ public boolean equals(Object obj) {
     }
   }
 
-}
\ No newline at end of file
+}
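
For reviewers, a minimal usage sketch of the iterator removal this patch enables. It assumes only the LightWeightHashSet API visible in the diff (the no-arg constructor, add, iterator, and the new Iterator.remove) plus Collection.size(); the Long element type, the block IDs, and the driver class are illustrative, not part of the patch:

import java.util.Iterator;

import org.apache.hadoop.hdfs.util.LightWeightHashSet;

// Illustrative driver, not part of the patch: shows how a caller such as
// BlockManager can now drop entries while scanning the set, instead of
// copying keys out or building a separate to-remove list.
public class IteratorRemoveSketch {
  public static void main(String[] args) {
    LightWeightHashSet<Long> postponed = new LightWeightHashSet<>();
    for (long blockId = 0; blockId < 8; blockId++) {
      postponed.add(blockId);  // hypothetical block IDs
    }
    for (Iterator<Long> it = postponed.iterator(); it.hasNext(); ) {
      long blockId = it.next();
      if (blockId % 2 == 0) {
        // Safe mid-scan removal: remove() deletes the element returned by
        // the last next() and then adopts the set's bumped modification
        // counter as its own expectedModification, so the iterator stays
        // fail-fast against other writers but tolerates its own removals.
        it.remove();
      }
    }
    System.out.println("remaining: " + postponed.size());  // prints 4
  }
}

The final expectedModification = modification assignment in remove() is the key design point: removeElem increments the set's modification counter, and resynchronizing the iterator's expected value is what keeps the subsequent next() call from throwing ConcurrentModificationException.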