HDFS-4879. Add "blocked ArrayList" collection to avoid CMS full GCs (Contributed by Todd Lipcon)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1520669 13f79535-47bb-0310-9956-ffa450edef68
Colin McCabe 2013-09-06 19:09:41 +00:00
parent 4cb86bf2db
commit 36ed17388e
7 changed files with 281 additions and 15 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt View File

@@ -31,6 +31,9 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-4491. Parallel testing HDFS. (Andrey Klochkov via cnauroth)
 
+    HDFS-4879. Add "blocked ArrayList" collection to avoid CMS full GCs
+    (Todd Lipcon via Colin Patrick McCabe)
+
   OPTIMIZATIONS
 
   BUG FIXES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java View File

@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapsho
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
 import org.apache.hadoop.hdfs.util.ByteArray;
+import org.apache.hadoop.hdfs.util.ChunkedArrayList;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -949,7 +950,7 @@ public class FSDirectory implements Closeable {
       if (removedDst != null) {
         undoRemoveDst = false;
         BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-        List<INode> removedINodes = new ArrayList<INode>();
+        List<INode> removedINodes = new ChunkedArrayList<INode>();
         filesDeleted = removedDst.cleanSubtree(null,
             dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes, true)
             .get(Quota.NAMESPACE);
@@ -1363,7 +1364,7 @@ public class FSDirectory implements Closeable {
       QuotaExceededException, SnapshotAccessControlException {
     assert hasWriteLock();
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-    List<INode> removedINodes = new ArrayList<INode>();
+    List<INode> removedINodes = new ChunkedArrayList<INode>();
     final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
         normalizePath(src), false);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java View File

@@ -23,7 +23,6 @@ import java.io.File;
 import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumMap;
 import java.util.List;
@@ -77,6 +76,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
+import org.apache.hadoop.hdfs.util.ChunkedArrayList;
 import org.apache.hadoop.hdfs.util.Holder;
 
 import com.google.common.base.Joiner;
@@ -584,7 +584,7 @@ public class FSEditLogLoader {
     case OP_DELETE_SNAPSHOT: {
       DeleteSnapshotOp deleteSnapshotOp = (DeleteSnapshotOp) op;
       BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-      List<INode> removedINodes = new ArrayList<INode>();
+      List<INode> removedINodes = new ChunkedArrayList<INode>();
       fsNamesys.getSnapshotManager().deleteSnapshot(
           deleteSnapshotOp.snapshotRoot, deleteSnapshotOp.snapshotName,
           collectedBlocks, removedINodes);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java View File

@@ -201,6 +201,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.util.ChunkedArrayList;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RetryCache;
@@ -3113,7 +3114,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       throws AccessControlException, SafeModeException, UnresolvedLinkException,
       IOException {
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-    List<INode> removedINodes = new ArrayList<INode>();
+    List<INode> removedINodes = new ChunkedArrayList<INode>();
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
@@ -3167,21 +3168,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    * of blocks that need to be removed from blocksMap
    */
   void removeBlocks(BlocksMapUpdateInfo blocks) {
-    int start = 0;
-    int end = 0;
     List<Block> toDeleteList = blocks.getToDeleteList();
-    while (start < toDeleteList.size()) {
-      end = BLOCK_DELETION_INCREMENT + start;
-      end = end > toDeleteList.size() ? toDeleteList.size() : end;
+    Iterator<Block> iter = toDeleteList.iterator();
+    while (iter.hasNext()) {
       writeLock();
       try {
-        for (int i = start; i < end; i++) {
-          blockManager.removeBlock(toDeleteList.get(i));
+        for (int i = 0; i < BLOCK_DELETION_INCREMENT && iter.hasNext(); i++) {
+          blockManager.removeBlock(iter.next());
         }
       } finally {
         writeUnlock();
       }
-      start = end;
     }
   }
 
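[Editor's note] The switch to an Iterator here is not just stylistic: ChunkedArrayList (added below) does not support get(int), so the index-based loop had to go. The batching behavior is preserved: at most BLOCK_DELETION_INCREMENT blocks are removed per acquisition of the namesystem write lock, letting other operations interleave between batches. A minimal standalone sketch of this lock-batching pattern, with illustrative names (BatchedProcessor, BATCH_SIZE, process) that are not part of the patch:

import java.util.Iterator;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Process a large list in fixed-size increments, reacquiring the write
// lock for each batch so that readers and writers can interleave.
class BatchedProcessor<T> {
  private static final int BATCH_SIZE = 1000; // analogous to BLOCK_DELETION_INCREMENT
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  void processAll(List<T> items) {
    Iterator<T> iter = items.iterator();
    while (iter.hasNext()) {
      lock.writeLock().lock();
      try {
        // Handle at most BATCH_SIZE items per lock acquisition.
        for (int i = 0; i < BATCH_SIZE && iter.hasNext(); i++) {
          process(iter.next());
        }
      } finally {
        lock.writeLock().unlock();
      }
    }
  }

  private void process(T item) { /* e.g. blockManager.removeBlock(...) */ }
}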
@@ -6761,7 +6758,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     checkOwner(pc, snapshotRoot);
 
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-    List<INode> removedINodes = new ArrayList<INode>();
+    List<INode> removedINodes = new ChunkedArrayList<INode>();
     dir.writeLock();
     try {
       snapshotManager.deleteSnapshot(snapshotRoot, snapshotName,

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java View File

@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.ChunkedArrayList;
 import org.apache.hadoop.hdfs.util.Diff;
 import org.apache.hadoop.util.StringUtils;
 
@@ -707,7 +708,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
     }
 
     public BlocksMapUpdateInfo() {
-      toDeleteList = new ArrayList<Block>();
+      toDeleteList = new ChunkedArrayList<Block>();
     }
 
     /**

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ChunkedArrayList.java View File

@@ -0,0 +1,171 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.util;

import java.util.AbstractList;
import java.util.Iterator;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;

/**
 * Simplified List implementation which stores elements as a list
 * of chunks, each chunk having a maximum size. This improves over
 * using an ArrayList in that creating a large list will never require
 * a large amount of contiguous heap space -- thus reducing the likelihood
 * of triggering a CMS compaction pause due to heap fragmentation.
 *
 * The first chunks allocated are small, but each additional chunk is
 * 50% larger than the previous, ramping up to a configurable maximum
 * chunk size. Reasonable defaults are provided which should be a good
 * balance between not making any large allocations while still retaining
 * decent performance.
 *
 * This currently only supports a small subset of List operations --
 * namely addition and iteration.
 */
@InterfaceAudience.Private
public class ChunkedArrayList<T> extends AbstractList<T> {

  /**
   * The chunks which make up the full list.
   */
  private final List<List<T>> chunks = Lists.newArrayList();

  /**
   * Cache of the last element in the 'chunks' array above.
   * This speeds up the add operation measurably.
   */
  private List<T> lastChunk = null;

  /**
   * The capacity with which the last chunk was allocated.
   */
  private int lastChunkCapacity;

  /**
   * The capacity of the first chunk to allocate in a cleared list.
   */
  private final int initialChunkCapacity;

  /**
   * The maximum number of elements for any chunk.
   */
  private final int maxChunkSize;

  /**
   * Total number of elements in the list.
   */
  private int size;

  /**
   * Default initial size is 6 elements, since typical minimum object
   * size is 64 bytes, and this leaves enough space for the object
   * header.
   */
  private static final int DEFAULT_INITIAL_CHUNK_CAPACITY = 6;

  /**
   * Default max size is 8K elements - which, at 8 bytes per element
   * should be about 64KB -- small enough to easily fit in contiguous
   * free heap space even with a fair amount of fragmentation.
   */
  private static final int DEFAULT_MAX_CHUNK_SIZE = 8*1024;

  public ChunkedArrayList() {
    this(DEFAULT_INITIAL_CHUNK_CAPACITY, DEFAULT_MAX_CHUNK_SIZE);
  }

  /**
   * @param initialChunkCapacity the capacity of the first chunk to be
   * allocated
   * @param maxChunkSize the maximum size of any chunk allocated
   */
  public ChunkedArrayList(int initialChunkCapacity, int maxChunkSize) {
    Preconditions.checkArgument(maxChunkSize >= initialChunkCapacity);
    this.initialChunkCapacity = initialChunkCapacity;
    this.maxChunkSize = maxChunkSize;
  }

  @Override
  public Iterator<T> iterator() {
    return Iterables.concat(chunks).iterator();
  }

  @Override
  public boolean add(T e) {
    if (lastChunk == null) {
      addChunk(initialChunkCapacity);
    } else if (lastChunk.size() >= lastChunkCapacity) {
      int newCapacity = lastChunkCapacity + (lastChunkCapacity >> 1);
      addChunk(Math.min(newCapacity, maxChunkSize));
    }
    size++;
    return lastChunk.add(e);
  }

  @Override
  public void clear() {
    chunks.clear();
    lastChunk = null;
    lastChunkCapacity = 0;
    size = 0;
  }

  private void addChunk(int capacity) {
    lastChunk = Lists.newArrayListWithCapacity(capacity);
    chunks.add(lastChunk);
    lastChunkCapacity = capacity;
  }

  @Override
  public boolean isEmpty() {
    return size == 0;
  }

  @Override
  public int size() {
    return size;
  }

  @VisibleForTesting
  int getNumChunks() {
    return chunks.size();
  }

  @VisibleForTesting
  int getMaxChunkSize() {
    int size = 0;
    for (List<T> chunk : chunks) {
      size = Math.max(size, chunk.size());
    }
    return size;
  }

  @Override
  public T get(int arg0) {
    throw new UnsupportedOperationException(
        this.getClass().getName() + " does not support random access");
  }
}
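[Editor's note] As the class javadoc says, only addition and iteration are supported; get(int) throws. A short usage sketch (not part of the patch) showing the intended access pattern:

import java.util.List;
import org.apache.hadoop.hdfs.util.ChunkedArrayList;

public class ChunkedArrayListDemo {
  public static void main(String[] args) {
    // Appends spill into progressively larger chunks instead of one big array,
    // so no single allocation needs a large contiguous region of heap.
    List<Long> ids = new ChunkedArrayList<Long>();
    for (long i = 0; i < 100000; i++) {
      ids.add(i);
    }
    // Iteration walks the chunks in insertion order.
    long sum = 0;
    for (long id : ids) {
      sum += id;
    }
    System.out.println(ids.size() + " elements, sum = " + sum);
    // ids.get(0) would throw UnsupportedOperationException -- the class
    // deliberately supports only add and iteration.
  }
}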

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java View File

@@ -0,0 +1,93 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.util;

import static org.junit.Assert.*;

import java.util.ArrayList;

import org.junit.Test;

import com.google.common.base.Stopwatch;

public class TestChunkedArrayList {

  @Test
  public void testBasics() {
    final int N_ELEMS = 100000;
    ChunkedArrayList<Integer> l = new ChunkedArrayList<Integer>();
    assertTrue(l.isEmpty());
    // Insert a bunch of elements.
    for (int i = 0; i < N_ELEMS; i++) {
      l.add(i);
    }
    assertFalse(l.isEmpty());
    assertEquals(N_ELEMS, l.size());

    // Check that it got chunked.
    assertTrue(l.getNumChunks() > 10);
    assertEquals(8192, l.getMaxChunkSize());
  }

  @Test
  public void testIterator() {
    ChunkedArrayList<Integer> l = new ChunkedArrayList<Integer>();
    for (int i = 0; i < 30000; i++) {
      l.add(i);
    }

    int i = 0;
    for (int fromList : l) {
      assertEquals(i, fromList);
      i++;
    }
  }

  @Test
  public void testPerformance() {
    String obj = "hello world";

    final int numElems = 1000000;
    final int numTrials = 5;
    for (int trial = 0; trial < numTrials; trial++) {
      System.gc();
      {
        ArrayList<String> arrayList = new ArrayList<String>();
        Stopwatch sw = new Stopwatch();
        sw.start();
        for (int i = 0; i < numElems; i++) {
          arrayList.add(obj);
        }
        System.out.println(" ArrayList " + sw.elapsedMillis());
      }

      // test ChunkedArrayList
      System.gc();
      {
        ChunkedArrayList<String> chunkedList = new ChunkedArrayList<String>();
        Stopwatch sw = new Stopwatch();
        sw.start();
        for (int i = 0; i < numElems; i++) {
          chunkedList.add(obj);
        }
        System.out.println("ChunkedArrayList " + sw.elapsedMillis());
      }
    }
  }
}
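[Editor's note] The expectations in testBasics (more than 10 chunks, max chunk size of exactly 8192 for 100,000 inserts) follow directly from the growth rule in add(): each new chunk's capacity is 1.5x the previous one, starting at 6 and capped at 8*1024. A small sketch (not part of the patch) that reproduces the capacity ramp:

public class ChunkGrowthDemo {
  public static void main(String[] args) {
    final int initialCapacity = 6;     // DEFAULT_INITIAL_CHUNK_CAPACITY
    final int maxChunkSize = 8 * 1024; // DEFAULT_MAX_CHUNK_SIZE
    int capacity = initialCapacity;
    long total = 0;
    int chunk = 0;
    while (total < 100000) {           // N_ELEMS from testBasics
      chunk++;
      total += capacity;
      System.out.println("chunk " + chunk + ": capacity " + capacity
          + ", cumulative " + total);
      // Same arithmetic as ChunkedArrayList.add(): grow 50%, capped.
      capacity = Math.min(capacity + (capacity >> 1), maxChunkSize);
    }
  }
}

The ramp hits the 8192 cap at the 20th chunk (capacities run 6, 9, 13, ..., 5395, 8092, then 8192), so 100,000 inserts land in roughly 29 chunks, with every full chunk past the ramp holding exactly 8192 elements -- matching both assertions in the test.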