ARTEMIS-710 Avoid inefficient iteration over Map

This commit is contained in:
Martin Styk 2016-08-31 16:36:09 +02:00 committed by Clebert Suconic
parent 93b3caba75
commit 275924e8f4
3 changed files with 10 additions and 9 deletions

View File

@@ -321,9 +321,9 @@ public class PostOfficeJournalLoader implements JournalLoader {
Map<SimpleString, Map<Long, Map<Long, List<PageCountPending>>>> perAddressMap = generateMapsOnPendingCount(queues, pendingNonTXPageCounter, txRecoverCounter);
-for (SimpleString address : perAddressMap.keySet()) {
-PagingStore store = pagingManager.getPageStore(address);
-Map<Long, Map<Long, List<PageCountPending>>> perPageMap = perAddressMap.get(address);
+for (Map.Entry<SimpleString, Map<Long, Map<Long, List<PageCountPending>>>> addressPageMapEntry : perAddressMap.entrySet()) {
+PagingStore store = pagingManager.getPageStore(addressPageMapEntry.getKey());
+Map<Long, Map<Long, List<PageCountPending>>> perPageMap = addressPageMapEntry.getValue();
// We have already generated this before, so it can't be null
assert (perPageMap != null);
@@ -376,7 +376,7 @@ public class PostOfficeJournalLoader implements JournalLoader {
}
else {
// on this case the page file didn't exist, we just remove all the records since the page is already gone
-logger.debug("Page " + pageId + " didn't exist on address " + address + ", so we are just removing records");
+logger.debug("Page " + pageId + " didn't exist on address " + addressPageMapEntry.getKey() + ", so we are just removing records");
for (List<PageCountPending> records : perQueue.values()) {
for (PageCountPending record : records) {
logger.debug("Removing pending page counter " + record.getID());

View File

@@ -409,15 +409,15 @@ public class ScaleDownHandler {
try (ClientSession session = sessionFactory.createSession(user, password, true, false, false, false, 0);
ClientProducer producer = session.createProducer(managementAddress)) {
//todo - https://issues.jboss.org/browse/HORNETQ-1336
-for (SimpleString address : duplicateIDMap.keySet()) {
+for (Map.Entry<SimpleString,List<Pair<byte[], Long>>> entry : duplicateIDMap.entrySet()) {
ClientMessage message = session.createMessage(false);
-List<Pair<byte[], Long>> list = duplicateIDMap.get(address);
+List<Pair<byte[], Long>> list = entry.getValue();
String[] array = new String[list.size()];
for (int i = 0; i < list.size(); i++) {
Pair<byte[], Long> pair = list.get(i);
array[i] = new String(pair.getA());
}
-ManagementHelper.putOperationInvocation(message, ResourceNames.CORE_SERVER, "updateDuplicateIdCache", address.toString(), array);
+ManagementHelper.putOperationInvocation(message, ResourceNames.CORE_SERVER, "updateDuplicateIdCache", entry.getKey().toString(), array);
producer.send(message);
}
}

View File

@@ -298,8 +298,9 @@ public abstract class ActiveMQTestBase extends Assert {
fail("Client Session Factories still trying to reconnect, see above to see where created");
}
Map<Thread, StackTraceElement[]> threadMap = Thread.getAllStackTraces();
-for (Thread thread : threadMap.keySet()) {
-StackTraceElement[] stack = threadMap.get(thread);
+for (Map.Entry<Thread, StackTraceElement[]> entry : threadMap.entrySet()) {
+Thread thread = entry.getKey();
+StackTraceElement[] stack = entry.getValue();
for (StackTraceElement stackTraceElement : stack) {
if (stackTraceElement.getMethodName().contains("getConnectionWithRetry") && !alreadyFailedThread.contains(thread)) {
alreadyFailedThread.add(thread);