From bcb3e089d80dbb47d13f7e8597b1f69d558b12af Mon Sep 17 00:00:00 2001
From: Dhruv Parthasarathy
Date: Wed, 24 Jul 2013 14:57:02 -0700
Subject: [PATCH] profiler added, commented out initialTotalCost normalization
 and normalizedInitialCost

---
 .../druid/master/BalancerCostAnalyzer.java          |  17 +--
 .../druid/master/DruidMasterBalancer.java           |  30 ++--
 .../metamx/druid/master/rules/LoadRule.java         |   2 +-
 .../master/DruidMasterBalancerProfiler.java         | 120 ++++++++++++++++++
 4 files changed, 137 insertions(+), 32 deletions(-)
 create mode 100644 server/src/test/java/com/metamx/druid/master/DruidMasterBalancerProfiler.java

diff --git a/server/src/main/java/com/metamx/druid/master/BalancerCostAnalyzer.java b/server/src/main/java/com/metamx/druid/master/BalancerCostAnalyzer.java
index 48b65552274..8f24acf5533 100644
--- a/server/src/main/java/com/metamx/druid/master/BalancerCostAnalyzer.java
+++ b/server/src/main/java/com/metamx/druid/master/BalancerCostAnalyzer.java
@@ -178,7 +178,7 @@ public class BalancerCostAnalyzer
    *
    * @return A ServerHolder with the new home for a segment.
    */
-  public ServerHolder findNewSegmentHomeBalance(
+  public ServerHolder findNewSegmentHome(
       final DataSegment proposalSegment,
       final Iterable<ServerHolder> serverHolders
   )
@@ -194,21 +194,6 @@ public class BalancerCostAnalyzer
    *
    * @return A ServerHolder with the new home for a segment.
    */
-  public ServerHolder findNewSegmentHomeAssign(
-      final DataSegment proposalSegment,
-      final Iterable<ServerHolder> serverHolders
-  )
-  {
-    MinMaxPriorityQueue<Pair<Double, ServerHolder>> costsAndServers = computeCosts(proposalSegment, serverHolders);
-    while (!costsAndServers.isEmpty()) {
-      ServerHolder toServer = costsAndServers.pollFirst().rhs;
-      if (!toServer.isServingSegment(proposalSegment)) {
-        return toServer;
-      }
-    }
-
-    return null;
-  }
 
   private MinMaxPriorityQueue<Pair<Double, ServerHolder>> computeCosts(
       final DataSegment proposalSegment,
diff --git a/server/src/main/java/com/metamx/druid/master/DruidMasterBalancer.java b/server/src/main/java/com/metamx/druid/master/DruidMasterBalancer.java
index 05e5992466d..d27b3797429 100644
--- a/server/src/main/java/com/metamx/druid/master/DruidMasterBalancer.java
+++ b/server/src/main/java/com/metamx/druid/master/DruidMasterBalancer.java
@@ -116,7 +116,7 @@ public class DruidMasterBalancer implements DruidMasterHelper
         final BalancerSegmentHolder segmentToMove = analyzer.pickSegmentToMove(serverHolderList);
 
         if (params.getAvailableSegments().contains(segmentToMove.getSegment())) {
-          final ServerHolder holder = analyzer.findNewSegmentHomeBalance(segmentToMove.getSegment(), serverHolderList);
+          final ServerHolder holder = analyzer.findNewSegmentHome(segmentToMove.getSegment(), serverHolderList);
 
           if (holder != null) {
             moveSegment(segmentToMove, holder.getServer(), params);
@@ -124,23 +124,23 @@ public class DruidMasterBalancer implements DruidMasterHelper
         }
       }
 
-      final double initialTotalCost = analyzer.calculateInitialTotalCost(serverHolderList);
-      final double normalization = analyzer.calculateNormalization(serverHolderList);
-      final double normalizedInitialCost = initialTotalCost / normalization;
+//      final double initialTotalCost = analyzer.calculateInitialTotalCost(serverHolderList);
+//      final double normalization = analyzer.calculateNormalization(serverHolderList);
+//      final double normalizedInitialCost = initialTotalCost / normalization;
 
-      stats.addToTieredStat("initialCost", tier, (long) initialTotalCost);
-      stats.addToTieredStat("normalization", tier, (long) normalization);
-      stats.addToTieredStat("normalizedInitialCostTimesOneThousand", tier, (long) (normalizedInitialCost * 1000));
stats.addToTieredStat("initialCost", tier, (long) initialTotalCost); +// stats.addToTieredStat("normalization", tier, (long) normalization); +// stats.addToTieredStat("normalizedInitialCostTimesOneThousand", tier, (long) (normalizedInitialCost * 1000)); stats.addToTieredStat("movedCount", tier, currentlyMovingSegments.get(tier).size()); - log.info( - "[%s]: Initial Total Cost: [%f], Normalization: [%f], Initial Normalized Cost: [%f], Segments Moved: [%d]", - tier, - initialTotalCost, - normalization, - normalizedInitialCost, - currentlyMovingSegments.get(tier).size() - ); +// log.info( +// "Segments Moved: [%d]", +// tier, +//// initialTotalCost, +//// normalization, +//// normalizedInitialCost, +// currentlyMovingSegments.get(tier).size() +// ); } return params.buildFromExisting() diff --git a/server/src/main/java/com/metamx/druid/master/rules/LoadRule.java b/server/src/main/java/com/metamx/druid/master/rules/LoadRule.java index 8a9a014aa79..7a6fb3b9158 100644 --- a/server/src/main/java/com/metamx/druid/master/rules/LoadRule.java +++ b/server/src/main/java/com/metamx/druid/master/rules/LoadRule.java @@ -98,7 +98,7 @@ public abstract class LoadRule implements Rule break; } - final ServerHolder holder = analyzer.findNewSegmentHomeAssign(segment, serverHolderList); + final ServerHolder holder = analyzer.findNewSegmentHome(segment, serverHolderList); if (holder == null) { log.warn( diff --git a/server/src/test/java/com/metamx/druid/master/DruidMasterBalancerProfiler.java b/server/src/test/java/com/metamx/druid/master/DruidMasterBalancerProfiler.java new file mode 100644 index 00000000000..31730d66bb8 --- /dev/null +++ b/server/src/test/java/com/metamx/druid/master/DruidMasterBalancerProfiler.java @@ -0,0 +1,120 @@ +package com.metamx.druid.master; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.MinMaxPriorityQueue; +import com.metamx.druid.client.DataSegment; +import com.metamx.druid.client.DruidServer; +import com.metamx.druid.shard.NoneShardSpec; +import org.easymock.EasyMock; +import org.joda.time.DateTime; +import org.joda.time.Interval; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +public class DruidMasterBalancerProfiler +{ + private static final int MAX_SEGMENTS_TO_MOVE = 5; + private DruidMaster master; + private DruidServer druidServer1; + private DruidServer druidServer2; + Map segments = Maps.newHashMap(); + DateTime start1 = new DateTime("2012-01-01"); + DateTime version = new DateTime("2012-03-01"); + + @Before + public void setUp() throws Exception + { + master = EasyMock.createMock(DruidMaster.class); + druidServer1 = EasyMock.createMock(DruidServer.class); + druidServer2 = EasyMock.createMock(DruidServer.class); + for (int i=0; i<55000;i++) + { + DataSegment segment = new DataSegment( + "datasource1", + new Interval(start1, start1.plusHours(1)), + version.toString(), + Maps.newHashMap(), + Lists.newArrayList(), + Lists.newArrayList(), + new NoneShardSpec(), + 0, + 11L + ); + segments.put("datasource"+i+"_2012-01-01T00:00:00.000Z_2012-01-01T01:00:00.000Z_2012-03-01T00:00:00.000Z",segment); + } + } + + @After + public void tearDown() throws Exception + { + EasyMock.verify(master); + EasyMock.verify(druidServer1); + EasyMock.verify(druidServer2); + } + + + @Test + public void profileRun(){ + LoadQueuePeonTester fromPeon = new LoadQueuePeonTester(); + 
+    LoadQueuePeonTester toPeon = new LoadQueuePeonTester();
+
+    EasyMock.expect(druidServer1.getName()).andReturn("from").atLeastOnce();
+    EasyMock.expect(druidServer1.getCurrSize()).andReturn(30L).atLeastOnce();
+    EasyMock.expect(druidServer1.getMaxSize()).andReturn(100L).atLeastOnce();
+    EasyMock.expect(druidServer1.getSegments()).andReturn(segments).anyTimes();
+    EasyMock.expect(druidServer1.getSegment(EasyMock.anyObject())).andReturn(null).anyTimes();
+    EasyMock.replay(druidServer1);
+
+    EasyMock.expect(druidServer2.getName()).andReturn("to").atLeastOnce();
+    EasyMock.expect(druidServer2.getTier()).andReturn("normal").anyTimes();
+    EasyMock.expect(druidServer2.getCurrSize()).andReturn(0L).atLeastOnce();
+    EasyMock.expect(druidServer2.getMaxSize()).andReturn(100L).atLeastOnce();
+    EasyMock.expect(druidServer2.getSegments()).andReturn(new HashMap()).anyTimes();
+    EasyMock.expect(druidServer2.getSegment(EasyMock.anyObject())).andReturn(null).anyTimes();
+    EasyMock.replay(druidServer2);
+
+    master.moveSegment(
+        EasyMock.anyObject(),
+        EasyMock.anyObject(),
+        EasyMock.anyObject(),
+        EasyMock.anyObject()
+    );
+    EasyMock.expectLastCall().anyTimes();
+    EasyMock.replay(master);
+
+    DruidMasterRuntimeParams params =
+        DruidMasterRuntimeParams.newBuilder()
+            .withDruidCluster(
+                new DruidCluster(
+                    ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of(
+                        "normal",
+                        MinMaxPriorityQueue.orderedBy(DruidMasterBalancerTester.percentUsedComparator)
+                                           .create(
+                                               Arrays.asList(
+                                                   new ServerHolder(druidServer1, fromPeon),
+                                                   new ServerHolder(druidServer2, toPeon)
+                                               )
+                                           )
+                    )
+                )
+            )
+            .withLoadManagementPeons(ImmutableMap.of("from", fromPeon, "to", toPeon))
+            .withAvailableSegments(segments.values())
+            .withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE)
+            .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
+            .build();
+    for (int i=0;i<1000;i++)
+    {
+      params = new DruidMasterBalancerTester(master).run(params);
+    }
+
+  }
+
+}