profiler added, commented out initialTotalCost normalization and normalizedInitialCost

Dhruv Parthasarathy 2013-07-24 14:57:02 -07:00
parent 6dc8d09b00
commit bcb3e089d8
4 changed files with 137 additions and 32 deletions

BalancerCostAnalyzer.java

@@ -178,7 +178,7 @@ public class BalancerCostAnalyzer
    *
    * @return A ServerHolder with the new home for a segment.
    */
-  public ServerHolder findNewSegmentHomeBalance(
+  public ServerHolder findNewSegmentHome(
       final DataSegment proposalSegment,
       final Iterable<ServerHolder> serverHolders
   )
@@ -194,21 +194,6 @@ public class BalancerCostAnalyzer
    *
    * @return A ServerHolder with the new home for a segment.
    */
-  public ServerHolder findNewSegmentHomeAssign(
-      final DataSegment proposalSegment,
-      final Iterable<ServerHolder> serverHolders
-  )
-  {
-    MinMaxPriorityQueue<Pair<Double, ServerHolder>> costsAndServers = computeCosts(proposalSegment, serverHolders);
-    while (!costsAndServers.isEmpty()) {
-      ServerHolder toServer = costsAndServers.pollFirst().rhs;
-      if (!toServer.isServingSegment(proposalSegment)) {
-        return toServer;
-      }
-    }
-    return null;
-  }
   private Pair<Double, ServerHolder> computeCosts(
       final DataSegment proposalSegment,
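
The two per-caller variants collapse into a single findNewSegmentHome, which both DruidMasterBalancer and LoadRule call below. The unified body is not shown in this diff; a minimal sketch, assuming it keeps the cost-queue poll loop of the removed findNewSegmentHomeAssign:

    public ServerHolder findNewSegmentHome(
        final DataSegment proposalSegment,
        final Iterable<ServerHolder> serverHolders
    )
    {
      // Rank every candidate server by the cost of hosting the proposal segment.
      MinMaxPriorityQueue<Pair<Double, ServerHolder>> costsAndServers = computeCosts(proposalSegment, serverHolders);
      // Return the cheapest server that is not already serving the segment.
      while (!costsAndServers.isEmpty()) {
        ServerHolder toServer = costsAndServers.pollFirst().rhs;
        if (!toServer.isServingSegment(proposalSegment)) {
          return toServer;
        }
      }
      // No eligible server was found.
      return null;
    }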

DruidMasterBalancer.java

@@ -116,7 +116,7 @@ public class DruidMasterBalancer implements DruidMasterHelper
       final BalancerSegmentHolder segmentToMove = analyzer.pickSegmentToMove(serverHolderList);
       if (params.getAvailableSegments().contains(segmentToMove.getSegment())) {
-        final ServerHolder holder = analyzer.findNewSegmentHomeBalance(segmentToMove.getSegment(), serverHolderList);
+        final ServerHolder holder = analyzer.findNewSegmentHome(segmentToMove.getSegment(), serverHolderList);
         if (holder != null) {
           moveSegment(segmentToMove, holder.getServer(), params);
@@ -124,23 +124,23 @@ public class DruidMasterBalancer implements DruidMasterHelper
         }
       }
-      final double initialTotalCost = analyzer.calculateInitialTotalCost(serverHolderList);
-      final double normalization = analyzer.calculateNormalization(serverHolderList);
-      final double normalizedInitialCost = initialTotalCost / normalization;
-      stats.addToTieredStat("initialCost", tier, (long) initialTotalCost);
-      stats.addToTieredStat("normalization", tier, (long) normalization);
-      stats.addToTieredStat("normalizedInitialCostTimesOneThousand", tier, (long) (normalizedInitialCost * 1000));
+//      final double initialTotalCost = analyzer.calculateInitialTotalCost(serverHolderList);
+//      final double normalization = analyzer.calculateNormalization(serverHolderList);
+//      final double normalizedInitialCost = initialTotalCost / normalization;
+//      stats.addToTieredStat("initialCost", tier, (long) initialTotalCost);
+//      stats.addToTieredStat("normalization", tier, (long) normalization);
+//      stats.addToTieredStat("normalizedInitialCostTimesOneThousand", tier, (long) (normalizedInitialCost * 1000));
       stats.addToTieredStat("movedCount", tier, currentlyMovingSegments.get(tier).size());
-      log.info(
-          "[%s]: Initial Total Cost: [%f], Normalization: [%f], Initial Normalized Cost: [%f], Segments Moved: [%d]",
-          tier,
-          initialTotalCost,
-          normalization,
-          normalizedInitialCost,
-          currentlyMovingSegments.get(tier).size()
-      );
+//      log.info(
+//          "Segments Moved: [%d]",
+//          tier,
+////          initialTotalCost,
+////          normalization,
+////          normalizedInitialCost,
+//          currentlyMovingSegments.get(tier).size()
+//      );
     }
     return params.buildFromExisting()
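
The disabled block was the tier-level cost telemetry: initialTotalCost over the tier, divided by a normalization factor to give normalizedInitialCost, then emitted as tiered stats (scaled by 1000 because stats are stored as longs). Should it be re-enabled later, guarding the division would be prudent; a sketch using only the analyzer calls shown above, with the zero check as an added assumption:

    // Sketch of re-enabling the cost stats; the zero-normalization guard is an
    // illustrative addition, not part of this commit.
    final double initialTotalCost = analyzer.calculateInitialTotalCost(serverHolderList);
    final double normalization = analyzer.calculateNormalization(serverHolderList);
    if (normalization != 0) {
      final double normalizedInitialCost = initialTotalCost / normalization;
      // Multiply by 1000 so three decimal places survive the cast to long.
      stats.addToTieredStat("normalizedInitialCostTimesOneThousand", tier, (long) (normalizedInitialCost * 1000));
    }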

LoadRule.java

@@ -98,7 +98,7 @@ public abstract class LoadRule implements Rule
         break;
       }
-      final ServerHolder holder = analyzer.findNewSegmentHomeAssign(segment, serverHolderList);
+      final ServerHolder holder = analyzer.findNewSegmentHome(segment, serverHolderList);
       if (holder == null) {
         log.warn(

DruidMasterBalancerProfiler.java (new file)

@@ -0,0 +1,120 @@
+package com.metamx.druid.master;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.MinMaxPriorityQueue;
+import com.metamx.druid.client.DataSegment;
+import com.metamx.druid.client.DruidServer;
+import com.metamx.druid.shard.NoneShardSpec;
+import org.easymock.EasyMock;
+import org.joda.time.DateTime;
+import org.joda.time.Interval;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+public class DruidMasterBalancerProfiler
+{
+  private static final int MAX_SEGMENTS_TO_MOVE = 5;
+  private DruidMaster master;
+  private DruidServer druidServer1;
+  private DruidServer druidServer2;
+  Map<String, DataSegment> segments = Maps.newHashMap();
+  DateTime start1 = new DateTime("2012-01-01");
+  DateTime version = new DateTime("2012-03-01");
+
+  @Before
+  public void setUp() throws Exception
+  {
+    master = EasyMock.createMock(DruidMaster.class);
+    druidServer1 = EasyMock.createMock(DruidServer.class);
+    druidServer2 = EasyMock.createMock(DruidServer.class);
+    for (int i = 0; i < 55000; i++) {
+      DataSegment segment = new DataSegment(
+          "datasource1",
+          new Interval(start1, start1.plusHours(1)),
+          version.toString(),
+          Maps.<String, Object>newHashMap(),
+          Lists.<String>newArrayList(),
+          Lists.<String>newArrayList(),
+          new NoneShardSpec(),
+          0,
+          11L
+      );
+      segments.put("datasource" + i + "_2012-01-01T00:00:00.000Z_2012-01-01T01:00:00.000Z_2012-03-01T00:00:00.000Z", segment);
+    }
+  }
+
+  @After
+  public void tearDown() throws Exception
+  {
+    EasyMock.verify(master);
+    EasyMock.verify(druidServer1);
+    EasyMock.verify(druidServer2);
+  }
+
+  @Test
+  public void profileRun()
+  {
+    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
+    LoadQueuePeonTester toPeon = new LoadQueuePeonTester();
+
+    EasyMock.expect(druidServer1.getName()).andReturn("from").atLeastOnce();
+    EasyMock.expect(druidServer1.getCurrSize()).andReturn(30L).atLeastOnce();
+    EasyMock.expect(druidServer1.getMaxSize()).andReturn(100L).atLeastOnce();
+    EasyMock.expect(druidServer1.getSegments()).andReturn(segments).anyTimes();
+    EasyMock.expect(druidServer1.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
+    EasyMock.replay(druidServer1);
+
+    EasyMock.expect(druidServer2.getName()).andReturn("to").atLeastOnce();
+    EasyMock.expect(druidServer2.getTier()).andReturn("normal").anyTimes();
+    EasyMock.expect(druidServer2.getCurrSize()).andReturn(0L).atLeastOnce();
+    EasyMock.expect(druidServer2.getMaxSize()).andReturn(100L).atLeastOnce();
+    EasyMock.expect(druidServer2.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
+    EasyMock.expect(druidServer2.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
+    EasyMock.replay(druidServer2);
+
+    master.moveSegment(
+        EasyMock.<String>anyObject(),
+        EasyMock.<String>anyObject(),
+        EasyMock.<String>anyObject(),
+        EasyMock.<LoadPeonCallback>anyObject()
+    );
+    EasyMock.expectLastCall().anyTimes();
+    EasyMock.replay(master);
+
+    DruidMasterRuntimeParams params =
+        DruidMasterRuntimeParams.newBuilder()
+            .withDruidCluster(
+                new DruidCluster(
+                    ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of(
+                        "normal",
+                        MinMaxPriorityQueue.orderedBy(DruidMasterBalancerTester.percentUsedComparator)
+                            .create(
+                                Arrays.asList(
+                                    new ServerHolder(druidServer1, fromPeon),
+                                    new ServerHolder(druidServer2, toPeon)
+                                )
+                            )
+                    )
+                )
+            )
+            .withLoadManagementPeons(ImmutableMap.<String, LoadQueuePeon>of("from", fromPeon, "to", toPeon))
+            .withAvailableSegments(segments.values())
+            .withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE)
+            .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
+            .build();
+
+    for (int i = 0; i < 1000; i++) {
+      params = new DruidMasterBalancerTester(master).run(params);
+    }
+  }
+}
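
profileRun drives DruidMasterBalancerTester through 1,000 master cycles over the 55,000 mocked segments and leaves measurement to an external profiler attached to the JVM. A coarse wall-clock number could also be printed inline; the timing wrapper below is an illustrative addition, not part of the committed test:

    // Illustrative timing around the committed loop (not in this commit).
    long startNanos = System.nanoTime();
    for (int i = 0; i < 1000; i++) {
      params = new DruidMasterBalancerTester(master).run(params);
    }
    long elapsedMillis = (System.nanoTime() - startNanos) / 1000000L;
    System.out.println("1000 balancer runs took " + elapsedMillis + " ms");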