Add deep storage segment metric (#16072)

* Add new metric for deepStorage segments

* Add docs

* change metric name
George Shiqi Wu 2024-03-11 10:24:46 -04:00 committed by GitHub
parent 2dd8b16467
commit 94d2a28465
4 changed files with 34 additions and 0 deletions


@@ -332,6 +332,7 @@ These metrics are for the Druid Coordinator and are reset each time the Coordina
|`segment/unneededEternityTombstone/count`|Number of non-overshadowed eternity tombstones marked as unused.| |Varies|
|`segment/unavailable/count`|Number of unique segments left to load until all used segments are available for queries.|`dataSource`|0|
|`segment/underReplicated/count`|Number of segments, including replicas, left to load until all used segments are available for queries.|`tier`, `dataSource`|0|
|`segment/availableDeepStorageOnly/count`|Number of unique segments that are only available for querying directly from deep storage.|`dataSource`|Varies|
|`tier/historical/count`|Number of available historical nodes in each tier.|`tier`|Varies|
|`tier/replication/factor`|Configured maximum replication factor in each tier.|`tier`|Varies|
|`tier/required/capacity`|Total capacity in bytes required in each tier.|`tier`|Varies|
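
For context on the new `segment/availableDeepStorageOnly/count` row above: judging from the coordinator change in this commit, a used segment is counted when it has zero replicas loaded on any server and zero replicas required by its load rules, so it can only be queried directly from deep storage. A minimal standalone sketch of that rule (illustrative only, not Druid source; the class and method names are made up):

```java
// Illustrative only, not Druid source; class and method names are made up.
public class DeepStorageOnlyRule
{
  /**
   * True when no replica of the segment is loaded on any server and no replica
   * is required by its load rules, i.e. it is queryable only from deep storage.
   */
  public static boolean isAvailableFromDeepStorageOnly(int loadedReplicas, int requiredReplicas)
  {
    return loadedReplicas == 0 && requiredReplicas == 0;
  }
}
```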


@@ -252,6 +252,25 @@ public class DruidCoordinator
    return datasourceToUnavailableSegments;
  }

  public Object2IntMap<String> getDatasourceToDeepStorageQueryOnlySegmentCount()
  {
    if (segmentReplicationStatus == null) {
      return Object2IntMaps.emptyMap();
    }

    final Object2IntOpenHashMap<String> datasourceToDeepStorageOnlySegments = new Object2IntOpenHashMap<>();
    final Iterable<DataSegment> dataSegments = metadataManager.segments().iterateAllUsedSegments();
    for (DataSegment segment : dataSegments) {
      SegmentReplicaCount replicaCount = segmentReplicationStatus.getReplicaCountsInCluster(segment.getId());
      if (replicaCount != null && replicaCount.totalLoaded() == 0 && replicaCount.required() == 0) {
        datasourceToDeepStorageOnlySegments.addTo(segment.getDataSource(), 1);
      }
    }
    return datasourceToDeepStorageOnlySegments;
  }

  public Map<String, Double> getDatasourceToLoadStatus()
  {
    final Map<String, Double> loadStatus = new HashMap<>();
@@ -762,6 +781,13 @@ public class DruidCoordinator
                stats.addToSegmentStat(Stats.Segments.UNDER_REPLICATED, tier, dataSource, underReplicatedCount)
        )
    );

    getDatasourceToDeepStorageQueryOnlySegmentCount().forEach(
        (dataSource, numDeepStorageOnly) -> stats.add(
            Stats.Segments.DEEP_STORAGE_ONLY,
            RowKey.of(Dimension.DATASOURCE, dataSource),
            numDeepStorageOnly
        )
    );

    return params;
  }
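
To make the shape of the emitted data concrete, here is a small self-contained sketch using plain JDK collections in place of the fastutil `Object2IntOpenHashMap` and `CoordinatorRunStats` types above; the datasource names and replica counts are made up. It mirrors the counting rule in `getDatasourceToDeepStorageQueryOnlySegmentCount()` and prints one row per `dataSource` dimension value, as the Coordinator reports for `segment/availableDeepStorageOnly/count`:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative only, not Druid source.
public class DeepStorageOnlyCountSketch
{
  // Hypothetical per-segment replica summary: (datasource, loaded replicas, required replicas).
  record SegmentReplicas(String dataSource, int loaded, int required) {}

  public static void main(String[] args)
  {
    List<SegmentReplicas> usedSegments = List.of(
        new SegmentReplicas("wikipedia", 0, 0),  // queryable from deep storage only
        new SegmentReplicas("wikipedia", 2, 2),  // loaded on historicals
        new SegmentReplicas("koalas", 0, 0)      // queryable from deep storage only
    );

    // Same counting rule as getDatasourceToDeepStorageQueryOnlySegmentCount().
    Map<String, Integer> countsPerDatasource = new HashMap<>();
    for (SegmentReplicas segment : usedSegments) {
      if (segment.loaded() == 0 && segment.required() == 0) {
        countsPerDatasource.merge(segment.dataSource(), 1, Integer::sum);
      }
    }

    // One metric row per datasource under segment/availableDeepStorageOnly/count.
    countsPerDatasource.forEach(
        (dataSource, count) ->
            System.out.println("segment/availableDeepStorageOnly/count{dataSource=" + dataSource + "} = " + count)
    );
    // Expected output (order may vary):
    //   segment/availableDeepStorageOnly/count{dataSource=wikipedia} = 1
    //   segment/availableDeepStorageOnly/count{dataSource=koalas} = 1
  }
}
```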


@@ -53,6 +53,8 @@ public class Stats
        = CoordinatorStat.toDebugAndEmit("underreplicated", "segment/underReplicated/count");
    public static final CoordinatorStat UNAVAILABLE
        = CoordinatorStat.toDebugAndEmit("unavailable", "segment/unavailable/count");
    public static final CoordinatorStat DEEP_STORAGE_ONLY
        = CoordinatorStat.toDebugAndEmit("deepStorageOnly", "segment/availableDeepStorageOnly/count");
    public static final CoordinatorStat UNNEEDED
        = CoordinatorStat.toDebugAndEmit("unneeded", "segment/unneeded/count");
    public static final CoordinatorStat OVERSHADOWED


@@ -895,6 +895,11 @@ public class DruidCoordinatorTest extends CuratorTestBase
    Assert.assertNotNull(underRepliicationCountsPerDataSourceColdTier);
    Assert.assertEquals(0, underRepliicationCountsPerDataSourceColdTier.getLong(dataSource));

    Object2IntMap<String> numsDeepStorageOnlySegmentsPerDataSource =
        coordinator.getDatasourceToDeepStorageQueryOnlySegmentCount();
    Assert.assertEquals(1, numsDeepStorageOnlySegmentsPerDataSource.getInt(dataSource));

    coordinator.stop();
    leaderUnannouncerLatch.await();