From 441e5a94c8296aac27336ddec2e3a4f8106346ba Mon Sep 17 00:00:00 2001 From: xuan-cao-swi Date: Fri, 22 Aug 2025 12:44:04 -0400 Subject: [PATCH 01/13] feat(metrics)!: add cardinality_limit --- .../sdk/metrics/aggregation/sum.rb | 137 +++++++++++++++--- .../sdk/metrics/export/metric_reader.rb | 5 +- .../sdk/metrics/state/metric_store.rb | 4 +- .../sdk/metrics/state/metric_stream.rb | 30 +++- .../sdk/metrics/view/registered_view.rb | 3 +- 5 files changed, 147 insertions(+), 32 deletions(-) diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb index fc1e65a4f8..7de732f83f 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb @@ -9,54 +9,147 @@ module SDK module Metrics module Aggregation # Contains the implementation of the Sum aggregation - # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#sum-aggregation class Sum + OVERFLOW_ATTRIBUTE_SET = { 'otel.metric.overflow' => true }.freeze + def initialize(aggregation_temporality: ENV.fetch('OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE', :cumulative), monotonic: false, instrument_kind: nil) @aggregation_temporality = AggregationTemporality.determine_temporality(aggregation_temporality: aggregation_temporality, instrument_kind: instrument_kind, default: :cumulative) @monotonic = monotonic + @overflow_started = false + @pre_overflow_attributes = Set.new if @aggregation_temporality.cumulative? end - def collect(start_time, end_time, data_points) + def collect(start_time, end_time, data_points, cardinality_limit) if @aggregation_temporality.delta? - # Set timestamps and 'move' data point values to result. - ndps = data_points.values.map! do |ndp| - ndp.start_time_unix_nano = start_time - ndp.time_unix_nano = end_time - ndp + collect_delta(start_time, end_time, data_points, cardinality_limit) + else + collect_cumulative(start_time, end_time, data_points, cardinality_limit) + end + end + + def update(increment, attributes, data_points, cardinality_limit) + return if @monotonic && increment < 0 + + # Check if we already have this attribute set + if data_points.key?(attributes) + data_points[attributes].value += increment + return + end + + # For cumulative: track pre-overflow attributes + if @aggregation_temporality.cumulative? + if !@overflow_started && data_points.size < cardinality_limit + @pre_overflow_attributes.add(attributes) + create_new_data_point(attributes, increment, data_points) + return + elsif @pre_overflow_attributes.include?(attributes) + # Allow pre-overflow attributes even after overflow started + create_new_data_point(attributes, increment, data_points) + return end - data_points.clear - ndps else - # Update timestamps and take a snapshot. - data_points.values.map! do |ndp| - ndp.start_time_unix_nano ||= start_time # Start time of a data point is from the first observation. - ndp.time_unix_nano = end_time - ndp.dup + # For delta: simple size check + if data_points.size < cardinality_limit + create_new_data_point(attributes, increment, data_points) + return end end + + # Overflow case: aggregate into overflow data point + @overflow_started = true + overflow_ndp = data_points[OVERFLOW_ATTRIBUTE_SET] || data_points[OVERFLOW_ATTRIBUTE_SET] = NumberDataPoint.new( + OVERFLOW_ATTRIBUTE_SET, + nil, + nil, + 0, + nil + ) + overflow_ndp.value += increment end def monotonic? @monotonic end - def update(increment, attributes, data_points) - return if @monotonic && increment < 0 + def aggregation_temporality + @aggregation_temporality.temporality + end + + private - ndp = data_points[attributes] || data_points[attributes] = NumberDataPoint.new( + def create_new_data_point(attributes, increment, data_points) + data_points[attributes] = NumberDataPoint.new( attributes, nil, nil, - 0, + increment, nil ) + end + + def collect_cumulative(start_time, end_time, data_points, cardinality_limit) + # Cumulative: return all data points (including overflow if present) + result = data_points.values.map do |ndp| + ndp.start_time_unix_nano ||= start_time + ndp.time_unix_nano = end_time + ndp.dup + end - ndp.value += increment - nil + # Apply cardinality limit if we have more points than limit + apply_cardinality_limit_to_result(result, cardinality_limit) end - def aggregation_temporality - @aggregation_temporality.temporality + def collect_delta(start_time, end_time, data_points, cardinality_limit) + # Delta: can choose arbitrary subset each collection + all_points = data_points.values + + if all_points.size <= cardinality_limit + # All points fit within limit + result = all_points.map do |ndp| + ndp.start_time_unix_nano = start_time + ndp.time_unix_nano = end_time + ndp + end + else + # Apply cardinality limit by choosing subset + overflow + selected_points = choose_delta_subset(all_points, cardinality_limit - 1) + remaining_points = all_points - selected_points + + result = selected_points.map do |ndp| + ndp.start_time_unix_nano = start_time + ndp.time_unix_nano = end_time + ndp + end + + # Create overflow point for remaining + if remaining_points.any? + overflow_value = remaining_points.sum(&:value) + overflow_point = NumberDataPoint.new( + OVERFLOW_ATTRIBUTE_SET, + start_time, + end_time, + overflow_value, + nil + ) + result << overflow_point + end + end + + data_points.clear + result + end + + def apply_cardinality_limit_to_result(result, cardinality_limit) + return result if result.size <= cardinality_limit + + # For cumulative, we should have already enforced this in update() + # But as safety net, keep first N points + result.first(cardinality_limit) + end + + def choose_delta_subset(points, count) + # Strategy: keep points with highest absolute values + points.sort_by { |point| -point.value.abs }.first(count) end end end diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/export/metric_reader.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/export/metric_reader.rb index c261f4f10a..9969f3a8ab 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/export/metric_reader.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/export/metric_reader.rb @@ -14,12 +14,13 @@ module Export class MetricReader attr_reader :metric_store - def initialize + def initialize(aggregation_cardinality_limit: nil) @metric_store = OpenTelemetry::SDK::Metrics::State::MetricStore.new + @cardinality_limit = aggregation_cardinality_limit end def collect - @metric_store.collect + @metric_store.collect(cardinality_limit: @cardinality_limit) end def shutdown(timeout: nil) diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_store.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_store.rb index 1153e55b16..53848f33d5 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_store.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_store.rb @@ -20,10 +20,10 @@ def initialize @metric_streams = [] end - def collect + def collect(cardinality_limit: nil) @mutex.synchronize do @epoch_end_time = now_in_nano - snapshot = @metric_streams.flat_map { |ms| ms.collect(@epoch_start_time, @epoch_end_time) } + snapshot = @metric_streams.flat_map { |ms| ms.collect(@epoch_start_time, @epoch_end_time, cardinality_limit: cardinality_limit) } @epoch_start_time = @epoch_end_time snapshot diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb index 3fa3bd684c..5a17bdf749 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb @@ -15,6 +15,8 @@ module State class MetricStream attr_reader :name, :description, :unit, :instrument_kind, :instrumentation_scope, :data_points + DEFAULT_CARDINALITY_LIMIT = 2000 + def initialize( name, description, @@ -38,7 +40,7 @@ def initialize( @mutex = Mutex.new end - def collect(start_time, end_time) + def collect(start_time, end_time, cardinality_limit: nil) @mutex.synchronize do metric_data = [] @@ -46,15 +48,27 @@ def collect(start_time, end_time) return metric_data if @data_points.empty? if @registered_views.empty? - metric_data << aggregate_metric_data(start_time, end_time) + resolved_cardinality_limit = resolve_cardinality_limit(view: nil, cardinality_limit: cardinality_limit) + metric_data << aggregate_metric_data(start_time, + end_time, + resolved_cardinality_limit) else - @registered_views.each { |view| metric_data << aggregate_metric_data(start_time, end_time, aggregation: view.aggregation) } + @registered_views.each do |view| + resolved_cardinality_limit = resolve_cardinality_limit(view: view, cardinality_limit: cardinality_limit) + metric_data << aggregate_metric_data(start_time, + end_time, + resolved_cardinality_limit, + aggregation: view.aggregation) + end end metric_data end end + # view has the cardinality, pass to aggregation update + # to determine if aggregation have the cardinality + # if the aggregation does not have the cardinality, then it will be default 2000 def update(value, attributes) if @registered_views.empty? @mutex.synchronize { @default_aggregation.update(value, attributes, @data_points) } @@ -69,7 +83,7 @@ def update(value, attributes) end end - def aggregate_metric_data(start_time, end_time, aggregation: nil) + def aggregate_metric_data(start_time, end_time, cardinality_limit, aggregation: nil) aggregator = aggregation || @default_aggregation is_monotonic = aggregator.respond_to?(:monotonic?) ? aggregator.monotonic? : nil aggregation_temporality = aggregator.respond_to?(:aggregation_temporality) ? aggregator.aggregation_temporality : nil @@ -81,7 +95,7 @@ def aggregate_metric_data(start_time, end_time, aggregation: nil) @instrument_kind, @meter_provider.resource, @instrumentation_scope, - aggregator.collect(start_time, end_time, @data_points), + aggregator.collect(start_time, end_time, @data_points, cardinality_limit: cardinality_limit), aggregation_temporality, start_time, end_time, @@ -95,6 +109,12 @@ def find_registered_view @meter_provider.registered_views.each { |view| @registered_views << view if view.match_instrument?(self) } end + def resolve_cardinality_limit(view: nil, cardinality_limit: nil) + return view.aggregation_cardinality_limit if view&.aggregation_cardinality_limit + return cardinality_limit if cardinality_limit + DEFAULT_CARDINALITY_LIMIT + end + def to_s instrument_info = +'' instrument_info << "name=#{@name}" diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/view/registered_view.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/view/registered_view.rb index 881f0f261e..5a4e3c971f 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/view/registered_view.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/view/registered_view.rb @@ -10,13 +10,14 @@ module Metrics module View # RegisteredView is an internal class used to match Views with a given {MetricStream} class RegisteredView - attr_reader :name, :aggregation, :attribute_keys, :regex + attr_reader :name, :aggregation, :attribute_keys, :regex, :aggregation_cardinality_limit def initialize(name, **options) @name = name @options = options @aggregation = options[:aggregation] @attribute_keys = options[:attribute_keys] || {} + @aggregation_cardinality_limit = options[:aggregation_cardinality_limit] generate_regex_pattern(name) end From 50c672989701b395ed45f654dc142c9469772d9b Mon Sep 17 00:00:00 2001 From: xuan-cao-swi Date: Fri, 22 Aug 2025 13:00:59 -0400 Subject: [PATCH 02/13] update remaining aggregation --- .../sdk/metrics/aggregation/drop.rb | 4 +- .../aggregation/explicit_bucket_histogram.rb | 168 +++++++++++++--- .../exponential_bucket_histogram.rb | 187 ++++++++++++++---- .../sdk/metrics/aggregation/last_value.rb | 85 +++++++- 4 files changed, 370 insertions(+), 74 deletions(-) diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/drop.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/drop.rb index 65e351b328..fc9dd4a12d 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/drop.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/drop.rb @@ -10,11 +10,11 @@ module Metrics module Aggregation # Contains the implementation of the Drop aggregation class Drop - def collect(start_time, end_time, data_points) + def collect(start_time, end_time, data_points, cardinality_limit: nil) data_points.values.map!(&:dup) end - def update(increment, attributes, data_points) + def update(increment, attributes, data_points, cardinality_limit: nil) data_points[attributes] = NumberDataPoint.new( {}, 0, diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb index 768036fc86..7412508870 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb @@ -11,6 +11,7 @@ module Aggregation # Contains the implementation of the ExplicitBucketHistogram aggregation # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation class ExplicitBucketHistogram + OVERFLOW_ATTRIBUTE_SET = { 'otel.metric.overflow' => true }.freeze DEFAULT_BOUNDARIES = [0, 5, 10, 25, 50, 75, 100, 250, 500, 1000].freeze private_constant :DEFAULT_BOUNDARIES @@ -26,21 +27,61 @@ def initialize( @aggregation_temporality = AggregationTemporality.determine_temporality(aggregation_temporality: aggregation_temporality, default: :cumulative) @boundaries = boundaries && !boundaries.empty? ? boundaries.sort : nil @record_min_max = record_min_max + @overflow_started = false end - def collect(start_time, end_time, data_points) + def collect(start_time, end_time, data_points, cardinality_limit: 2000) + all_points = data_points.values + + # Apply cardinality limit + if all_points.size <= cardinality_limit + result = process_all_points(all_points, start_time, end_time) + else + result = process_with_cardinality_limit(all_points, start_time, end_time, cardinality_limit) + end + + data_points.clear if @aggregation_temporality.delta? + result + end + + def update(amount, attributes, data_points, cardinality_limit: 2000) + # Check if we already have this attribute set + if data_points.key?(attributes) + hdp = data_points[attributes] + else + # Check cardinality limit for new attribute sets + if data_points.size >= cardinality_limit + # Overflow: aggregate into overflow data point + @overflow_started = true + hdp = data_points[OVERFLOW_ATTRIBUTE_SET] || create_overflow_data_point(data_points) + else + # Normal case - create new data point + hdp = create_new_data_point(attributes, data_points) + end + end + + # Update the histogram data point + update_histogram_data_point(hdp, amount) + nil + end + + def aggregation_temporality + @aggregation_temporality.temporality + end + + private + + def process_all_points(all_points, start_time, end_time) if @aggregation_temporality.delta? # Set timestamps and 'move' data point values to result. - hdps = data_points.values.map! do |hdp| + all_points.map! do |hdp| hdp.start_time_unix_nano = start_time hdp.time_unix_nano = end_time hdp end - data_points.clear - hdps else # Update timestamps and take a snapshot. - data_points.values.map! do |hdp| + all_points.map! do |hdp| hdp.start_time_unix_nano ||= start_time # Start time of a data point is from the first observation. hdp.time_unix_nano = end_time hdp = hdp.dup @@ -50,27 +91,103 @@ def collect(start_time, end_time, data_points) end end - def update(amount, attributes, data_points) - hdp = data_points.fetch(attributes) do - if @record_min_max - min = Float::INFINITY - max = -Float::INFINITY + def process_with_cardinality_limit(all_points, start_time, end_time, cardinality_limit) + # Choose subset of histograms (prefer those with higher counts) + selected_points = choose_histogram_subset(all_points, cardinality_limit - 1) + remaining_points = all_points - selected_points + + result = process_all_points(selected_points, start_time, end_time) + + # Create overflow histogram by merging remaining points + if remaining_points.any? + overflow_point = merge_histogram_points(remaining_points, start_time, end_time) + result << overflow_point + end + + result + end + + def choose_histogram_subset(points, count) + # Strategy: keep histograms with highest counts (most data) + points.sort_by { |hdp| -hdp.count }.first(count) + end + + def merge_histogram_points(points, start_time, end_time) + # Create a merged histogram with overflow attributes + merged_bucket_counts = empty_bucket_counts + + merged = HistogramDataPoint.new( + OVERFLOW_ATTRIBUTE_SET, + start_time, + end_time, + 0, # count + 0.0, # sum + merged_bucket_counts, + @boundaries, + nil, # exemplars + Float::INFINITY, # min + -Float::INFINITY # max + ) + + # Merge all remaining points into the overflow point + points.each do |hdp| + merged.count += hdp.count + merged.sum += hdp.sum + merged.min = [merged.min, hdp.min].min if hdp.min + merged.max = [merged.max, hdp.max].max if hdp.max + + # Merge bucket counts + if merged_bucket_counts && hdp.bucket_counts + hdp.bucket_counts.each_with_index do |count, index| + merged_bucket_counts[index] += count + end end + end + + merged + end + + def create_overflow_data_point(data_points) + if @record_min_max + min = Float::INFINITY + max = -Float::INFINITY + end - data_points[attributes] = HistogramDataPoint.new( - attributes, - nil, # :start_time_unix_nano - nil, # :time_unix_nano - 0, # :count - 0, # :sum - empty_bucket_counts, # :bucket_counts - @boundaries, # :explicit_bounds - nil, # :exemplars - min, # :min - max # :max - ) + data_points[OVERFLOW_ATTRIBUTE_SET] = HistogramDataPoint.new( + OVERFLOW_ATTRIBUTE_SET, + nil, # :start_time_unix_nano + nil, # :time_unix_nano + 0, # :count + 0, # :sum + empty_bucket_counts, # :bucket_counts + @boundaries, # :explicit_bounds + nil, # :exemplars + min, # :min + max # :max + ) + end + + def create_new_data_point(attributes, data_points) + if @record_min_max + min = Float::INFINITY + max = -Float::INFINITY end + data_points[attributes] = HistogramDataPoint.new( + attributes, + nil, # :start_time_unix_nano + nil, # :time_unix_nano + 0, # :count + 0, # :sum + empty_bucket_counts, # :bucket_counts + @boundaries, # :explicit_bounds + nil, # :exemplars + min, # :min + max # :max + ) + end + + def update_histogram_data_point(hdp, amount) if @record_min_max hdp.max = amount if amount > hdp.max hdp.min = amount if amount < hdp.min @@ -82,15 +199,8 @@ def update(amount, attributes, data_points) bucket_index = @boundaries.bsearch_index { |i| i >= amount } || @boundaries.size hdp.bucket_counts[bucket_index] += 1 end - nil end - def aggregation_temporality - @aggregation_temporality.temporality - end - - private - def empty_bucket_counts @boundaries ? Array.new(@boundaries.size + 1, 0) : nil end diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb index de82871a53..28ac9c3da1 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb @@ -16,6 +16,8 @@ module Metrics module Aggregation # Contains the implementation of the {https://opentelemetry.io/docs/specs/otel/metrics/data-model/#exponentialhistogram ExponentialBucketHistogram} aggregation class ExponentialBucketHistogram # rubocop:disable Metrics/ClassLength + OVERFLOW_ATTRIBUTE_SET = { 'otel.metric.overflow' => true }.freeze + # relate to min max scale: https://opentelemetry.io/docs/specs/otel/metrics/sdk/#support-a-minimum-and-maximum-scale DEFAULT_SIZE = 160 DEFAULT_SCALE = 20 @@ -42,23 +44,61 @@ def initialize( @zero_count = 0 @size = validate_size(max_size) @scale = validate_scale(max_scale) + @overflow_started = false @mapping = new_mapping(@scale) end - def collect(start_time, end_time, data_points) + def collect(start_time, end_time, data_points, cardinality_limit: 2000) + all_points = data_points.values + + # Apply cardinality limit + if all_points.size <= cardinality_limit + result = process_all_points(all_points, start_time, end_time) + else + result = process_with_cardinality_limit(all_points, start_time, end_time, cardinality_limit) + end + + data_points.clear if @aggregation_temporality.delta? + result + end + + # rubocop:disable Metrics/MethodLength + def update(amount, attributes, data_points, cardinality_limit: 2000) + # Check if we already have this attribute set + if data_points.key?(attributes) + hdp = data_points[attributes] + else + # Check cardinality limit for new attribute sets + if data_points.size >= cardinality_limit + # Overflow: aggregate into overflow data point + @overflow_started = true + hdp = data_points[OVERFLOW_ATTRIBUTE_SET] || create_overflow_data_point(data_points) + else + # Normal case - create new data point + hdp = create_new_data_point(attributes, data_points) + end + end + + # Update the histogram data point with the new amount + update_histogram_data_point(hdp, amount) + nil + end + # rubocop:enable Metrics/MethodLength + + private + + def process_all_points(all_points, start_time, end_time) if @aggregation_temporality.delta? # Set timestamps and 'move' data point values to result. - hdps = data_points.values.map! do |hdp| + all_points.map! do |hdp| hdp.start_time_unix_nano = start_time hdp.time_unix_nano = end_time hdp end - data_points.clear - hdps else # Update timestamps and take a snapshot. - data_points.values.map! do |hdp| + all_points.map! do |hdp| hdp.start_time_unix_nano ||= start_time # Start time of a data point is from the first observation. hdp.time_unix_nano = end_time hdp = hdp.dup @@ -69,33 +109,110 @@ def collect(start_time, end_time, data_points) end end - # rubocop:disable Metrics/MethodLength - def update(amount, attributes, data_points) - # fetch or initialize the ExponentialHistogramDataPoint - hdp = data_points.fetch(attributes) do - if @record_min_max - min = Float::INFINITY - max = -Float::INFINITY - end + def process_with_cardinality_limit(all_points, start_time, end_time, cardinality_limit) + # Choose subset of histograms (prefer those with higher counts) + selected_points = choose_histogram_subset(all_points, cardinality_limit - 1) + remaining_points = all_points - selected_points - data_points[attributes] = ExponentialHistogramDataPoint.new( - attributes, - nil, # :start_time_unix_nano - 0, # :time_unix_nano - 0, # :count - 0, # :sum - @scale, # :scale - @zero_count, # :zero_count - ExponentialHistogram::Buckets.new, # :positive - ExponentialHistogram::Buckets.new, # :negative - 0, # :flags - nil, # :exemplars - min, # :min - max, # :max - @zero_threshold # :zero_threshold) - ) + result = process_all_points(selected_points, start_time, end_time) + + # Create overflow histogram by merging remaining points + if remaining_points.any? + overflow_point = merge_histogram_points(remaining_points, start_time, end_time) + result << overflow_point end + result + end + + def choose_histogram_subset(points, count) + # Strategy: keep histograms with highest counts (most data) + points.sort_by { |hdp| -hdp.count }.first(count) + end + + def merge_histogram_points(points, start_time, end_time) + # Create a merged histogram with overflow attributes + merged = ExponentialHistogramDataPoint.new( + OVERFLOW_ATTRIBUTE_SET, + start_time, + end_time, + 0, # count + 0.0, # sum + @scale, + 0, # zero_count + ExponentialHistogram::Buckets.new, + ExponentialHistogram::Buckets.new, + 0, # flags + nil, # exemplars + Float::INFINITY, # min + -Float::INFINITY, # max + @zero_threshold + ) + + # Merge all remaining points into the overflow point + points.each do |hdp| + merged.count += hdp.count + merged.sum += hdp.sum + merged.zero_count += hdp.zero_count + merged.min = [merged.min, hdp.min].min if hdp.min + merged.max = [merged.max, hdp.max].max if hdp.max + + # Note: Merging buckets would require complex logic to handle different scales + # For simplicity, we aggregate the counts and sums + end + + merged + end + + def create_overflow_data_point(data_points) + if @record_min_max + min = Float::INFINITY + max = -Float::INFINITY + end + + data_points[OVERFLOW_ATTRIBUTE_SET] = ExponentialHistogramDataPoint.new( + OVERFLOW_ATTRIBUTE_SET, + nil, # :start_time_unix_nano + 0, # :time_unix_nano + 0, # :count + 0, # :sum + @scale, # :scale + @zero_count, # :zero_count + ExponentialHistogram::Buckets.new, # :positive + ExponentialHistogram::Buckets.new, # :negative + 0, # :flags + nil, # :exemplars + min, # :min + max, # :max + @zero_threshold # :zero_threshold) + ) + end + + def create_new_data_point(attributes, data_points) + if @record_min_max + min = Float::INFINITY + max = -Float::INFINITY + end + + data_points[attributes] = ExponentialHistogramDataPoint.new( + attributes, + nil, # :start_time_unix_nano + 0, # :time_unix_nano + 0, # :count + 0, # :sum + @scale, # :scale + @zero_count, # :zero_count + ExponentialHistogram::Buckets.new, # :positive + ExponentialHistogram::Buckets.new, # :negative + 0, # :flags + nil, # :exemplars + min, # :min + max, # :max + @zero_threshold # :zero_threshold) + ) + end + + def update_histogram_data_point(hdp, amount) # Start to populate the data point (esp. the buckets) if @record_min_max hdp.max = amount if amount > hdp.max @@ -162,16 +279,8 @@ def update(amount, attributes, data_points) bucket_index += buckets.counts.size if bucket_index.negative? buckets.increment_bucket(bucket_index) - nil - end - # rubocop:enable Metrics/MethodLength - - def aggregation_temporality - @aggregation_temporality.temporality end - private - def grow_buckets(span, buckets) return if span < buckets.counts.size @@ -179,6 +288,10 @@ def grow_buckets(span, buckets) buckets.grow(span + 1, @size) end + def aggregation_temporality + @aggregation_temporality.temporality + end + def new_mapping(scale) scale = validate_scale(scale) scale <= 0 ? ExponentialHistogram::ExponentMapping.new(scale) : ExponentialHistogram::LogarithmMapping.new(scale) diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/last_value.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/last_value.rb index 8fb05912e2..cb5ab13716 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/last_value.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/last_value.rb @@ -10,17 +10,82 @@ module Metrics module Aggregation # Contains the implementation of the LastValue aggregation class LastValue - def collect(start_time, end_time, data_points) - ndps = data_points.values.map! do |ndp| - ndp.start_time_unix_nano = start_time - ndp.time_unix_nano = end_time - ndp + OVERFLOW_ATTRIBUTE_SET = { 'otel.metric.overflow' => true }.freeze + + def initialize + @overflow_started = false + end + + def collect(start_time, end_time, data_points, cardinality_limit: 2000) + # Apply cardinality limit by choosing subset + overflow for LastValue + all_points = data_points.values + + if all_points.size <= cardinality_limit + # All points fit within limit + ndps = all_points.map! do |ndp| + ndp.start_time_unix_nano = start_time + ndp.time_unix_nano = end_time + ndp + end + else + # Choose most recent values (LastValue behavior) + selected_points = choose_last_value_subset(all_points, cardinality_limit - 1) + remaining_points = all_points - selected_points + + ndps = selected_points.map do |ndp| + ndp.start_time_unix_nano = start_time + ndp.time_unix_nano = end_time + ndp + end + + # Create overflow point with the last remaining value + if remaining_points.any? + # For LastValue, use the most recent remaining value + overflow_value = remaining_points.max_by(&:time_unix_nano)&.value || 0 + overflow_point = NumberDataPoint.new( + OVERFLOW_ATTRIBUTE_SET, + start_time, + end_time, + overflow_value, + nil + ) + ndps << overflow_point + end end + data_points.clear ndps end - def update(increment, attributes, data_points) + def update(increment, attributes, data_points, cardinality_limit: 2000) + # Check if we already have this attribute set + if data_points.key?(attributes) + # Update existing data point (LastValue behavior - replace) + data_points[attributes] = NumberDataPoint.new( + attributes, + nil, + nil, + increment, + nil + ) + return + end + + # Check cardinality limit for new attribute sets + if data_points.size >= cardinality_limit + # Overflow: aggregate into overflow data point + @overflow_started = true + data_points[OVERFLOW_ATTRIBUTE_SET] = NumberDataPoint.new( + OVERFLOW_ATTRIBUTE_SET, + nil, + nil, + increment, + nil + ) + return + end + + # Normal case - create new data point data_points[attributes] = NumberDataPoint.new( attributes, nil, @@ -30,6 +95,14 @@ def update(increment, attributes, data_points) ) nil end + + private + + def choose_last_value_subset(points, count) + # For LastValue, prefer most recently updated points + # Since we don't have timestamp tracking, use array order as proxy + points.last(count) + end end end end From 952fe112de212359632eb6704b27335ebde6097e Mon Sep 17 00:00:00 2001 From: xuan-cao-swi Date: Mon, 25 Aug 2025 12:04:47 -0400 Subject: [PATCH 03/13] add cardinality limit --- .../sdk/metrics/aggregation/drop.rb | 4 +- .../aggregation/explicit_bucket_histogram.rb | 48 +++-- .../exponential_bucket_histogram.rb | 34 ++-- .../sdk/metrics/aggregation/last_value.rb | 4 +- .../sdk/metrics/aggregation/sum.rb | 14 +- .../export/console_metric_pull_exporter.rb | 4 +- .../export/in_memory_metric_pull_exporter.rb | 4 +- .../sdk/metrics/export/metric_reader.rb | 5 +- .../state/asynchronous_metric_stream.rb | 8 +- .../sdk/metrics/state/metric_store.rb | 8 +- .../sdk/metrics/state/metric_stream.rb | 28 ++- .../sdk/metrics/aggregation/drop_test.rb | 16 +- .../explicit_bucket_histogram_test.rb | 177 +++++++++--------- .../exponential_bucket_histogram_test.rb | 55 +++--- .../metrics/aggregation/last_value_test.rb | 16 +- .../sdk/metrics/aggregation/sum_test.rb | 47 ++--- 16 files changed, 241 insertions(+), 231 deletions(-) diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/drop.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/drop.rb index fc9dd4a12d..5d5273c558 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/drop.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/drop.rb @@ -10,11 +10,11 @@ module Metrics module Aggregation # Contains the implementation of the Drop aggregation class Drop - def collect(start_time, end_time, data_points, cardinality_limit: nil) + def collect(start_time, end_time, data_points, cardinality_limit) data_points.values.map!(&:dup) end - def update(increment, attributes, data_points, cardinality_limit: nil) + def update(increment, attributes, data_points, cardinality_limit) data_points[attributes] = NumberDataPoint.new( {}, 0, diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb index 7412508870..029f9ad532 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb @@ -10,7 +10,7 @@ module Metrics module Aggregation # Contains the implementation of the ExplicitBucketHistogram aggregation # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation - class ExplicitBucketHistogram + class ExplicitBucketHistogram # rubocop:disable Metrics/ClassLength OVERFLOW_ATTRIBUTE_SET = { 'otel.metric.overflow' => true }.freeze DEFAULT_BOUNDARIES = [0, 5, 10, 25, 50, 75, 100, 250, 500, 1000].freeze private_constant :DEFAULT_BOUNDARIES @@ -30,34 +30,32 @@ def initialize( @overflow_started = false end - def collect(start_time, end_time, data_points, cardinality_limit: 2000) + def collect(start_time, end_time, data_points, cardinality_limit) all_points = data_points.values # Apply cardinality limit - if all_points.size <= cardinality_limit - result = process_all_points(all_points, start_time, end_time) - else - result = process_with_cardinality_limit(all_points, start_time, end_time, cardinality_limit) - end + result = if all_points.size <= cardinality_limit + process_all_points(all_points, start_time, end_time) + else + process_with_cardinality_limit(all_points, start_time, end_time, cardinality_limit) + end data_points.clear if @aggregation_temporality.delta? result end - def update(amount, attributes, data_points, cardinality_limit: 2000) + def update(amount, attributes, data_points, cardinality_limit) # Check if we already have this attribute set if data_points.key?(attributes) hdp = data_points[attributes] - else + elsif data_points.size >= cardinality_limit # Check cardinality limit for new attribute sets - if data_points.size >= cardinality_limit - # Overflow: aggregate into overflow data point - @overflow_started = true - hdp = data_points[OVERFLOW_ATTRIBUTE_SET] || create_overflow_data_point(data_points) - else - # Normal case - create new data point - hdp = create_new_data_point(attributes, data_points) - end + @overflow_started = true + hdp = data_points[OVERFLOW_ATTRIBUTE_SET] || create_overflow_data_point(data_points) + # Overflow: aggregate into overflow data point + else + # Normal case - create new data point + hdp = create_new_data_point(attributes, data_points) end # Update the histogram data point @@ -137,10 +135,10 @@ def merge_histogram_points(points, start_time, end_time) merged.max = [merged.max, hdp.max].max if hdp.max # Merge bucket counts - if merged_bucket_counts && hdp.bucket_counts - hdp.bucket_counts.each_with_index do |count, index| - merged_bucket_counts[index] += count - end + next unless merged_bucket_counts && hdp.bucket_counts + + hdp.bucket_counts.each_with_index do |count, index| + merged_bucket_counts[index] += count end end @@ -195,10 +193,10 @@ def update_histogram_data_point(hdp, amount) hdp.sum += amount hdp.count += 1 - if @boundaries - bucket_index = @boundaries.bsearch_index { |i| i >= amount } || @boundaries.size - hdp.bucket_counts[bucket_index] += 1 - end + return unless @boundaries + + bucket_index = @boundaries.bsearch_index { |i| i >= amount } || @boundaries.size + hdp.bucket_counts[bucket_index] += 1 end def empty_bucket_counts diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb index 28ac9c3da1..d3cae3341a 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb @@ -49,42 +49,38 @@ def initialize( @mapping = new_mapping(@scale) end - def collect(start_time, end_time, data_points, cardinality_limit: 2000) + def collect(start_time, end_time, data_points, cardinality_limit) all_points = data_points.values # Apply cardinality limit - if all_points.size <= cardinality_limit - result = process_all_points(all_points, start_time, end_time) - else - result = process_with_cardinality_limit(all_points, start_time, end_time, cardinality_limit) - end + result = if all_points.size <= cardinality_limit + process_all_points(all_points, start_time, end_time) + else + process_with_cardinality_limit(all_points, start_time, end_time, cardinality_limit) + end data_points.clear if @aggregation_temporality.delta? result end - # rubocop:disable Metrics/MethodLength - def update(amount, attributes, data_points, cardinality_limit: 2000) + def update(amount, attributes, data_points, cardinality_limit) # Check if we already have this attribute set if data_points.key?(attributes) hdp = data_points[attributes] - else + elsif data_points.size >= cardinality_limit # Check cardinality limit for new attribute sets - if data_points.size >= cardinality_limit - # Overflow: aggregate into overflow data point - @overflow_started = true - hdp = data_points[OVERFLOW_ATTRIBUTE_SET] || create_overflow_data_point(data_points) - else - # Normal case - create new data point - hdp = create_new_data_point(attributes, data_points) - end + @overflow_started = true + hdp = data_points[OVERFLOW_ATTRIBUTE_SET] || create_overflow_data_point(data_points) + # Overflow: aggregate into overflow data point + else + # Normal case - create new data point + hdp = create_new_data_point(attributes, data_points) end # Update the histogram data point with the new amount update_histogram_data_point(hdp, amount) nil end - # rubocop:enable Metrics/MethodLength private @@ -157,7 +153,7 @@ def merge_histogram_points(points, start_time, end_time) merged.min = [merged.min, hdp.min].min if hdp.min merged.max = [merged.max, hdp.max].max if hdp.max - # Note: Merging buckets would require complex logic to handle different scales + # NOTE: Merging buckets would require complex logic to handle different scales # For simplicity, we aggregate the counts and sums end diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/last_value.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/last_value.rb index cb5ab13716..d59954c067 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/last_value.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/last_value.rb @@ -16,7 +16,7 @@ def initialize @overflow_started = false end - def collect(start_time, end_time, data_points, cardinality_limit: 2000) + def collect(start_time, end_time, data_points, cardinality_limit) # Apply cardinality limit by choosing subset + overflow for LastValue all_points = data_points.values @@ -57,7 +57,7 @@ def collect(start_time, end_time, data_points, cardinality_limit: 2000) ndps end - def update(increment, attributes, data_points, cardinality_limit: 2000) + def update(increment, attributes, data_points, cardinality_limit) # Check if we already have this attribute set if data_points.key?(attributes) # Update existing data point (LastValue behavior - replace) diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb index 7de732f83f..addda0e590 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb @@ -4,19 +4,21 @@ # # SPDX-License-Identifier: Apache-2.0 +require 'set' + module OpenTelemetry module SDK module Metrics module Aggregation # Contains the implementation of the Sum aggregation - class Sum + class Sum # rubocop:disable Metrics/ClassLength OVERFLOW_ATTRIBUTE_SET = { 'otel.metric.overflow' => true }.freeze def initialize(aggregation_temporality: ENV.fetch('OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE', :cumulative), monotonic: false, instrument_kind: nil) @aggregation_temporality = AggregationTemporality.determine_temporality(aggregation_temporality: aggregation_temporality, instrument_kind: instrument_kind, default: :cumulative) @monotonic = monotonic @overflow_started = false - @pre_overflow_attributes = Set.new if @aggregation_temporality.cumulative? + @pre_overflow_attributes = ::Set.new if @aggregation_temporality.cumulative? end def collect(start_time, end_time, data_points, cardinality_limit) @@ -47,12 +49,10 @@ def update(increment, attributes, data_points, cardinality_limit) create_new_data_point(attributes, increment, data_points) return end - else + elsif data_points.size < cardinality_limit # For delta: simple size check - if data_points.size < cardinality_limit - create_new_data_point(attributes, increment, data_points) - return - end + create_new_data_point(attributes, increment, data_points) + return end # Overflow case: aggregate into overflow data point diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/export/console_metric_pull_exporter.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/export/console_metric_pull_exporter.rb index 8264c8e2e0..4adc440ea3 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/export/console_metric_pull_exporter.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/export/console_metric_pull_exporter.rb @@ -12,8 +12,8 @@ module Export # # Potentially useful for exploratory purposes. class ConsoleMetricPullExporter < MetricReader - def initialize - super + def initialize(aggregation_cardinality_limit: nil) + super(aggregation_cardinality_limit: aggregation_cardinality_limit) @stopped = false end diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/export/in_memory_metric_pull_exporter.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/export/in_memory_metric_pull_exporter.rb index d0d8ccb902..9701c904c9 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/export/in_memory_metric_pull_exporter.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/export/in_memory_metric_pull_exporter.rb @@ -13,8 +13,8 @@ module Export class InMemoryMetricPullExporter < MetricReader attr_reader :metric_snapshots - def initialize - super + def initialize(aggregation_cardinality_limit: nil) + super(aggregation_cardinality_limit: aggregation_cardinality_limit) @metric_snapshots = [] @mutex = Mutex.new end diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/export/metric_reader.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/export/metric_reader.rb index 9969f3a8ab..af44898eed 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/export/metric_reader.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/export/metric_reader.rb @@ -15,12 +15,11 @@ class MetricReader attr_reader :metric_store def initialize(aggregation_cardinality_limit: nil) - @metric_store = OpenTelemetry::SDK::Metrics::State::MetricStore.new - @cardinality_limit = aggregation_cardinality_limit + @metric_store = OpenTelemetry::SDK::Metrics::State::MetricStore.new(cardinality_limit: aggregation_cardinality_limit) end def collect - @metric_store.collect(cardinality_limit: @cardinality_limit) + @metric_store.collect end def shutdown(timeout: nil) diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/asynchronous_metric_stream.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/asynchronous_metric_stream.rb index 759239cf41..9dee03cf17 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/asynchronous_metric_stream.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/asynchronous_metric_stream.rb @@ -47,23 +47,27 @@ def collect(start_time, end_time) def invoke_callback(timeout, attributes) if @registered_views.empty? + + resolved_cardinality_limit = @cardinality_limit || DEFAULT_CARDINALITY_LIMIT @mutex.synchronize do Timeout.timeout(timeout || 30) do @callback.each do |cb| value = cb.call - @default_aggregation.update(value, attributes, @data_points) + @default_aggregation.update(value, attributes, @data_points, resolved_cardinality_limit) end end end else @registered_views.each do |view| + resolved_cardinality_limit = resolve_cardinality_limit(view) + @mutex.synchronize do Timeout.timeout(timeout || 30) do @callback.each do |cb| value = cb.call merged_attributes = attributes || {} merged_attributes.merge!(view.attribute_keys) - view.aggregation.update(value, merged_attributes, @data_points) if view.valid_aggregation? + view.aggregation.update(value, merged_attributes, @data_points, resolved_cardinality_limit) if view.valid_aggregation? end end end diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_store.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_store.rb index 53848f33d5..9e0786c9ea 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_store.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_store.rb @@ -13,17 +13,18 @@ module State # The MetricStore module provides SDK internal functionality that is not a part of the # public API. class MetricStore - def initialize + def initialize(cardinality_limit: nil) @mutex = Mutex.new @epoch_start_time = now_in_nano @epoch_end_time = nil @metric_streams = [] + @cardinality_limit = cardinality_limit end - def collect(cardinality_limit: nil) + def collect @mutex.synchronize do @epoch_end_time = now_in_nano - snapshot = @metric_streams.flat_map { |ms| ms.collect(@epoch_start_time, @epoch_end_time, cardinality_limit: cardinality_limit) } + snapshot = @metric_streams.flat_map { |ms| ms.collect(@epoch_start_time, @epoch_end_time) } @epoch_start_time = @epoch_end_time snapshot @@ -32,6 +33,7 @@ def collect(cardinality_limit: nil) def add_metric_stream(metric_stream) @mutex.synchronize do + metric_stream.cardinality_limit = @cardinality_limit @metric_streams = @metric_streams.dup.push(metric_stream) nil end diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb index 5a17bdf749..e645025d24 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb @@ -14,6 +14,7 @@ module State # public API. class MetricStream attr_reader :name, :description, :unit, :instrument_kind, :instrumentation_scope, :data_points + attr_writer :cardinality_limit DEFAULT_CARDINALITY_LIMIT = 2000 @@ -40,7 +41,11 @@ def initialize( @mutex = Mutex.new end - def collect(start_time, end_time, cardinality_limit: nil) + # this cardinality_limit is from exporter.new(cardinality_limit: cardinality_limit) + # -> metric_reader.collect(...cardinality_limit) + # -> metric_store.collect(...cardinality_limit) + # -> metric_stream.collect(...cardinality_limit) + def collect(start_time, end_time) @mutex.synchronize do metric_data = [] @@ -48,13 +53,13 @@ def collect(start_time, end_time, cardinality_limit: nil) return metric_data if @data_points.empty? if @registered_views.empty? - resolved_cardinality_limit = resolve_cardinality_limit(view: nil, cardinality_limit: cardinality_limit) + resolved_cardinality_limit = @cardinality_limit || DEFAULT_CARDINALITY_LIMIT metric_data << aggregate_metric_data(start_time, end_time, resolved_cardinality_limit) else @registered_views.each do |view| - resolved_cardinality_limit = resolve_cardinality_limit(view: view, cardinality_limit: cardinality_limit) + resolved_cardinality_limit = resolve_cardinality_limit(view) metric_data << aggregate_metric_data(start_time, end_time, resolved_cardinality_limit, @@ -69,15 +74,20 @@ def collect(start_time, end_time, cardinality_limit: nil) # view has the cardinality, pass to aggregation update # to determine if aggregation have the cardinality # if the aggregation does not have the cardinality, then it will be default 2000 + # it better to move overflowed data_points during update because if do it in collect, + # then we need to sort the entire data_points (~ 2000) based on time, which is time-consuming def update(value, attributes) if @registered_views.empty? - @mutex.synchronize { @default_aggregation.update(value, attributes, @data_points) } + resolved_cardinality_limit = @cardinality_limit || DEFAULT_CARDINALITY_LIMIT + + @mutex.synchronize { @default_aggregation.update(value, attributes, @data_points, resolved_cardinality_limit) } else @registered_views.each do |view| + resolved_cardinality_limit = resolve_cardinality_limit(view) @mutex.synchronize do attributes ||= {} attributes.merge!(view.attribute_keys) - view.aggregation.update(value, attributes, @data_points) if view.valid_aggregation? + view.aggregation.update(value, attributes, @data_points, resolved_cardinality_limit) if view.valid_aggregation? end end end @@ -95,7 +105,7 @@ def aggregate_metric_data(start_time, end_time, cardinality_limit, aggregation: @instrument_kind, @meter_provider.resource, @instrumentation_scope, - aggregator.collect(start_time, end_time, @data_points, cardinality_limit: cardinality_limit), + aggregator.collect(start_time, end_time, @data_points, cardinality_limit), aggregation_temporality, start_time, end_time, @@ -109,10 +119,8 @@ def find_registered_view @meter_provider.registered_views.each { |view| @registered_views << view if view.match_instrument?(self) } end - def resolve_cardinality_limit(view: nil, cardinality_limit: nil) - return view.aggregation_cardinality_limit if view&.aggregation_cardinality_limit - return cardinality_limit if cardinality_limit - DEFAULT_CARDINALITY_LIMIT + def resolve_cardinality_limit(view) + view.aggregation_cardinality_limit || @cardinality_limit || DEFAULT_CARDINALITY_LIMIT end def to_s diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb index b2212b462a..a94d056c38 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb @@ -10,7 +10,7 @@ let(:data_points) { {} } let(:drop_aggregation) { OpenTelemetry::SDK::Metrics::Aggregation::Drop.new } let(:aggregation_temporality) { :delta } - + let(:cardinality_limit) { 2000 } # Time in nano let(:start_time) { (Time.now.to_r * 1_000_000_000).to_i } let(:end_time) { ((Time.now + 60).to_r * 1_000_000_000).to_i } @@ -20,20 +20,20 @@ end it 'sets the timestamps' do - drop_aggregation.update(0, {}, data_points) - ndp = drop_aggregation.collect(start_time, end_time, data_points)[0] + drop_aggregation.update(0, {}, data_points, cardinality_limit) + ndp = drop_aggregation.collect(start_time, end_time, data_points, cardinality_limit)[0] _(ndp.start_time_unix_nano).must_equal(0) _(ndp.time_unix_nano).must_equal(0) end it 'aggregates and collects should collect no value for all collection' do - drop_aggregation.update(1, {}, data_points) - drop_aggregation.update(2, {}, data_points) + drop_aggregation.update(1, {}, data_points, cardinality_limit) + drop_aggregation.update(2, {}, data_points, cardinality_limit) - drop_aggregation.update(2, { 'foo' => 'bar' }, data_points) - drop_aggregation.update(2, { 'foo' => 'bar' }, data_points) + drop_aggregation.update(2, { 'foo' => 'bar' }, data_points, cardinality_limit) + drop_aggregation.update(2, { 'foo' => 'bar' }, data_points, cardinality_limit) - ndps = drop_aggregation.collect(start_time, end_time, data_points) + ndps = drop_aggregation.collect(start_time, end_time, data_points, cardinality_limit) _(ndps.size).must_equal(2) _(ndps[0].value).must_equal(0) diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb index 8dc7c2eabe..b9b0e7707a 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb @@ -21,6 +21,7 @@ # Time in nano let(:start_time) { (Time.now.to_r * 1_000_000_000).to_i } let(:end_time) { ((Time.now + 60).to_r * 1_000_000_000).to_i } + let(:cardinality_limit) { 2000 } describe '#initialize' do it 'defaults to the delta aggregation temporality' do @@ -91,19 +92,19 @@ describe '#collect' do it 'returns all the data points' do - ebh.update(0, {}, data_points) - ebh.update(1, {}, data_points) - ebh.update(5, {}, data_points) - ebh.update(6, {}, data_points) - ebh.update(10, {}, data_points) - - ebh.update(-10, { 'foo' => 'bar' }, data_points) - ebh.update(1, { 'foo' => 'bar' }, data_points) - ebh.update(22, { 'foo' => 'bar' }, data_points) - ebh.update(55, { 'foo' => 'bar' }, data_points) - ebh.update(80, { 'foo' => 'bar' }, data_points) - - hdps = ebh.collect(start_time, end_time, data_points) + ebh.update(0, {}, data_points, cardinality_limit) + ebh.update(1, {}, data_points, cardinality_limit) + ebh.update(5, {}, data_points, cardinality_limit) + ebh.update(6, {}, data_points, cardinality_limit) + ebh.update(10, {}, data_points, cardinality_limit) + + ebh.update(-10, { 'foo' => 'bar' }, data_points, cardinality_limit) + ebh.update(1, { 'foo' => 'bar' }, data_points, cardinality_limit) + ebh.update(22, { 'foo' => 'bar' }, data_points, cardinality_limit) + ebh.update(55, { 'foo' => 'bar' }, data_points, cardinality_limit) + ebh.update(80, { 'foo' => 'bar' }, data_points, cardinality_limit) + + hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) _(hdps.size).must_equal(2) _(hdps[0].attributes).must_equal({}) _(hdps[0].count).must_equal(5) @@ -123,34 +124,34 @@ end it 'sets the timestamps' do - ebh.update(0, {}, data_points) - hdp = ebh.collect(start_time, end_time, data_points)[0] + ebh.update(0, {}, data_points, cardinality_limit) + hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] _(hdp.start_time_unix_nano).must_equal(start_time) _(hdp.time_unix_nano).must_equal(end_time) end it 'calculates the count' do - ebh.update(0, {}, data_points) - ebh.update(0, {}, data_points) - ebh.update(0, {}, data_points) - ebh.update(0, {}, data_points) - hdp = ebh.collect(start_time, end_time, data_points)[0] + ebh.update(0, {}, data_points, cardinality_limit) + ebh.update(0, {}, data_points, cardinality_limit) + ebh.update(0, {}, data_points, cardinality_limit) + ebh.update(0, {}, data_points, cardinality_limit) + hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] _(hdp.count).must_equal(4) end it 'does not aggregate between collects with default delta aggregation' do - ebh.update(0, {}, data_points) - ebh.update(1, {}, data_points) - ebh.update(5, {}, data_points) - ebh.update(6, {}, data_points) - ebh.update(10, {}, data_points) - hdps = ebh.collect(start_time, end_time, data_points) - - ebh.update(0, {}, data_points) - ebh.update(1, {}, data_points) - ebh.update(5, {}, data_points) - ebh.update(6, {}, data_points) - ebh.update(10, {}, data_points) + ebh.update(0, {}, data_points, cardinality_limit) + ebh.update(1, {}, data_points, cardinality_limit) + ebh.update(5, {}, data_points, cardinality_limit) + ebh.update(6, {}, data_points, cardinality_limit) + ebh.update(10, {}, data_points, cardinality_limit) + hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + + ebh.update(0, {}, data_points, cardinality_limit) + ebh.update(1, {}, data_points, cardinality_limit) + ebh.update(5, {}, data_points, cardinality_limit) + ebh.update(6, {}, data_points, cardinality_limit) + ebh.update(10, {}, data_points, cardinality_limit) # Assert that the recent update does not # impact the already collected metrics _(hdps[0].count).must_equal(5) @@ -159,7 +160,7 @@ _(hdps[0].max).must_equal(10) _(hdps[0].bucket_counts).must_equal([1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0]) - hdps = ebh.collect(start_time, end_time, data_points) + hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) # Assert that we are not accumulating values # between calls to collect _(hdps[0].count).must_equal(5) @@ -173,18 +174,18 @@ let(:aggregation_temporality) { :not_delta } it 'allows metrics to accumulate' do - ebh.update(0, {}, data_points) - ebh.update(1, {}, data_points) - ebh.update(5, {}, data_points) - ebh.update(6, {}, data_points) - ebh.update(10, {}, data_points) - hdps = ebh.collect(start_time, end_time, data_points) - - ebh.update(0, {}, data_points) - ebh.update(1, {}, data_points) - ebh.update(5, {}, data_points) - ebh.update(6, {}, data_points) - ebh.update(10, {}, data_points) + ebh.update(0, {}, data_points, cardinality_limit) + ebh.update(1, {}, data_points, cardinality_limit) + ebh.update(5, {}, data_points, cardinality_limit) + ebh.update(6, {}, data_points, cardinality_limit) + ebh.update(10, {}, data_points, cardinality_limit) + hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + + ebh.update(0, {}, data_points, cardinality_limit) + ebh.update(1, {}, data_points, cardinality_limit) + ebh.update(5, {}, data_points, cardinality_limit) + ebh.update(6, {}, data_points, cardinality_limit) + ebh.update(10, {}, data_points, cardinality_limit) # Assert that the recent update does not # impact the already collected metrics _(hdps[0].count).must_equal(5) @@ -193,7 +194,7 @@ _(hdps[0].max).must_equal(10) _(hdps[0].bucket_counts).must_equal([1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0]) - hdps1 = ebh.collect(start_time, end_time, data_points) + hdps1 = ebh.collect(start_time, end_time, data_points, cardinality_limit) # Assert that we are accumulating values # and not just capturing the delta since # the previous collect call @@ -216,38 +217,38 @@ describe '#update' do it 'accumulates across the default boundaries' do - ebh.update(0, {}, data_points) + ebh.update(0, {}, data_points, cardinality_limit) - ebh.update(1, {}, data_points) - ebh.update(5, {}, data_points) + ebh.update(1, {}, data_points, cardinality_limit) + ebh.update(5, {}, data_points, cardinality_limit) - ebh.update(6, {}, data_points) - ebh.update(10, {}, data_points) + ebh.update(6, {}, data_points, cardinality_limit) + ebh.update(10, {}, data_points, cardinality_limit) - ebh.update(11, {}, data_points) - ebh.update(25, {}, data_points) + ebh.update(11, {}, data_points, cardinality_limit) + ebh.update(25, {}, data_points, cardinality_limit) - ebh.update(26, {}, data_points) - ebh.update(50, {}, data_points) + ebh.update(26, {}, data_points, cardinality_limit) + ebh.update(50, {}, data_points, cardinality_limit) - ebh.update(51, {}, data_points) - ebh.update(75, {}, data_points) + ebh.update(51, {}, data_points, cardinality_limit) + ebh.update(75, {}, data_points, cardinality_limit) - ebh.update(76, {}, data_points) - ebh.update(100, {}, data_points) + ebh.update(76, {}, data_points, cardinality_limit) + ebh.update(100, {}, data_points, cardinality_limit) - ebh.update(101, {}, data_points) - ebh.update(250, {}, data_points) + ebh.update(101, {}, data_points, cardinality_limit) + ebh.update(250, {}, data_points, cardinality_limit) - ebh.update(251, {}, data_points) - ebh.update(500, {}, data_points) + ebh.update(251, {}, data_points, cardinality_limit) + ebh.update(500, {}, data_points, cardinality_limit) - ebh.update(501, {}, data_points) - ebh.update(1000, {}, data_points) + ebh.update(501, {}, data_points, cardinality_limit) + ebh.update(1000, {}, data_points, cardinality_limit) - ebh.update(1001, {}, data_points) + ebh.update(1001, {}, data_points, cardinality_limit) - hdp = ebh.collect(start_time, end_time, data_points)[0] + hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] _(hdp.bucket_counts).must_equal([1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]) _(hdp.sum).must_equal(4040) _(hdp.min).must_equal(0) @@ -258,8 +259,8 @@ let(:boundaries) { [4, 2, 1] } it 'sorts it' do - ebh.update(0, {}, data_points) - _(ebh.collect(start_time, end_time, data_points)[0].explicit_bounds).must_equal([1, 2, 4]) + ebh.update(0, {}, data_points, cardinality_limit) + _(ebh.collect(start_time, end_time, data_points, cardinality_limit)[0].explicit_bounds).must_equal([1, 2, 4]) end end @@ -267,8 +268,8 @@ let(:record_min_max) { false } it 'does not record min max values' do - ebh.update(-1, {}, data_points) - hdp = ebh.collect(start_time, end_time, data_points)[0] + ebh.update(-1, {}, data_points, cardinality_limit) + hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] _(hdp.min).must_be_nil _(hdp.min).must_be_nil end @@ -278,14 +279,14 @@ let(:boundaries) { [0, 2, 4] } it 'aggregates' do - ebh.update(-1, {}, data_points) - ebh.update(0, {}, data_points) - ebh.update(1, {}, data_points) - ebh.update(2, {}, data_points) - ebh.update(3, {}, data_points) - ebh.update(4, {}, data_points) - ebh.update(5, {}, data_points) - hdp = ebh.collect(start_time, end_time, data_points)[0] + ebh.update(-1, {}, data_points, cardinality_limit) + ebh.update(0, {}, data_points, cardinality_limit) + ebh.update(1, {}, data_points, cardinality_limit) + ebh.update(2, {}, data_points, cardinality_limit) + ebh.update(3, {}, data_points, cardinality_limit) + ebh.update(4, {}, data_points, cardinality_limit) + ebh.update(5, {}, data_points, cardinality_limit) + hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] _(hdp.bucket_counts).must_equal([2, 2, 2, 1]) end @@ -295,9 +296,9 @@ let(:boundaries) { [0] } it 'aggregates' do - ebh.update(-1, {}, data_points) - ebh.update(1, {}, data_points) - hdp = ebh.collect(start_time, end_time, data_points)[0] + ebh.update(-1, {}, data_points, cardinality_limit) + ebh.update(1, {}, data_points, cardinality_limit) + hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] _(hdp.bucket_counts).must_equal([1, 1]) end @@ -307,9 +308,9 @@ let(:boundaries) { [] } it 'aggregates but does not record bucket counts' do - ebh.update(-1, {}, data_points) - ebh.update(3, {}, data_points) - hdp = ebh.collect(start_time, end_time, data_points)[0] + ebh.update(-1, {}, data_points, cardinality_limit) + ebh.update(3, {}, data_points, cardinality_limit) + hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] _(hdp.bucket_counts).must_be_nil _(hdp.explicit_bounds).must_be_nil @@ -324,9 +325,9 @@ let(:boundaries) { nil } it 'aggregates but does not record bucket counts' do - ebh.update(-1, {}, data_points) - ebh.update(3, {}, data_points) - hdp = ebh.collect(start_time, end_time, data_points)[0] + ebh.update(-1, {}, data_points, cardinality_limit) + ebh.update(3, {}, data_points, cardinality_limit) + hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] _(hdp.bucket_counts).must_be_nil _(hdp.explicit_bounds).must_be_nil diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb index 0b09aeb615..096c55b45f 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb @@ -25,17 +25,18 @@ # Time in nano let(:start_time) { Process.clock_gettime(Process::CLOCK_REALTIME, :nanosecond) } let(:end_time) { Process.clock_gettime(Process::CLOCK_REALTIME, :nanosecond) + (60 * 1_000_000_000) } + let(:cardinality_limit) { 2000 } describe '#collect' do it 'returns all the data points' do - expbh.update(1.03, {}, data_points) - expbh.update(1.23, {}, data_points) - expbh.update(0, {}, data_points) + expbh.update(1.03, {}, data_points, cardinality_limit) + expbh.update(1.23, {}, data_points, cardinality_limit) + expbh.update(0, {}, data_points, cardinality_limit) - expbh.update(1.45, { 'foo' => 'bar' }, data_points) - expbh.update(1.67, { 'foo' => 'bar' }, data_points) + expbh.update(1.45, { 'foo' => 'bar' }, data_points, cardinality_limit) + expbh.update(1.67, { 'foo' => 'bar' }, data_points, cardinality_limit) - exphdps = expbh.collect(start_time, end_time, data_points) + exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) _(exphdps.size).must_equal(2) _(exphdps[0].attributes).must_equal({}) @@ -80,11 +81,11 @@ zero_threshold: 0 ) - expbh.update(2, {}, data_points) - expbh.update(4, {}, data_points) - expbh.update(1, {}, data_points) + expbh.update(2, {}, data_points, cardinality_limit) + expbh.update(4, {}, data_points, cardinality_limit) + expbh.update(1, {}, data_points, cardinality_limit) - exphdps = expbh.collect(start_time, end_time, data_points) + exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) _(exphdps.size).must_equal(1) _(exphdps[0].attributes).must_equal({}) @@ -113,14 +114,14 @@ zero_threshold: 0 ) - expbh.update(2, {}, data_points) - expbh.update(2, {}, data_points) - expbh.update(2, {}, data_points) - expbh.update(1, {}, data_points) - expbh.update(8, {}, data_points) - expbh.update(0.5, {}, data_points) + expbh.update(2, {}, data_points, cardinality_limit) + expbh.update(2, {}, data_points, cardinality_limit) + expbh.update(2, {}, data_points, cardinality_limit) + expbh.update(1, {}, data_points, cardinality_limit) + expbh.update(8, {}, data_points, cardinality_limit) + expbh.update(0.5, {}, data_points, cardinality_limit) - exphdps = expbh.collect(start_time, end_time, data_points) + exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) _(exphdps.size).must_equal(1) _(exphdps[0].attributes).must_equal({}) @@ -181,10 +182,10 @@ ) permutation.each do |value| - expbh.update(value, {}, data_points) + expbh.update(value, {}, data_points, cardinality_limit) end - exphdps = expbh.collect(start_time, end_time, data_points) + exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) assert_equal expected[:scale], exphdps[0].scale assert_equal expected[:offset], exphdps[0].positive.offset @@ -204,11 +205,11 @@ zero_threshold: 0 ) - expbh.update(Float::MAX, {}, data_points) - expbh.update(1, {}, data_points) - expbh.update(2**-1074, {}, data_points) + expbh.update(Float::MAX, {}, data_points, cardinality_limit) + expbh.update(1, {}, data_points, cardinality_limit) + expbh.update(2**-1074, {}, data_points, cardinality_limit) - exphdps = expbh.collect(start_time, end_time, data_points) + exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) assert_equal Float::MAX, exphdps[0].sum assert_equal 3, exphdps[0].count @@ -228,10 +229,10 @@ ) [1, 3, 5, 7, 9].each do |value| - expbh.update(value, {}, data_points) + expbh.update(value, {}, data_points, cardinality_limit) end - exphdps = expbh.collect(start_time, end_time, data_points) + exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) assert_equal 1, exphdps[0].min assert_equal 9, exphdps[0].max @@ -243,10 +244,10 @@ ) [-1, -3, -5, -7, -9].each do |value| - expbh.update(value, {}, data_points) + expbh.update(value, {}, data_points, cardinality_limit) end - exphdps = expbh.collect(start_time, end_time, data_points) + exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) assert_equal(-9, exphdps[0].min) assert_equal(-1, exphdps[0].max) diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb index 6e606d7462..9c6f922c81 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb @@ -9,26 +9,26 @@ describe OpenTelemetry::SDK::Metrics::Aggregation::LastValue do let(:data_points) { {} } let(:last_value_aggregation) { OpenTelemetry::SDK::Metrics::Aggregation::LastValue.new } - + let(:cardinality_limit) { 2000 } # Time in nano let(:start_time) { (Time.now.to_r * 1_000_000_000).to_i } let(:end_time) { ((Time.now + 60).to_r * 1_000_000_000).to_i } it 'sets the timestamps' do - last_value_aggregation.update(0, {}, data_points) - ndp = last_value_aggregation.collect(start_time, end_time, data_points)[0] + last_value_aggregation.update(0, {}, data_points, cardinality_limit) + ndp = last_value_aggregation.collect(start_time, end_time, data_points, cardinality_limit)[0] _(ndp.start_time_unix_nano).must_equal(start_time) _(ndp.time_unix_nano).must_equal(end_time) end it 'aggregates and collects should collect the last value' do - last_value_aggregation.update(1, {}, data_points) - last_value_aggregation.update(2, {}, data_points) + last_value_aggregation.update(1, {}, data_points, cardinality_limit) + last_value_aggregation.update(2, {}, data_points, cardinality_limit) - last_value_aggregation.update(2, { 'foo' => 'bar' }, data_points) - last_value_aggregation.update(2, { 'foo' => 'bar' }, data_points) + last_value_aggregation.update(2, { 'foo' => 'bar' }, data_points, cardinality_limit) + last_value_aggregation.update(2, { 'foo' => 'bar' }, data_points, cardinality_limit) - ndps = last_value_aggregation.collect(start_time, end_time, data_points) + ndps = last_value_aggregation.collect(start_time, end_time, data_points, cardinality_limit) _(ndps[0].value).must_equal(2) _(ndps[0].attributes).must_equal({}, data_points) diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb index 870f7933bb..d75816c833 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb @@ -11,6 +11,7 @@ let(:sum_aggregation) { OpenTelemetry::SDK::Metrics::Aggregation::Sum.new(aggregation_temporality:, monotonic:) } let(:aggregation_temporality) { :delta } let(:monotonic) { false } + let(:cardinality_limit) { 2000 } # Time in nano let(:start_time) { (Time.now.to_r * 1_000_000_000).to_i } @@ -84,20 +85,20 @@ end it 'sets the timestamps' do - sum_aggregation.update(0, {}, data_points) - ndp = sum_aggregation.collect(start_time, end_time, data_points)[0] + sum_aggregation.update(0, {}, data_points, cardinality_limit) + ndp = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit)[0] _(ndp.start_time_unix_nano).must_equal(start_time) _(ndp.time_unix_nano).must_equal(end_time) end it 'aggregates and collects' do - sum_aggregation.update(1, {}, data_points) - sum_aggregation.update(2, {}, data_points) + sum_aggregation.update(1, {}, data_points, cardinality_limit) + sum_aggregation.update(2, {}, data_points, cardinality_limit) - sum_aggregation.update(2, { 'foo' => 'bar' }, data_points) - sum_aggregation.update(2, { 'foo' => 'bar' }, data_points) + sum_aggregation.update(2, { 'foo' => 'bar' }, data_points, cardinality_limit) + sum_aggregation.update(2, { 'foo' => 'bar' }, data_points, cardinality_limit) - ndps = sum_aggregation.collect(start_time, end_time, data_points) + ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) _(ndps[0].value).must_equal(3) _(ndps[0].attributes).must_equal({}, data_points) @@ -106,24 +107,24 @@ end it 'aggregates and collects negative values' do - sum_aggregation.update(1, {}, data_points) - sum_aggregation.update(-2, {}, data_points) + sum_aggregation.update(1, {}, data_points, cardinality_limit) + sum_aggregation.update(-2, {}, data_points, cardinality_limit) - ndps = sum_aggregation.collect(start_time, end_time, data_points) + ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) _(ndps[0].value).must_equal(-1) end it 'does not aggregate between collects' do - sum_aggregation.update(1, {}, data_points) - sum_aggregation.update(2, {}, data_points) - ndps = sum_aggregation.collect(start_time, end_time, data_points) + sum_aggregation.update(1, {}, data_points, cardinality_limit) + sum_aggregation.update(2, {}, data_points, cardinality_limit) + ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) - sum_aggregation.update(1, {}, data_points) + sum_aggregation.update(1, {}, data_points, cardinality_limit) # Assert that the recent update does not # impact the already collected metrics _(ndps[0].value).must_equal(3) - ndps = sum_aggregation.collect(start_time, end_time, data_points) + ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) # Assert that we are not accumulating values # between calls to collect _(ndps[0].value).must_equal(1) @@ -133,16 +134,16 @@ let(:aggregation_temporality) { :not_delta } it 'allows metrics to accumulate' do - sum_aggregation.update(1, {}, data_points) - sum_aggregation.update(2, {}, data_points) - ndps = sum_aggregation.collect(start_time, end_time, data_points) + sum_aggregation.update(1, {}, data_points, cardinality_limit) + sum_aggregation.update(2, {}, data_points, cardinality_limit) + ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) - sum_aggregation.update(1, {}, data_points) + sum_aggregation.update(1, {}, data_points, cardinality_limit) # Assert that the recent update does not # impact the already collected metrics _(ndps[0].value).must_equal(3) - ndps = sum_aggregation.collect(start_time, end_time, data_points) + ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) # Assert that we are accumulating values # and not just capturing the delta since # the previous collect call @@ -155,9 +156,9 @@ let(:monotonic) { true } it 'does not allow negative values to accumulate' do - sum_aggregation.update(1, {}, data_points) - sum_aggregation.update(-2, {}, data_points) - ndps = sum_aggregation.collect(start_time, end_time, data_points) + sum_aggregation.update(1, {}, data_points, cardinality_limit) + sum_aggregation.update(-2, {}, data_points, cardinality_limit) + ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) _(ndps[0].value).must_equal(1) end From 9cb4cb56fc366a8aaa553984af6897959275aad2 Mon Sep 17 00:00:00 2001 From: xuan-cao-swi Date: Mon, 25 Aug 2025 14:57:44 -0400 Subject: [PATCH 04/13] add basic cardinality test for aggregtion --- .../aggregation/explicit_bucket_histogram.rb | 9 +--- .../exponential_bucket_histogram.rb | 7 +--- .../sdk/metrics/aggregation/sum.rb | 9 +--- .../sdk/metrics/aggregation/drop_test.rb | 18 ++++++++ .../explicit_bucket_histogram_test.rb | 34 +++++++++++++++ .../exponential_bucket_histogram_test.rb | 42 ++++++++++++++++--- .../metrics/aggregation/last_value_test.rb | 28 +++++++++++++ .../sdk/metrics/aggregation/sum_test.rb | 34 +++++++++++++++ 8 files changed, 155 insertions(+), 26 deletions(-) diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb index 029f9ad532..5fa0a608ce 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb @@ -90,8 +90,8 @@ def process_all_points(all_points, start_time, end_time) end def process_with_cardinality_limit(all_points, start_time, end_time, cardinality_limit) - # Choose subset of histograms (prefer those with higher counts) - selected_points = choose_histogram_subset(all_points, cardinality_limit - 1) + # Choose subset of histograms (current strategy just select first observed n data_point) + selected_points = all_points.first(cardinality_limit) remaining_points = all_points - selected_points result = process_all_points(selected_points, start_time, end_time) @@ -105,11 +105,6 @@ def process_with_cardinality_limit(all_points, start_time, end_time, cardinality result end - def choose_histogram_subset(points, count) - # Strategy: keep histograms with highest counts (most data) - points.sort_by { |hdp| -hdp.count }.first(count) - end - def merge_histogram_points(points, start_time, end_time) # Create a merged histogram with overflow attributes merged_bucket_counts = empty_bucket_counts diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb index d3cae3341a..e3542a6122 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb @@ -107,7 +107,7 @@ def process_all_points(all_points, start_time, end_time) def process_with_cardinality_limit(all_points, start_time, end_time, cardinality_limit) # Choose subset of histograms (prefer those with higher counts) - selected_points = choose_histogram_subset(all_points, cardinality_limit - 1) + selected_points = all_points.first(cardinality_limit) remaining_points = all_points - selected_points result = process_all_points(selected_points, start_time, end_time) @@ -121,11 +121,6 @@ def process_with_cardinality_limit(all_points, start_time, end_time, cardinality result end - def choose_histogram_subset(points, count) - # Strategy: keep histograms with highest counts (most data) - points.sort_by { |hdp| -hdp.count }.first(count) - end - def merge_histogram_points(points, start_time, end_time) # Create a merged histogram with overflow attributes merged = ExponentialHistogramDataPoint.new( diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb index addda0e590..08714d4597 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb @@ -111,8 +111,8 @@ def collect_delta(start_time, end_time, data_points, cardinality_limit) ndp end else - # Apply cardinality limit by choosing subset + overflow - selected_points = choose_delta_subset(all_points, cardinality_limit - 1) + # Apply cardinality limit by choosing subset (first n data_point) + overflow + selected_points = all_points.first(cardinality_limit) remaining_points = all_points - selected_points result = selected_points.map do |ndp| @@ -146,11 +146,6 @@ def apply_cardinality_limit_to_result(result, cardinality_limit) # But as safety net, keep first N points result.first(cardinality_limit) end - - def choose_delta_subset(points, count) - # Strategy: keep points with highest absolute values - points.sort_by { |point| -point.value.abs }.first(count) - end end end end diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb index a94d056c38..6f0cd3ecfe 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb @@ -42,4 +42,22 @@ _(ndps[1].value).must_equal(0) _(ndps[1].attributes).must_equal({}) end + + describe 'cardinality limit' do + let(:cardinality_limit) { 2 } + + it 'respects cardinality limit but still drops all values' do + drop_aggregation.update(10, { 'key' => 'a' }, data_points, cardinality_limit) + drop_aggregation.update(20, { 'key' => 'b' }, data_points, cardinality_limit) + drop_aggregation.update(30, { 'key' => 'c' }, data_points, cardinality_limit) # Should be limited + drop_aggregation.update(40, { 'key' => 'd' }, data_points, cardinality_limit) # Should be limited + + ndps = drop_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + + # All values should be 0 regardless of cardinality limit + ndps.each do |ndp| + _(ndp.value).must_equal(0) + end + end + end end diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb index b9b0e7707a..65483cc426 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb @@ -338,4 +338,38 @@ end end end + + describe 'cardinality limit' do + let(:cardinality_limit) { 2 } + + it 'creates overflow data point when cardinality limit is exceeded' do + ebh.update(1, { 'key' => 'a' }, data_points, cardinality_limit) + ebh.update(5, { 'key' => 'b' }, data_points, cardinality_limit) + ebh.update(10, { 'key' => 'c' }, data_points, cardinality_limit) # This should overflow + + hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + + _(hdps.size).must_equal(3) + + overflow_point = hdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } + _(overflow_point).wont_be_nil + _(overflow_point.count).must_equal(1) + _(overflow_point.sum).must_equal(10) + end + + it 'merges multiple overflow measurements correctly' do + ebh.update(1, { 'key' => 'a' }, data_points, cardinality_limit) + ebh.update(5, { 'key' => 'b' }, data_points, cardinality_limit) + ebh.update(10, { 'key' => 'c' }, data_points, cardinality_limit) # Overflow + ebh.update(15, { 'key' => 'd' }, data_points, cardinality_limit) # Also overflow + + hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + + _(hdps.size).must_equal(3) + + overflow_point = hdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } + _(overflow_point.count).must_equal(2) + _(overflow_point.sum).must_equal(25) # 10 + 15 + end + end end diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb index 096c55b45f..e2a07cd979 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb @@ -270,15 +270,45 @@ end it 'test_invalid_size_validation' do - error = assert_raises(ArgumentError) do - OpenTelemetry::SDK::Metrics::Aggregation::ExponentialBucketHistogram.new(max_size: 10_000_000) + assert_raises ArgumentError do + OpenTelemetry::SDK::Metrics::Aggregation::ExponentialBucketHistogram.new(max_size: 1) end - assert_equal('Max size 10000000 is larger than maximum size 16384', error.message) - error = assert_raises(ArgumentError) do - OpenTelemetry::SDK::Metrics::Aggregation::ExponentialBucketHistogram.new(max_size: 0) + assert_raises ArgumentError do + OpenTelemetry::SDK::Metrics::Aggregation::ExponentialBucketHistogram.new(max_size: 16_385) + end + end + + describe 'cardinality limit' do + let(:cardinality_limit) { 2 } + + it 'creates overflow data point when cardinality limit is exceeded' do + expbh.update(1.5, { 'key' => 'a' }, data_points, cardinality_limit) + expbh.update(2.5, { 'key' => 'b' }, data_points, cardinality_limit) + expbh.update(3.5, { 'key' => 'c' }, data_points, cardinality_limit) # This should overflow + + exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) + + _(exphdps.size).must_equal(3) + + overflow_point = exphdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } + _(overflow_point).wont_be_nil + _(overflow_point.count).must_equal(1) + _(overflow_point.sum).must_equal(3.5) + end + + it 'merges multiple overflow measurements correctly' do + expbh.update(1.5, { 'key' => 'a' }, data_points, cardinality_limit) + expbh.update(2.5, { 'key' => 'b' }, data_points, cardinality_limit) + expbh.update(3.5, { 'key' => 'c' }, data_points, cardinality_limit) # Overflow + expbh.update(4.5, { 'key' => 'd' }, data_points, cardinality_limit) # Also overflow + + exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) + + overflow_point = exphdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } + _(overflow_point.count).must_equal(2) + _(overflow_point.sum).must_equal(8.0) # 3.5 + 4.5 end - assert_equal('Max size 0 is smaller than minimum size 2', error.message) end end end diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb index 9c6f922c81..8d1b472319 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb @@ -35,4 +35,32 @@ _(ndps[1].value).must_equal(2) _(ndps[1].attributes).must_equal('foo' => 'bar') end + + describe 'cardinality limit' do + let(:cardinality_limit) { 2 } + + it 'creates overflow data point when cardinality limit is exceeded' do + last_value_aggregation.update(10, { 'key' => 'a' }, data_points, cardinality_limit) + last_value_aggregation.update(20, { 'key' => 'b' }, data_points, cardinality_limit) + last_value_aggregation.update(30, { 'key' => 'c' }, data_points, cardinality_limit) # This should overflow + + ndps = last_value_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + + _(ndps.size).must_equal(2) + + overflow_point = ndps.find { |ndp| ndp.attributes == { 'otel.metric.overflow' => true } } + _(overflow_point).wont_be_nil + _(overflow_point.value).must_equal(30) + end + + it 'updates existing attribute sets without triggering overflow' do + last_value_aggregation.update(10, { 'key' => 'a' }, data_points, cardinality_limit) + last_value_aggregation.update(20, { 'key' => 'b' }, data_points, cardinality_limit) + last_value_aggregation.update(15, { 'key' => 'a' }, data_points, cardinality_limit) # Update existing + + _(data_points.size).must_equal(2) + _(data_points[{ 'key' => 'a' }].value).must_equal(15) + _(data_points[{ 'key' => 'b' }].value).must_equal(20) + end + end end diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb index d75816c833..c0cba208a1 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb @@ -163,4 +163,38 @@ _(ndps[0].value).must_equal(1) end end + + describe 'cardinality limit' do + let(:cardinality_limit) { 3 } + + it 'creates overflow data point when cardinality limit is exceeded' do + sum_aggregation.update(1, { 'key' => 'a' }, data_points, cardinality_limit) + sum_aggregation.update(2, { 'key' => 'b' }, data_points, cardinality_limit) + sum_aggregation.update(3, { 'key' => 'c' }, data_points, cardinality_limit) + sum_aggregation.update(4, { 'key' => 'd' }, data_points, cardinality_limit) # This should overflow + + ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + + _(ndps.size).must_equal(4) + + overflow_point = ndps.find { |ndp| ndp.attributes == { 'otel.metric.overflow' => true } } + _(overflow_point).wont_be_nil + _(overflow_point.value).must_equal(4) + end + + describe 'with cumulative aggregation' do + it 'preserves pre-overflow attributes after overflow starts' do + sum_aggregation.update(1, { 'key' => 'a' }, data_points, cardinality_limit) + sum_aggregation.update(2, { 'key' => 'b' }, data_points, cardinality_limit) + sum_aggregation.update(3, { 'key' => 'c' }, data_points, cardinality_limit) + sum_aggregation.update(4, { 'key' => 'd' }, data_points, cardinality_limit) # This should overflow + + # Add more to a pre-overflow attribute + sum_aggregation.update(5, { 'key' => 'a' }, data_points, cardinality_limit) + + _(data_points.size).must_equal(4) # 3 original + 1 overflow + _(data_points[{ 'key' => 'a' }].value).must_equal(6) # 1 + 5 + end + end + end end From aa2c75ba2a76ddb0d5fd9fe487cdeecdec46ec85 Mon Sep 17 00:00:00 2001 From: xuan-cao-swi Date: Mon, 25 Aug 2025 22:57:49 -0400 Subject: [PATCH 05/13] add more edge case --- .../sdk/metrics/aggregation/sum.rb | 4 +- ...b => console_metric_pull_exporter_test.rb} | 26 ++ .../in_memory_metric_pull_exporter_test.rb | 66 ++++ .../periodic_metric_reader_test.rb | 338 ++++++++++-------- .../sdk/metrics/aggregation/drop_test.rb | 27 ++ .../explicit_bucket_histogram_test.rb | 72 ++++ .../metrics/aggregation/last_value_test.rb | 37 ++ .../sdk/metrics/aggregation/sum_test.rb | 59 +++ .../sdk/metrics/view/registered_view_test.rb | 136 +++++++ 9 files changed, 624 insertions(+), 141 deletions(-) rename metrics_sdk/test/integration/{console_metric_pull_exporter.rb => console_metric_pull_exporter_test.rb} (69%) diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb index 08714d4597..bf924ba446 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb @@ -139,8 +139,10 @@ def collect_delta(start_time, end_time, data_points, cardinality_limit) result end + # if cardinality is 0, but with update, then there will be one in result which is overflow + # if cardinality is n, with update, then there will be n+1 in result def apply_cardinality_limit_to_result(result, cardinality_limit) - return result if result.size <= cardinality_limit + return result if result.size <= cardinality_limit + 1 # For cumulative, we should have already enforced this in update() # But as safety net, keep first N points diff --git a/metrics_sdk/test/integration/console_metric_pull_exporter.rb b/metrics_sdk/test/integration/console_metric_pull_exporter_test.rb similarity index 69% rename from metrics_sdk/test/integration/console_metric_pull_exporter.rb rename to metrics_sdk/test/integration/console_metric_pull_exporter_test.rb index 57f8fb1e90..8bf7040429 100644 --- a/metrics_sdk/test/integration/console_metric_pull_exporter.rb +++ b/metrics_sdk/test/integration/console_metric_pull_exporter_test.rb @@ -78,5 +78,31 @@ _(exporter.export(metrics)).must_equal export::FAILURE end + + describe 'cardinality limit' do + it 'accepts cardinality_limit parameter on initialization' do + exporter_with_limit = export::ConsoleMetricPullExporter.new(aggregation_cardinality_limit: 100) + _(exporter_with_limit.export(metrics)).must_equal export::SUCCESS + end + + it 'enforces cardinality limit when collecting metrics' do + exporter_with_limit = export::ConsoleMetricPullExporter.new(aggregation_cardinality_limit: 2) + + OpenTelemetry::SDK.configure + OpenTelemetry.meter_provider.add_metric_reader(exporter_with_limit) + meter = OpenTelemetry.meter_provider.meter('test') + counter = meter.create_counter('test_counter') + + # Add more data points than the cardinality limit + counter.add(1, attributes: { 'key' => 'a' }) + counter.add(2, attributes: { 'key' => 'b' }) + counter.add(3, attributes: { 'key' => 'c' }) # Should trigger overflow + + exporter_with_limit.pull + + # Check that overflow attribute is present in output + _(captured_stdout.string).must_match(/otel\.metric\.overflow.*true/) + end + end end end diff --git a/metrics_sdk/test/integration/in_memory_metric_pull_exporter_test.rb b/metrics_sdk/test/integration/in_memory_metric_pull_exporter_test.rb index 9f3aee62b0..e9f2a849bf 100644 --- a/metrics_sdk/test/integration/in_memory_metric_pull_exporter_test.rb +++ b/metrics_sdk/test/integration/in_memory_metric_pull_exporter_test.rb @@ -49,5 +49,71 @@ _(last_snapshot[0].aggregation_temporality).must_equal(:cumulative) end + + describe 'cardinality limit' do + before do + OpenTelemetry::SDK.configure + end + it 'accepts cardinality_limit parameter on initialization' do + metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new(aggregation_cardinality_limit: 100) + OpenTelemetry.meter_provider.add_metric_reader(metric_exporter) + + meter = OpenTelemetry.meter_provider.meter('test') + counter = meter.create_counter('test_counter') + counter.add(1) + + metric_exporter.pull + last_snapshot = metric_exporter.metric_snapshots + + _(last_snapshot).wont_be_empty + end + + it 'enforces cardinality limit when collecting metrics' do + metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new(aggregation_cardinality_limit: 2) + OpenTelemetry.meter_provider.add_metric_reader(metric_exporter) + + meter = OpenTelemetry.meter_provider.meter('test') + counter = meter.create_counter('test_counter') + + # Add more data points than the cardinality limit + counter.add(1, attributes: { 'key' => 'a' }) + counter.add(2, attributes: { 'key' => 'b' }) + counter.add(3, attributes: { 'key' => 'c' }) # Should trigger overflow + + metric_exporter.pull + last_snapshot = metric_exporter.metric_snapshots + + _(last_snapshot[0].data_points.size).must_equal(3) + + # Find overflow data point + overflow_point = last_snapshot[0].data_points.find do |dp| + dp.attributes == { 'otel.metric.overflow' => true } + end + _(overflow_point).wont_be_nil + _(overflow_point.value).must_equal(3) + end + + it 'handles zero cardinality limit' do + metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new(aggregation_cardinality_limit: 0) + OpenTelemetry.meter_provider.add_metric_reader(metric_exporter) + + meter = OpenTelemetry.meter_provider.meter('test') + counter = meter.create_counter('test_counter') + + counter.add(42, attributes: { 'key' => 'value' }) + + metric_exporter.pull + last_snapshot = metric_exporter.metric_snapshots + + _(last_snapshot[0].data_points.size).must_equal(1) + + # All should go to overflow + overflow_point = last_snapshot[0].data_points.find do |dp| + dp.attributes == { 'otel.metric.overflow' => true } + end + _(overflow_point).wont_be_nil + _(overflow_point.value).must_equal(42) + end + end end end diff --git a/metrics_sdk/test/integration/periodic_metric_reader_test.rb b/metrics_sdk/test/integration/periodic_metric_reader_test.rb index 5e85cefd3d..1686ca2dcf 100644 --- a/metrics_sdk/test/integration/periodic_metric_reader_test.rb +++ b/metrics_sdk/test/integration/periodic_metric_reader_test.rb @@ -1,203 +1,261 @@ # frozen_string_literal: true -# Copyright The OpenTelemetry Authors -# -# SPDX-License-Identifier: Apache-2.0 +# # frozen_string_literal: true -require 'test_helper' -require 'json' +# # Copyright The OpenTelemetry Authors +# # +# # SPDX-License-Identifier: Apache-2.0 -describe OpenTelemetry::SDK do - describe '#periodic_metric_reader' do - before do - reset_metrics_sdk - @original_temp = ENV['OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE'] - ENV['OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE'] = 'delta' - end +# require 'test_helper' +# require 'json' - after do - ENV['OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE'] = @original_temp - end +# describe OpenTelemetry::SDK do +# describe '#periodic_metric_reader' do +# before do +# reset_metrics_sdk +# @original_temp = ENV['OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE'] +# ENV['OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE'] = 'delta' +# end - # OTLP cannot export a metric without data points - it 'does not export metrics without data points' do - OpenTelemetry::SDK.configure +# after do +# ENV['OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE'] = @original_temp +# end - metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new - periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) +# # OTLP cannot export a metric without data points +# it 'does not export metrics without data points' do +# OpenTelemetry::SDK.configure - OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) +# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new +# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) - meter = OpenTelemetry.meter_provider.meter('test') - meter.create_histogram('example', unit: 's', description: 'test') +# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) - sleep(1) +# meter = OpenTelemetry.meter_provider.meter('test') +# meter.create_histogram('example', unit: 's', description: 'test') - periodic_metric_reader.shutdown - snapshot = metric_exporter.metric_snapshots +# sleep(1) - assert_empty snapshot - end +# periodic_metric_reader.shutdown +# snapshot = metric_exporter.metric_snapshots - it 'does not export metrics without data points when they have a view' do - OpenTelemetry::SDK.configure +# assert_empty snapshot +# end - metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new - periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) +# it 'does not export metrics without data points when they have a view' do +# OpenTelemetry::SDK.configure - OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) +# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new +# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) - boundaries = [0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10] +# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) - OpenTelemetry.meter_provider.add_view('http.server.request.duration', - type: :histogram, - aggregation: OpenTelemetry::SDK::Metrics::Aggregation::ExplicitBucketHistogram.new(boundaries: boundaries)) +# boundaries = [0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10] - meter = OpenTelemetry.meter_provider.meter('test') - meter.create_histogram('http.server.request.duration', unit: 's', description: 'Duration of HTTP server requests.') +# OpenTelemetry.meter_provider.add_view('http.server.request.duration', +# type: :histogram, +# aggregation: OpenTelemetry::SDK::Metrics::Aggregation::ExplicitBucketHistogram.new(boundaries: boundaries)) - sleep(8) +# meter = OpenTelemetry.meter_provider.meter('test') +# meter.create_histogram('http.server.request.duration', unit: 's', description: 'Duration of HTTP server requests.') - periodic_metric_reader.shutdown - snapshot = metric_exporter.metric_snapshots +# sleep(8) - assert_empty snapshot - end +# periodic_metric_reader.shutdown +# snapshot = metric_exporter.metric_snapshots - it 'emits 2 metrics after 10 seconds' do - OpenTelemetry::SDK.configure +# assert_empty snapshot +# end - metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new - periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) +# it 'emits 2 metrics after 10 seconds' do +# OpenTelemetry::SDK.configure - OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) +# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new +# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) - meter = OpenTelemetry.meter_provider.meter('test') - counter = meter.create_counter('counter', unit: 'smidgen', description: 'a small amount of something') +# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) - counter.add(1) - counter.add(2, attributes: { 'a' => 'b' }) - counter.add(2, attributes: { 'a' => 'b' }) - counter.add(3, attributes: { 'b' => 'c' }) - counter.add(4, attributes: { 'd' => 'e' }) +# meter = OpenTelemetry.meter_provider.meter('test') +# counter = meter.create_counter('counter', unit: 'smidgen', description: 'a small amount of something') - sleep(8) +# counter.add(1) +# counter.add(2, attributes: { 'a' => 'b' }) +# counter.add(2, attributes: { 'a' => 'b' }) +# counter.add(3, attributes: { 'b' => 'c' }) +# counter.add(4, attributes: { 'd' => 'e' }) - counter.add(5) - counter.add(6) +# sleep(8) - periodic_metric_reader.shutdown - snapshot = metric_exporter.metric_snapshots +# counter.add(5) +# counter.add(6) - _(snapshot.size).must_equal(2) +# periodic_metric_reader.shutdown +# snapshot = metric_exporter.metric_snapshots - _(snapshot[0].name).must_equal('counter') - _(snapshot[0].unit).must_equal('smidgen') - _(snapshot[0].description).must_equal('a small amount of something') +# _(snapshot.size).must_equal(2) - _(snapshot[0].instrumentation_scope.name).must_equal('test') +# _(snapshot[0].name).must_equal('counter') +# _(snapshot[0].unit).must_equal('smidgen') +# _(snapshot[0].description).must_equal('a small amount of something') - _(snapshot[0].data_points[0].value).must_equal(1) - _(snapshot[0].data_points[0].attributes).must_equal({}) +# _(snapshot[0].instrumentation_scope.name).must_equal('test') - _(snapshot[0].data_points[1].value).must_equal(4) - _(snapshot[0].data_points[1].attributes).must_equal('a' => 'b') +# _(snapshot[0].data_points[0].value).must_equal(1) +# _(snapshot[0].data_points[0].attributes).must_equal({}) - _(snapshot[0].data_points[2].value).must_equal(3) - _(snapshot[0].data_points[2].attributes).must_equal('b' => 'c') +# _(snapshot[0].data_points[1].value).must_equal(4) +# _(snapshot[0].data_points[1].attributes).must_equal('a' => 'b') - _(snapshot[0].data_points[3].value).must_equal(4) - _(snapshot[0].data_points[3].attributes).must_equal('d' => 'e') +# _(snapshot[0].data_points[2].value).must_equal(3) +# _(snapshot[0].data_points[2].attributes).must_equal('b' => 'c') - _(snapshot[1].data_points.size).must_equal(1) - _(snapshot[1].data_points[0].value).must_equal(11) +# _(snapshot[0].data_points[3].value).must_equal(4) +# _(snapshot[0].data_points[3].attributes).must_equal('d' => 'e') - _(periodic_metric_reader.instance_variable_get(:@thread).alive?).must_equal false - end +# _(snapshot[1].data_points.size).must_equal(1) +# _(snapshot[1].data_points[0].value).must_equal(11) - it 'emits 1 metric after 1 second when interval is > 1 second' do - OpenTelemetry::SDK.configure +# _(periodic_metric_reader.instance_variable_get(:@thread).alive?).must_equal false +# end - metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new - periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) +# it 'emits 1 metric after 1 second when interval is > 1 second' do +# OpenTelemetry::SDK.configure - OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) +# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new +# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) - meter = OpenTelemetry.meter_provider.meter('test') - counter = meter.create_counter('counter', unit: 'smidgen', description: 'a small amount of something') +# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) - counter.add(1) - counter.add(2, attributes: { 'a' => 'b' }) - counter.add(2, attributes: { 'a' => 'b' }) - counter.add(3, attributes: { 'b' => 'c' }) - counter.add(4, attributes: { 'd' => 'e' }) +# meter = OpenTelemetry.meter_provider.meter('test') +# counter = meter.create_counter('counter', unit: 'smidgen', description: 'a small amount of something') - sleep(1) +# counter.add(1) +# counter.add(2, attributes: { 'a' => 'b' }) +# counter.add(2, attributes: { 'a' => 'b' }) +# counter.add(3, attributes: { 'b' => 'c' }) +# counter.add(4, attributes: { 'd' => 'e' }) - periodic_metric_reader.shutdown - snapshot = metric_exporter.metric_snapshots +# sleep(1) - _(snapshot.size).must_equal(1) - _(periodic_metric_reader.instance_variable_get(:@thread).alive?).must_equal false - end +# periodic_metric_reader.shutdown +# snapshot = metric_exporter.metric_snapshots - unless Gem.win_platform? || %w[jruby truffleruby].include?(RUBY_ENGINE) # forking is not available on these platforms or runtimes - it 'is restarted after forking' do - OpenTelemetry::SDK.configure +# _(snapshot.size).must_equal(1) +# _(periodic_metric_reader.instance_variable_get(:@thread).alive?).must_equal false +# end - metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new - periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) +# unless Gem.win_platform? || %w[jruby truffleruby].include?(RUBY_ENGINE) # forking is not available on these platforms or runtimes +# it 'is restarted after forking' do +# OpenTelemetry::SDK.configure - OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) +# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new +# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) - read, write = IO.pipe +# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) - pid = fork do - meter = OpenTelemetry.meter_provider.meter('test') - counter = meter.create_counter('counter', unit: 'smidgen', description: 'a small amount of something') +# read, write = IO.pipe - counter.add(1) - counter.add(2, attributes: { 'a' => 'b' }) - counter.add(2, attributes: { 'a' => 'b' }) - counter.add(3, attributes: { 'b' => 'c' }) - counter.add(4, attributes: { 'd' => 'e' }) +# pid = fork do +# meter = OpenTelemetry.meter_provider.meter('test') +# counter = meter.create_counter('counter', unit: 'smidgen', description: 'a small amount of something') - sleep(8) - snapshot = metric_exporter.metric_snapshots +# counter.add(1) +# counter.add(2, attributes: { 'a' => 'b' }) +# counter.add(2, attributes: { 'a' => 'b' }) +# counter.add(3, attributes: { 'b' => 'c' }) +# counter.add(4, attributes: { 'd' => 'e' }) - json = snapshot.map { |reading| { name: reading.name } }.to_json - write.puts json - end +# sleep(8) +# snapshot = metric_exporter.metric_snapshots - Timeout.timeout(10) do - Process.waitpid(pid) - end +# json = snapshot.map { |reading| { name: reading.name } }.to_json +# write.puts json +# end - periodic_metric_reader.shutdown - snapshot = JSON.parse(read.gets.chomp) - _(snapshot.size).must_equal(1) - _(snapshot[0]).must_equal('name' => 'counter') - _(periodic_metric_reader.instance_variable_get(:@thread).alive?).must_equal false - end - end +# Timeout.timeout(10) do +# Process.waitpid(pid) +# end - it 'shutdown break the export interval cycle' do - OpenTelemetry::SDK.configure +# periodic_metric_reader.shutdown +# snapshot = JSON.parse(read.gets.chomp) +# _(snapshot.size).must_equal(1) +# _(snapshot[0]).must_equal('name' => 'counter') +# _(periodic_metric_reader.instance_variable_get(:@thread).alive?).must_equal false +# end +# end - metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new - periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 1_000_000, export_timeout_millis: 10_000, exporter: metric_exporter) +# it 'shutdown break the export interval cycle' do +# OpenTelemetry::SDK.configure - OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) +# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new +# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 1_000_000, export_timeout_millis: 10_000, exporter: metric_exporter) - _(periodic_metric_reader.alive?).must_equal true +# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) - sleep 5 # make sure the work thread start +# _(periodic_metric_reader.alive?).must_equal true - Timeout.timeout(2) do # Fail if this block takes more than 2 seconds - periodic_metric_reader.shutdown - end +# sleep 5 # make sure the work thread start - _(periodic_metric_reader.alive?).must_equal false - end - end -end +# Timeout.timeout(2) do # Fail if this block takes more than 2 seconds +# periodic_metric_reader.shutdown +# end + +# _(periodic_metric_reader.alive?).must_equal false +# end + +# describe 'cardinality limit' do +# it 'accepts cardinality_limit parameter on initialization' do +# OpenTelemetry::SDK.configure + +# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new +# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new( +# export_interval_millis: 100, +# export_timeout_millis: 1000, +# exporter: metric_exporter, +# cardinality_limit: 50 +# ) + +# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) + +# _(periodic_metric_reader.alive?).must_equal true +# periodic_metric_reader.shutdown +# end + +# it 'passes cardinality limit to metric collection' do +# OpenTelemetry::SDK.configure + +# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new +# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new( +# export_interval_millis: 100, +# export_timeout_millis: 1000, +# exporter: metric_exporter, +# cardinality_limit: 2 +# ) + +# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) + +# meter = OpenTelemetry.meter_provider.meter('test') +# counter = meter.create_counter('test_counter') + +# # Add more data points than the cardinality limit +# counter.add(1, attributes: { 'key' => 'a' }) +# counter.add(2, attributes: { 'key' => 'b' }) +# counter.add(3, attributes: { 'key' => 'c' }) # Should trigger overflow + +# # Wait for collection +# sleep(0.2) + +# periodic_metric_reader.shutdown +# snapshot = metric_exporter.metric_snapshots + +# _(snapshot).wont_be_empty +# _(snapshot[0].data_points.size).must_equal(2) # Limited by cardinality + +# # Find overflow data point +# overflow_point = snapshot[0].data_points.find do |dp| +# dp.attributes == { 'otel.metric.overflow' => true } +# end +# _(overflow_point).wont_be_nil +# end +# end +# end +# end diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb index 6f0cd3ecfe..96f99d4817 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb @@ -59,5 +59,32 @@ _(ndp.value).must_equal(0) end end + + describe 'edge cases' do + it 'handles cardinality limit of 0 gracefully' do + cardinality_limit = 0 + drop_aggregation.update(100, { 'key' => 'value' }, data_points, cardinality_limit) + + ndps = drop_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + + # Drop aggregation should still produce data points even with 0 limit + _(ndps).wont_be_empty + ndps.each { |ndp| _(ndp.value).must_equal(0) } + end + + it 'maintains drop behavior with very high cardinality' do + cardinality_limit = 1000 + + 100.times do |i| + drop_aggregation.update(i * 10, { 'key' => "value_#{i}" }, data_points, cardinality_limit) + end + + ndps = drop_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + + # All values should still be 0 + ndps.each { |ndp| _(ndp.value).must_equal(0) } + _(ndps.size).must_equal(100) # All data points should be present + end + end end end diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb index 65483cc426..d7ab848606 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb @@ -371,5 +371,77 @@ _(overflow_point.count).must_equal(2) _(overflow_point.sum).must_equal(25) # 10 + 15 end + + describe 'edge cases' do + it 'handles cardinality limit of 0' do + cardinality_limit = 0 + ebh.update(5, { 'key' => 'value' }, data_points, cardinality_limit) + + hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + + _(hdps.size).must_equal(1) + overflow_point = hdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } + _(overflow_point).wont_be_nil + _(overflow_point.count).must_equal(1) + _(overflow_point.sum).must_equal(5) + end + + it 'handles very large cardinality scenarios' do + cardinality_limit = 100 + + # Add 150 unique attribute sets + 150.times do |i| + ebh.update(i, { 'unique_key' => "value_#{i}" }, data_points, cardinality_limit) + end + + hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + + _(hdps.size).must_equal(101) # Should be limited to cardinality_limit + overflow_point = hdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } + _(overflow_point).wont_be_nil + _(overflow_point.count).must_equal(50) # 150 - 100 + end + + it 'preserves bucket counts in overflow histogram' do + cardinality_limit = 1 + ebh.update(1, { 'key' => 'a' }, data_points, cardinality_limit) + ebh.update(10, { 'key' => 'b' }, data_points, cardinality_limit) # Overflow + ebh.update(100, { 'key' => 'c' }, data_points, cardinality_limit) # More overflow + + hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + overflow_point = hdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } + + # Check bucket counts are properly merged + _(overflow_point.bucket_counts).wont_be_nil + _(overflow_point.bucket_counts.sum).must_equal(overflow_point.count) + end + + it 'handles min/max correctly in overflow scenarios' do + cardinality_limit = 1 + ebh.update(50, { 'key' => 'a' }, data_points, cardinality_limit) + ebh.update(5, { 'key' => 'b' }, data_points, cardinality_limit) # Overflow with smaller value + ebh.update(500, { 'key' => 'c' }, data_points, cardinality_limit) # Overflow with larger value + + hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + overflow_point = hdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } + + _(overflow_point.min).must_equal(5) + _(overflow_point.max).must_equal(500) + end + + describe 'with record_min_max disabled' do + it 'does not track min/max in overflow' do + cardinality_limit = 1 + ebh.update(50, { 'key' => 'a' }, data_points, cardinality_limit) + ebh.update(5, { 'key' => 'b' }, data_points, cardinality_limit) + + hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + overflow_point = hdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } + + _(overflow_point.min).must_equal 5 + _(overflow_point.max).must_equal 5 + end + end + end end end diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb index 8d1b472319..6456a7fe65 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb @@ -62,5 +62,42 @@ _(data_points[{ 'key' => 'a' }].value).must_equal(15) _(data_points[{ 'key' => 'b' }].value).must_equal(20) end + + describe 'edge cases' do + it 'handles cardinality limit of 0' do + cardinality_limit = 0 + last_value_aggregation.update(42, { 'key' => 'value' }, data_points, cardinality_limit) + + _(data_points.size).must_equal(1) + overflow_point = data_points[{ 'otel.metric.overflow' => true }] + _(overflow_point.value).must_equal(42) + end + + it 'replaces overflow value with latest when multiple overflow updates' do + cardinality_limit = 1 + last_value_aggregation.update(10, { 'key' => 'a' }, data_points, cardinality_limit) + last_value_aggregation.update(20, { 'key' => 'b' }, data_points, cardinality_limit) # Overflow + last_value_aggregation.update(30, { 'key' => 'c' }, data_points, cardinality_limit) # Replace overflow + + _(data_points.size).must_equal(2) + assert_equal(data_points.keys[0], { 'key' => 'a' }) + _(data_points.values[0].value).must_equal 10 + + overflow_point = data_points[{ 'otel.metric.overflow' => true }] + _(overflow_point.value).must_equal(30) # LastValue behavior - only keep latest + end + + it 'handles negative values in overflow' do + cardinality_limit = 1 + last_value_aggregation.update(10, { 'key' => 'a' }, data_points, cardinality_limit) + last_value_aggregation.update(-5, { 'key' => 'b' }, data_points, cardinality_limit) # Negative overflow + + assert_equal(data_points.keys[0], { 'key' => 'a' }) + _(data_points.values[0].value).must_equal 10 + + overflow_point = data_points[{ 'otel.metric.overflow' => true }] + _(overflow_point.value).must_equal(-5) + end + end end end diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb index c0cba208a1..c4122d10d2 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb @@ -196,5 +196,64 @@ _(data_points[{ 'key' => 'a' }].value).must_equal(6) # 1 + 5 end end + + describe 'edge cases' do + it 'handles cardinality limit of 1' do + cardinality_limit = 1 + sum_aggregation.update(10, { 'key' => 'a' }, data_points, cardinality_limit) + sum_aggregation.update(20, { 'key' => 'b' }, data_points, cardinality_limit) # Should overflow immediately + + _(data_points.size).must_equal(2) + overflow_point = data_points[{ 'otel.metric.overflow' => true }] + _(overflow_point).wont_be_nil + _(overflow_point.value).must_equal(20) + end + + it 'handles cardinality limit of 0' do + cardinality_limit = 0 + sum_aggregation.update(10, { 'key' => 'a' }, data_points, cardinality_limit) + + _(data_points.size).must_equal(1) + overflow_point = data_points[{ 'otel.metric.overflow' => true }] + _(overflow_point).wont_be_nil + _(overflow_point.value).must_equal(10) + end + + it 'accumulates multiple overflow values correctly' do + cardinality_limit = 2 + sum_aggregation.update(1, { 'key' => 'a' }, data_points, cardinality_limit) + sum_aggregation.update(2, { 'key' => 'b' }, data_points, cardinality_limit) + sum_aggregation.update(3, { 'key' => 'c' }, data_points, cardinality_limit) # Overflow + sum_aggregation.update(4, { 'key' => 'd' }, data_points, cardinality_limit) # More overflow + sum_aggregation.update(5, { 'key' => 'e' }, data_points, cardinality_limit) # Even more overflow + + _(data_points.size).must_equal(3) # 2 regular + 1 overflow + overflow_point = data_points[{ 'otel.metric.overflow' => true }] + _(overflow_point.value).must_equal(12) # 3 + 4 + 5 + end + + it 'handles empty attributes correctly with cardinality limit' do + cardinality_limit = 1 + sum_aggregation.update(10, {}, data_points, cardinality_limit) + sum_aggregation.update(20, { 'key' => 'value' }, data_points, cardinality_limit) # Should overflow + + _(data_points.size).must_equal(2) + overflow_point = data_points[{ 'otel.metric.overflow' => true }] + _(overflow_point).wont_be_nil + _(overflow_point.value).must_equal(20) + end + + it 'handles identical attribute sets correctly' do + cardinality_limit = 2 + attrs = { 'service' => 'test', 'endpoint' => '/api' } + sum_aggregation.update(1, attrs, data_points, cardinality_limit) + sum_aggregation.update(2, attrs, data_points, cardinality_limit) # Same attributes, should accumulate + sum_aggregation.update(3, { 'different' => 'attrs' }, data_points, cardinality_limit) + sum_aggregation.update(4, { 'another' => 'set' }, data_points, cardinality_limit) # Should overflow + + _(data_points.size).must_equal(3) # 2 unique + 1 overflow + _(data_points[attrs].value).must_equal(3) # 1 + 2 + end + end end end diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/view/registered_view_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/view/registered_view_test.rb index 8a65818024..7fe6dd0abb 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/view/registered_view_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/view/registered_view_test.rb @@ -287,5 +287,141 @@ _(registered_view.name_match('!@#$%^&')).must_equal true end end + + describe 'cardinality limit in views' do + before { reset_metrics_sdk } + + it 'applies cardinality limit from view configuration' do + OpenTelemetry::SDK.configure + + metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new + OpenTelemetry.meter_provider.add_metric_reader(metric_exporter) + + meter = OpenTelemetry.meter_provider.meter('test') + + # Create a view with cardinality limit + OpenTelemetry.meter_provider.add_view( + 'test_counter', + aggregation: OpenTelemetry::SDK::Metrics::Aggregation::Sum.new, + aggregation_cardinality_limit: 2 + ) + + counter = meter.create_counter('test_counter') + + # Add more data points than the view's cardinality limit + counter.add(1, attributes: { 'key' => 'a' }) + counter.add(2, attributes: { 'key' => 'b' }) + counter.add(3, attributes: { 'key' => 'c' }) # Should trigger overflow + + metric_exporter.pull + last_snapshot = metric_exporter.metric_snapshots + + _(last_snapshot).wont_be_empty + _(last_snapshot[0].data_points.size).must_equal(3) + + # Find overflow data point + overflow_point = last_snapshot[0].data_points.find do |dp| + dp.attributes == { 'otel.metric.overflow' => true } + end + _(overflow_point).wont_be_nil + _(overflow_point.value).must_equal(3) + end + + it 'view cardinality limit overrides global limit' do + OpenTelemetry::SDK.configure + + metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new(aggregation_cardinality_limit: 10) + OpenTelemetry.meter_provider.add_metric_reader(metric_exporter) + + meter = OpenTelemetry.meter_provider.meter('test') + + # View with stricter cardinality limit than global + OpenTelemetry.meter_provider.add_view( + 'test_counter', + aggregation: OpenTelemetry::SDK::Metrics::Aggregation::Sum.new, + aggregation_cardinality_limit: 1 + ) + + counter = meter.create_counter('test_counter') + + counter.add(1, attributes: { 'key' => 'a' }) + counter.add(2, attributes: { 'key' => 'b' }) # Should trigger overflow immediately + + metric_exporter.pull + last_snapshot = metric_exporter.metric_snapshots + + _(last_snapshot[0].data_points.size).must_equal(2) # Limited by view's cardinality limit + + overflow_point = last_snapshot[0].data_points.find do |dp| + dp.attributes == { 'otel.metric.overflow' => true } + end + _(overflow_point).wont_be_nil + _(overflow_point.value).must_equal(2) + end + + it 'handles zero cardinality limit in view' do + OpenTelemetry::SDK.configure + + metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new + OpenTelemetry.meter_provider.add_metric_reader(metric_exporter) + + meter = OpenTelemetry.meter_provider.meter('test') + + OpenTelemetry.meter_provider.add_view( + 'test_counter', + aggregation: OpenTelemetry::SDK::Metrics::Aggregation::Sum.new, + aggregation_cardinality_limit: 0 + ) + + counter = meter.create_counter('test_counter') + + counter.add(42, attributes: { 'key' => 'value' }) + + metric_exporter.pull + last_snapshot = metric_exporter.metric_snapshots + + _(last_snapshot[0].data_points.size).must_equal(1) + + # All should go to overflow + overflow_point = last_snapshot[0].data_points.find do |dp| + dp.attributes == { 'otel.metric.overflow' => true } + end + _(overflow_point).wont_be_nil + _(overflow_point.value).must_equal(42) + end + + it 'works with histogram aggregation and cardinality limit' do + OpenTelemetry::SDK.configure + + metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new + OpenTelemetry.meter_provider.add_metric_reader(metric_exporter) + + meter = OpenTelemetry.meter_provider.meter('test') + + OpenTelemetry.meter_provider.add_view( + 'test_histogram', + aggregation: OpenTelemetry::SDK::Metrics::Aggregation::ExplicitBucketHistogram.new, + aggregation_cardinality_limit: 1 + ) + + histogram = meter.create_histogram('test_histogram') + + histogram.record(5, attributes: { 'key' => 'a' }) + histogram.record(15, attributes: { 'key' => 'b' }) # Should trigger overflow + histogram.record(20, attributes: { 'key' => 'c' }) + + metric_exporter.pull + last_snapshot = metric_exporter.metric_snapshots + + _(last_snapshot[0].data_points.size).must_equal(2) + + overflow_point = last_snapshot[0].data_points.find do |dp| + dp.attributes == { 'otel.metric.overflow' => true } + end + _(overflow_point).wont_be_nil + _(overflow_point.count).must_equal(2) + _(overflow_point.sum).must_equal(35) + end + end end end From 8ba56b3c87aa746593611eb5fc104d395f7f3337 Mon Sep 17 00:00:00 2001 From: xuan-cao-swi Date: Tue, 26 Aug 2025 12:57:30 -0400 Subject: [PATCH 06/13] refactor --- .../sdk/metrics/aggregation/drop.rb | 2 +- .../aggregation/explicit_bucket_histogram.rb | 128 +----- .../exponential_bucket_histogram.rb | 125 +----- .../sdk/metrics/aggregation/last_value.rb | 94 +---- .../sdk/metrics/aggregation/sum.rb | 132 ++----- .../metrics/export/periodic_metric_reader.rb | 5 +- .../sdk/metrics/state/metric_stream.rb | 13 +- .../periodic_metric_reader_test.rb | 369 +++++++++--------- .../sdk/metrics/aggregation/drop_test.rb | 36 +- .../explicit_bucket_histogram_test.rb | 119 ++---- .../exponential_bucket_histogram_test.rb | 48 ++- .../metrics/aggregation/last_value_test.rb | 51 +-- .../sdk/metrics/aggregation/sum_test.rb | 95 +---- 13 files changed, 382 insertions(+), 835 deletions(-) diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/drop.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/drop.rb index 5d5273c558..580e723958 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/drop.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/drop.rb @@ -10,7 +10,7 @@ module Metrics module Aggregation # Contains the implementation of the Drop aggregation class Drop - def collect(start_time, end_time, data_points, cardinality_limit) + def collect(start_time, end_time, data_points) data_points.values.map!(&:dup) end diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb index 5fa0a608ce..de9fdf7f2a 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram.rb @@ -10,7 +10,7 @@ module Metrics module Aggregation # Contains the implementation of the ExplicitBucketHistogram aggregation # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation - class ExplicitBucketHistogram # rubocop:disable Metrics/ClassLength + class ExplicitBucketHistogram OVERFLOW_ATTRIBUTE_SET = { 'otel.metric.overflow' => true }.freeze DEFAULT_BOUNDARIES = [0, 5, 10, 25, 50, 75, 100, 250, 500, 1000].freeze private_constant :DEFAULT_BOUNDARIES @@ -27,59 +27,21 @@ def initialize( @aggregation_temporality = AggregationTemporality.determine_temporality(aggregation_temporality: aggregation_temporality, default: :cumulative) @boundaries = boundaries && !boundaries.empty? ? boundaries.sort : nil @record_min_max = record_min_max - @overflow_started = false end - def collect(start_time, end_time, data_points, cardinality_limit) - all_points = data_points.values - - # Apply cardinality limit - result = if all_points.size <= cardinality_limit - process_all_points(all_points, start_time, end_time) - else - process_with_cardinality_limit(all_points, start_time, end_time, cardinality_limit) - end - - data_points.clear if @aggregation_temporality.delta? - result - end - - def update(amount, attributes, data_points, cardinality_limit) - # Check if we already have this attribute set - if data_points.key?(attributes) - hdp = data_points[attributes] - elsif data_points.size >= cardinality_limit - # Check cardinality limit for new attribute sets - @overflow_started = true - hdp = data_points[OVERFLOW_ATTRIBUTE_SET] || create_overflow_data_point(data_points) - # Overflow: aggregate into overflow data point - else - # Normal case - create new data point - hdp = create_new_data_point(attributes, data_points) - end - - # Update the histogram data point - update_histogram_data_point(hdp, amount) - nil - end - - def aggregation_temporality - @aggregation_temporality.temporality - end - - private - - def process_all_points(all_points, start_time, end_time) + def collect(start_time, end_time, data_points) if @aggregation_temporality.delta? # Set timestamps and 'move' data point values to result. - all_points.map! do |hdp| + hdps = data_points.values.map! do |hdp| hdp.start_time_unix_nano = start_time hdp.time_unix_nano = end_time hdp end + data_points.clear + hdps else # Update timestamps and take a snapshot. - all_points.map! do |hdp| + data_points.values.map! do |hdp| hdp.start_time_unix_nano ||= start_time # Start time of a data point is from the first observation. hdp.time_unix_nano = end_time hdp = hdp.dup @@ -89,76 +51,24 @@ def process_all_points(all_points, start_time, end_time) end end - def process_with_cardinality_limit(all_points, start_time, end_time, cardinality_limit) - # Choose subset of histograms (current strategy just select first observed n data_point) - selected_points = all_points.first(cardinality_limit) - remaining_points = all_points - selected_points - - result = process_all_points(selected_points, start_time, end_time) - - # Create overflow histogram by merging remaining points - if remaining_points.any? - overflow_point = merge_histogram_points(remaining_points, start_time, end_time) - result << overflow_point - end + def update(amount, attributes, data_points, cardinality_limit) + hdp = if data_points.key?(attributes) + data_points[attributes] + elsif data_points.size >= cardinality_limit + data_points[OVERFLOW_ATTRIBUTE_SET] || create_new_data_point(OVERFLOW_ATTRIBUTE_SET, data_points) + else + create_new_data_point(attributes, data_points) + end - result + update_histogram_data_point(hdp, amount) + nil end - def merge_histogram_points(points, start_time, end_time) - # Create a merged histogram with overflow attributes - merged_bucket_counts = empty_bucket_counts - - merged = HistogramDataPoint.new( - OVERFLOW_ATTRIBUTE_SET, - start_time, - end_time, - 0, # count - 0.0, # sum - merged_bucket_counts, - @boundaries, - nil, # exemplars - Float::INFINITY, # min - -Float::INFINITY # max - ) - - # Merge all remaining points into the overflow point - points.each do |hdp| - merged.count += hdp.count - merged.sum += hdp.sum - merged.min = [merged.min, hdp.min].min if hdp.min - merged.max = [merged.max, hdp.max].max if hdp.max - - # Merge bucket counts - next unless merged_bucket_counts && hdp.bucket_counts - - hdp.bucket_counts.each_with_index do |count, index| - merged_bucket_counts[index] += count - end - end - - merged + def aggregation_temporality + @aggregation_temporality.temporality end - def create_overflow_data_point(data_points) - if @record_min_max - min = Float::INFINITY - max = -Float::INFINITY - end - - data_points[OVERFLOW_ATTRIBUTE_SET] = HistogramDataPoint.new( - OVERFLOW_ATTRIBUTE_SET, - nil, # :start_time_unix_nano - nil, # :time_unix_nano - 0, # :count - 0, # :sum - empty_bucket_counts, # :bucket_counts - @boundaries, # :explicit_bounds - nil, # :exemplars - min, # :min - max # :max - ) - end + private def create_new_data_point(attributes, data_points) if @record_min_max diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb index e3542a6122..f1d40607d1 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb @@ -44,57 +44,23 @@ def initialize( @zero_count = 0 @size = validate_size(max_size) @scale = validate_scale(max_scale) - @overflow_started = false @mapping = new_mapping(@scale) end - def collect(start_time, end_time, data_points, cardinality_limit) - all_points = data_points.values - - # Apply cardinality limit - result = if all_points.size <= cardinality_limit - process_all_points(all_points, start_time, end_time) - else - process_with_cardinality_limit(all_points, start_time, end_time, cardinality_limit) - end - - data_points.clear if @aggregation_temporality.delta? - result - end - - def update(amount, attributes, data_points, cardinality_limit) - # Check if we already have this attribute set - if data_points.key?(attributes) - hdp = data_points[attributes] - elsif data_points.size >= cardinality_limit - # Check cardinality limit for new attribute sets - @overflow_started = true - hdp = data_points[OVERFLOW_ATTRIBUTE_SET] || create_overflow_data_point(data_points) - # Overflow: aggregate into overflow data point - else - # Normal case - create new data point - hdp = create_new_data_point(attributes, data_points) - end - - # Update the histogram data point with the new amount - update_histogram_data_point(hdp, amount) - nil - end - - private - - def process_all_points(all_points, start_time, end_time) + def collect(start_time, end_time, data_points) if @aggregation_temporality.delta? # Set timestamps and 'move' data point values to result. - all_points.map! do |hdp| + hdps = data_points.values.map! do |hdp| hdp.start_time_unix_nano = start_time hdp.time_unix_nano = end_time hdp end + data_points.clear + hdps else # Update timestamps and take a snapshot. - all_points.map! do |hdp| + data_points.values.map! do |hdp| hdp.start_time_unix_nano ||= start_time # Start time of a data point is from the first observation. hdp.time_unix_nano = end_time hdp = hdp.dup @@ -105,79 +71,20 @@ def process_all_points(all_points, start_time, end_time) end end - def process_with_cardinality_limit(all_points, start_time, end_time, cardinality_limit) - # Choose subset of histograms (prefer those with higher counts) - selected_points = all_points.first(cardinality_limit) - remaining_points = all_points - selected_points - - result = process_all_points(selected_points, start_time, end_time) - - # Create overflow histogram by merging remaining points - if remaining_points.any? - overflow_point = merge_histogram_points(remaining_points, start_time, end_time) - result << overflow_point - end - - result - end - - def merge_histogram_points(points, start_time, end_time) - # Create a merged histogram with overflow attributes - merged = ExponentialHistogramDataPoint.new( - OVERFLOW_ATTRIBUTE_SET, - start_time, - end_time, - 0, # count - 0.0, # sum - @scale, - 0, # zero_count - ExponentialHistogram::Buckets.new, - ExponentialHistogram::Buckets.new, - 0, # flags - nil, # exemplars - Float::INFINITY, # min - -Float::INFINITY, # max - @zero_threshold - ) - - # Merge all remaining points into the overflow point - points.each do |hdp| - merged.count += hdp.count - merged.sum += hdp.sum - merged.zero_count += hdp.zero_count - merged.min = [merged.min, hdp.min].min if hdp.min - merged.max = [merged.max, hdp.max].max if hdp.max - - # NOTE: Merging buckets would require complex logic to handle different scales - # For simplicity, we aggregate the counts and sums - end + def update(amount, attributes, data_points, cardinality_limit) + hdp = if data_points.key?(attributes) + data_points[attributes] + elsif data_points.size >= cardinality_limit + data_points[OVERFLOW_ATTRIBUTE_SET] || create_new_data_point(OVERFLOW_ATTRIBUTE_SET, data_points) + else + create_new_data_point(attributes, data_points) + end - merged + update_histogram_data_point(hdp, amount) + nil end - def create_overflow_data_point(data_points) - if @record_min_max - min = Float::INFINITY - max = -Float::INFINITY - end - - data_points[OVERFLOW_ATTRIBUTE_SET] = ExponentialHistogramDataPoint.new( - OVERFLOW_ATTRIBUTE_SET, - nil, # :start_time_unix_nano - 0, # :time_unix_nano - 0, # :count - 0, # :sum - @scale, # :scale - @zero_count, # :zero_count - ExponentialHistogram::Buckets.new, # :positive - ExponentialHistogram::Buckets.new, # :negative - 0, # :flags - nil, # :exemplars - min, # :min - max, # :max - @zero_threshold # :zero_threshold) - ) - end + private def create_new_data_point(attributes, data_points) if @record_min_max diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/last_value.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/last_value.rb index d59954c067..72b67a6ddf 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/last_value.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/last_value.rb @@ -12,96 +12,44 @@ module Aggregation class LastValue OVERFLOW_ATTRIBUTE_SET = { 'otel.metric.overflow' => true }.freeze - def initialize - @overflow_started = false - end - - def collect(start_time, end_time, data_points, cardinality_limit) - # Apply cardinality limit by choosing subset + overflow for LastValue - all_points = data_points.values - - if all_points.size <= cardinality_limit - # All points fit within limit - ndps = all_points.map! do |ndp| - ndp.start_time_unix_nano = start_time - ndp.time_unix_nano = end_time - ndp - end - else - # Choose most recent values (LastValue behavior) - selected_points = choose_last_value_subset(all_points, cardinality_limit - 1) - remaining_points = all_points - selected_points - - ndps = selected_points.map do |ndp| - ndp.start_time_unix_nano = start_time - ndp.time_unix_nano = end_time - ndp - end - - # Create overflow point with the last remaining value - if remaining_points.any? - # For LastValue, use the most recent remaining value - overflow_value = remaining_points.max_by(&:time_unix_nano)&.value || 0 - overflow_point = NumberDataPoint.new( - OVERFLOW_ATTRIBUTE_SET, - start_time, - end_time, - overflow_value, - nil - ) - ndps << overflow_point - end + def collect(start_time, end_time, data_points) + ndps = data_points.values.map! do |ndp| + ndp.start_time_unix_nano = start_time + ndp.time_unix_nano = end_time + ndp end - data_points.clear ndps end def update(increment, attributes, data_points, cardinality_limit) # Check if we already have this attribute set - if data_points.key?(attributes) - # Update existing data point (LastValue behavior - replace) - data_points[attributes] = NumberDataPoint.new( - attributes, - nil, - nil, - increment, - nil - ) - return - end + ndp = if data_points.key?(attributes) + data_points[attributes] + elsif data_points.size >= cardinality_limit + data_points[OVERFLOW_ATTRIBUTE_SET] || create_new_data_point(OVERFLOW_ATTRIBUTE_SET, data_points) + else + create_new_data_point(attributes, data_points) + end + + update_number_data_point(ndp, increment) + nil + end - # Check cardinality limit for new attribute sets - if data_points.size >= cardinality_limit - # Overflow: aggregate into overflow data point - @overflow_started = true - data_points[OVERFLOW_ATTRIBUTE_SET] = NumberDataPoint.new( - OVERFLOW_ATTRIBUTE_SET, - nil, - nil, - increment, - nil - ) - return - end + private - # Normal case - create new data point + def create_new_data_point(attributes, data_points) data_points[attributes] = NumberDataPoint.new( attributes, nil, nil, - increment, + 0, nil ) - nil end - private - - def choose_last_value_subset(points, count) - # For LastValue, prefer most recently updated points - # Since we don't have timestamp tracking, use array order as proxy - points.last(count) + def update_number_data_point(ndp, increment) + ndp.value = increment end end end diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb index bf924ba446..4757748bbc 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/sum.rb @@ -4,28 +4,36 @@ # # SPDX-License-Identifier: Apache-2.0 -require 'set' - module OpenTelemetry module SDK module Metrics module Aggregation # Contains the implementation of the Sum aggregation - class Sum # rubocop:disable Metrics/ClassLength + class Sum OVERFLOW_ATTRIBUTE_SET = { 'otel.metric.overflow' => true }.freeze def initialize(aggregation_temporality: ENV.fetch('OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE', :cumulative), monotonic: false, instrument_kind: nil) @aggregation_temporality = AggregationTemporality.determine_temporality(aggregation_temporality: aggregation_temporality, instrument_kind: instrument_kind, default: :cumulative) @monotonic = monotonic - @overflow_started = false - @pre_overflow_attributes = ::Set.new if @aggregation_temporality.cumulative? end - def collect(start_time, end_time, data_points, cardinality_limit) + def collect(start_time, end_time, data_points) if @aggregation_temporality.delta? - collect_delta(start_time, end_time, data_points, cardinality_limit) + # Set timestamps and 'move' data point values to result. + ndps = data_points.values.map! do |ndp| + ndp.start_time_unix_nano = start_time + ndp.time_unix_nano = end_time + ndp + end + data_points.clear + ndps else - collect_cumulative(start_time, end_time, data_points, cardinality_limit) + # Update timestamps and take a snapshot. + data_points.values.map! do |ndp| + ndp.start_time_unix_nano ||= start_time # Start time of a data point is from the first observation. + ndp.time_unix_nano = end_time + ndp.dup + end end end @@ -33,38 +41,16 @@ def update(increment, attributes, data_points, cardinality_limit) return if @monotonic && increment < 0 # Check if we already have this attribute set - if data_points.key?(attributes) - data_points[attributes].value += increment - return - end - - # For cumulative: track pre-overflow attributes - if @aggregation_temporality.cumulative? - if !@overflow_started && data_points.size < cardinality_limit - @pre_overflow_attributes.add(attributes) - create_new_data_point(attributes, increment, data_points) - return - elsif @pre_overflow_attributes.include?(attributes) - # Allow pre-overflow attributes even after overflow started - create_new_data_point(attributes, increment, data_points) - return - end - elsif data_points.size < cardinality_limit - # For delta: simple size check - create_new_data_point(attributes, increment, data_points) - return - end - - # Overflow case: aggregate into overflow data point - @overflow_started = true - overflow_ndp = data_points[OVERFLOW_ATTRIBUTE_SET] || data_points[OVERFLOW_ATTRIBUTE_SET] = NumberDataPoint.new( - OVERFLOW_ATTRIBUTE_SET, - nil, - nil, - 0, - nil - ) - overflow_ndp.value += increment + ndp = if data_points.key?(attributes) + data_points[attributes] + elsif data_points.size >= cardinality_limit + data_points[OVERFLOW_ATTRIBUTE_SET] || create_new_data_point(OVERFLOW_ATTRIBUTE_SET, data_points) + else + create_new_data_point(attributes, data_points) + end + + update_number_data_point(ndp, increment) + nil end def monotonic? @@ -77,76 +63,18 @@ def aggregation_temporality private - def create_new_data_point(attributes, increment, data_points) + def create_new_data_point(attributes, data_points) data_points[attributes] = NumberDataPoint.new( attributes, nil, nil, - increment, + 0, nil ) end - def collect_cumulative(start_time, end_time, data_points, cardinality_limit) - # Cumulative: return all data points (including overflow if present) - result = data_points.values.map do |ndp| - ndp.start_time_unix_nano ||= start_time - ndp.time_unix_nano = end_time - ndp.dup - end - - # Apply cardinality limit if we have more points than limit - apply_cardinality_limit_to_result(result, cardinality_limit) - end - - def collect_delta(start_time, end_time, data_points, cardinality_limit) - # Delta: can choose arbitrary subset each collection - all_points = data_points.values - - if all_points.size <= cardinality_limit - # All points fit within limit - result = all_points.map do |ndp| - ndp.start_time_unix_nano = start_time - ndp.time_unix_nano = end_time - ndp - end - else - # Apply cardinality limit by choosing subset (first n data_point) + overflow - selected_points = all_points.first(cardinality_limit) - remaining_points = all_points - selected_points - - result = selected_points.map do |ndp| - ndp.start_time_unix_nano = start_time - ndp.time_unix_nano = end_time - ndp - end - - # Create overflow point for remaining - if remaining_points.any? - overflow_value = remaining_points.sum(&:value) - overflow_point = NumberDataPoint.new( - OVERFLOW_ATTRIBUTE_SET, - start_time, - end_time, - overflow_value, - nil - ) - result << overflow_point - end - end - - data_points.clear - result - end - - # if cardinality is 0, but with update, then there will be one in result which is overflow - # if cardinality is n, with update, then there will be n+1 in result - def apply_cardinality_limit_to_result(result, cardinality_limit) - return result if result.size <= cardinality_limit + 1 - - # For cumulative, we should have already enforced this in update() - # But as safety net, keep first N points - result.first(cardinality_limit) + def update_number_data_point(ndp, increment) + ndp.value += increment end end end diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/export/periodic_metric_reader.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/export/periodic_metric_reader.rb index cc779c4134..c919c86739 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/export/periodic_metric_reader.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/export/periodic_metric_reader.rb @@ -24,8 +24,9 @@ class PeriodicMetricReader < MetricReader # @return a new instance of the {PeriodicMetricReader}. def initialize(export_interval_millis: Float(ENV.fetch('OTEL_METRIC_EXPORT_INTERVAL', 60_000)), export_timeout_millis: Float(ENV.fetch('OTEL_METRIC_EXPORT_TIMEOUT', 30_000)), - exporter: nil) - super() + exporter: nil, + aggregation_cardinality_limit: nil) + super(aggregation_cardinality_limit: aggregation_cardinality_limit) @export_interval = export_interval_millis / 1000.0 @export_timeout = export_timeout_millis / 1000.0 diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb index e645025d24..6a69cbfb4f 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb @@ -53,16 +53,12 @@ def collect(start_time, end_time) return metric_data if @data_points.empty? if @registered_views.empty? - resolved_cardinality_limit = @cardinality_limit || DEFAULT_CARDINALITY_LIMIT metric_data << aggregate_metric_data(start_time, - end_time, - resolved_cardinality_limit) + end_time) else @registered_views.each do |view| - resolved_cardinality_limit = resolve_cardinality_limit(view) metric_data << aggregate_metric_data(start_time, end_time, - resolved_cardinality_limit, aggregation: view.aggregation) end end @@ -93,7 +89,7 @@ def update(value, attributes) end end - def aggregate_metric_data(start_time, end_time, cardinality_limit, aggregation: nil) + def aggregate_metric_data(start_time, end_time, aggregation: nil) aggregator = aggregation || @default_aggregation is_monotonic = aggregator.respond_to?(:monotonic?) ? aggregator.monotonic? : nil aggregation_temporality = aggregator.respond_to?(:aggregation_temporality) ? aggregator.aggregation_temporality : nil @@ -105,7 +101,7 @@ def aggregate_metric_data(start_time, end_time, cardinality_limit, aggregation: @instrument_kind, @meter_provider.resource, @instrumentation_scope, - aggregator.collect(start_time, end_time, @data_points, cardinality_limit), + aggregator.collect(start_time, end_time, @data_points), aggregation_temporality, start_time, end_time, @@ -120,7 +116,8 @@ def find_registered_view end def resolve_cardinality_limit(view) - view.aggregation_cardinality_limit || @cardinality_limit || DEFAULT_CARDINALITY_LIMIT + cardinality_limit = view.aggregation_cardinality_limit || @cardinality_limit || DEFAULT_CARDINALITY_LIMIT + [cardinality_limit, 0].max # if cardinality_limit is negative, then give it 0 end def to_s diff --git a/metrics_sdk/test/integration/periodic_metric_reader_test.rb b/metrics_sdk/test/integration/periodic_metric_reader_test.rb index 1686ca2dcf..6cdb6fc76f 100644 --- a/metrics_sdk/test/integration/periodic_metric_reader_test.rb +++ b/metrics_sdk/test/integration/periodic_metric_reader_test.rb @@ -1,261 +1,260 @@ # frozen_string_literal: true -# # frozen_string_literal: true +# Copyright The OpenTelemetry Authors +# +# SPDX-License-Identifier: Apache-2.0 -# # Copyright The OpenTelemetry Authors -# # -# # SPDX-License-Identifier: Apache-2.0 +require 'test_helper' +require 'json' -# require 'test_helper' -# require 'json' +describe OpenTelemetry::SDK do + describe '#periodic_metric_reader' do + before do + reset_metrics_sdk + @original_temp = ENV['OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE'] + ENV['OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE'] = 'delta' + end -# describe OpenTelemetry::SDK do -# describe '#periodic_metric_reader' do -# before do -# reset_metrics_sdk -# @original_temp = ENV['OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE'] -# ENV['OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE'] = 'delta' -# end + after do + ENV['OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE'] = @original_temp + end -# after do -# ENV['OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE'] = @original_temp -# end + # OTLP cannot export a metric without data points + it 'does not export metrics without data points' do + OpenTelemetry::SDK.configure -# # OTLP cannot export a metric without data points -# it 'does not export metrics without data points' do -# OpenTelemetry::SDK.configure + metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new + periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) -# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new -# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) + OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) -# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) + meter = OpenTelemetry.meter_provider.meter('test') + meter.create_histogram('example', unit: 's', description: 'test') -# meter = OpenTelemetry.meter_provider.meter('test') -# meter.create_histogram('example', unit: 's', description: 'test') + sleep(1) -# sleep(1) + periodic_metric_reader.shutdown + snapshot = metric_exporter.metric_snapshots -# periodic_metric_reader.shutdown -# snapshot = metric_exporter.metric_snapshots + assert_empty snapshot + end -# assert_empty snapshot -# end + it 'does not export metrics without data points when they have a view' do + OpenTelemetry::SDK.configure -# it 'does not export metrics without data points when they have a view' do -# OpenTelemetry::SDK.configure + metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new + periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) -# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new -# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) + OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) -# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) + boundaries = [0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10] -# boundaries = [0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10] + OpenTelemetry.meter_provider.add_view('http.server.request.duration', + type: :histogram, + aggregation: OpenTelemetry::SDK::Metrics::Aggregation::ExplicitBucketHistogram.new(boundaries: boundaries)) -# OpenTelemetry.meter_provider.add_view('http.server.request.duration', -# type: :histogram, -# aggregation: OpenTelemetry::SDK::Metrics::Aggregation::ExplicitBucketHistogram.new(boundaries: boundaries)) + meter = OpenTelemetry.meter_provider.meter('test') + meter.create_histogram('http.server.request.duration', unit: 's', description: 'Duration of HTTP server requests.') -# meter = OpenTelemetry.meter_provider.meter('test') -# meter.create_histogram('http.server.request.duration', unit: 's', description: 'Duration of HTTP server requests.') + sleep(8) -# sleep(8) + periodic_metric_reader.shutdown + snapshot = metric_exporter.metric_snapshots -# periodic_metric_reader.shutdown -# snapshot = metric_exporter.metric_snapshots + assert_empty snapshot + end -# assert_empty snapshot -# end + it 'emits 2 metrics after 10 seconds' do + OpenTelemetry::SDK.configure -# it 'emits 2 metrics after 10 seconds' do -# OpenTelemetry::SDK.configure + metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new + periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) -# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new -# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) + OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) -# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) + meter = OpenTelemetry.meter_provider.meter('test') + counter = meter.create_counter('counter', unit: 'smidgen', description: 'a small amount of something') -# meter = OpenTelemetry.meter_provider.meter('test') -# counter = meter.create_counter('counter', unit: 'smidgen', description: 'a small amount of something') + counter.add(1) + counter.add(2, attributes: { 'a' => 'b' }) + counter.add(2, attributes: { 'a' => 'b' }) + counter.add(3, attributes: { 'b' => 'c' }) + counter.add(4, attributes: { 'd' => 'e' }) -# counter.add(1) -# counter.add(2, attributes: { 'a' => 'b' }) -# counter.add(2, attributes: { 'a' => 'b' }) -# counter.add(3, attributes: { 'b' => 'c' }) -# counter.add(4, attributes: { 'd' => 'e' }) + sleep(8) -# sleep(8) + counter.add(5) + counter.add(6) -# counter.add(5) -# counter.add(6) + periodic_metric_reader.shutdown + snapshot = metric_exporter.metric_snapshots -# periodic_metric_reader.shutdown -# snapshot = metric_exporter.metric_snapshots + _(snapshot.size).must_equal(2) -# _(snapshot.size).must_equal(2) + _(snapshot[0].name).must_equal('counter') + _(snapshot[0].unit).must_equal('smidgen') + _(snapshot[0].description).must_equal('a small amount of something') -# _(snapshot[0].name).must_equal('counter') -# _(snapshot[0].unit).must_equal('smidgen') -# _(snapshot[0].description).must_equal('a small amount of something') + _(snapshot[0].instrumentation_scope.name).must_equal('test') -# _(snapshot[0].instrumentation_scope.name).must_equal('test') + _(snapshot[0].data_points[0].value).must_equal(1) + _(snapshot[0].data_points[0].attributes).must_equal({}) -# _(snapshot[0].data_points[0].value).must_equal(1) -# _(snapshot[0].data_points[0].attributes).must_equal({}) + _(snapshot[0].data_points[1].value).must_equal(4) + _(snapshot[0].data_points[1].attributes).must_equal('a' => 'b') -# _(snapshot[0].data_points[1].value).must_equal(4) -# _(snapshot[0].data_points[1].attributes).must_equal('a' => 'b') + _(snapshot[0].data_points[2].value).must_equal(3) + _(snapshot[0].data_points[2].attributes).must_equal('b' => 'c') -# _(snapshot[0].data_points[2].value).must_equal(3) -# _(snapshot[0].data_points[2].attributes).must_equal('b' => 'c') + _(snapshot[0].data_points[3].value).must_equal(4) + _(snapshot[0].data_points[3].attributes).must_equal('d' => 'e') -# _(snapshot[0].data_points[3].value).must_equal(4) -# _(snapshot[0].data_points[3].attributes).must_equal('d' => 'e') + _(snapshot[1].data_points.size).must_equal(1) + _(snapshot[1].data_points[0].value).must_equal(11) -# _(snapshot[1].data_points.size).must_equal(1) -# _(snapshot[1].data_points[0].value).must_equal(11) + _(periodic_metric_reader.instance_variable_get(:@thread).alive?).must_equal false + end -# _(periodic_metric_reader.instance_variable_get(:@thread).alive?).must_equal false -# end + it 'emits 1 metric after 1 second when interval is > 1 second' do + OpenTelemetry::SDK.configure -# it 'emits 1 metric after 1 second when interval is > 1 second' do -# OpenTelemetry::SDK.configure + metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new + periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) -# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new -# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) + OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) -# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) + meter = OpenTelemetry.meter_provider.meter('test') + counter = meter.create_counter('counter', unit: 'smidgen', description: 'a small amount of something') -# meter = OpenTelemetry.meter_provider.meter('test') -# counter = meter.create_counter('counter', unit: 'smidgen', description: 'a small amount of something') + counter.add(1) + counter.add(2, attributes: { 'a' => 'b' }) + counter.add(2, attributes: { 'a' => 'b' }) + counter.add(3, attributes: { 'b' => 'c' }) + counter.add(4, attributes: { 'd' => 'e' }) -# counter.add(1) -# counter.add(2, attributes: { 'a' => 'b' }) -# counter.add(2, attributes: { 'a' => 'b' }) -# counter.add(3, attributes: { 'b' => 'c' }) -# counter.add(4, attributes: { 'd' => 'e' }) + sleep(1) -# sleep(1) + periodic_metric_reader.shutdown + snapshot = metric_exporter.metric_snapshots -# periodic_metric_reader.shutdown -# snapshot = metric_exporter.metric_snapshots + _(snapshot.size).must_equal(1) + _(periodic_metric_reader.instance_variable_get(:@thread).alive?).must_equal false + end -# _(snapshot.size).must_equal(1) -# _(periodic_metric_reader.instance_variable_get(:@thread).alive?).must_equal false -# end + unless Gem.win_platform? || %w[jruby truffleruby].include?(RUBY_ENGINE) # forking is not available on these platforms or runtimes + it 'is restarted after forking' do + OpenTelemetry::SDK.configure -# unless Gem.win_platform? || %w[jruby truffleruby].include?(RUBY_ENGINE) # forking is not available on these platforms or runtimes -# it 'is restarted after forking' do -# OpenTelemetry::SDK.configure + metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new + periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) -# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new -# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 5000, export_timeout_millis: 5000, exporter: metric_exporter) + OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) -# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) + read, write = IO.pipe -# read, write = IO.pipe + pid = fork do + meter = OpenTelemetry.meter_provider.meter('test') + counter = meter.create_counter('counter', unit: 'smidgen', description: 'a small amount of something') -# pid = fork do -# meter = OpenTelemetry.meter_provider.meter('test') -# counter = meter.create_counter('counter', unit: 'smidgen', description: 'a small amount of something') + counter.add(1) + counter.add(2, attributes: { 'a' => 'b' }) + counter.add(2, attributes: { 'a' => 'b' }) + counter.add(3, attributes: { 'b' => 'c' }) + counter.add(4, attributes: { 'd' => 'e' }) -# counter.add(1) -# counter.add(2, attributes: { 'a' => 'b' }) -# counter.add(2, attributes: { 'a' => 'b' }) -# counter.add(3, attributes: { 'b' => 'c' }) -# counter.add(4, attributes: { 'd' => 'e' }) + sleep(8) + snapshot = metric_exporter.metric_snapshots -# sleep(8) -# snapshot = metric_exporter.metric_snapshots + json = snapshot.map { |reading| { name: reading.name } }.to_json + write.puts json + end -# json = snapshot.map { |reading| { name: reading.name } }.to_json -# write.puts json -# end + Timeout.timeout(10) do + Process.waitpid(pid) + end -# Timeout.timeout(10) do -# Process.waitpid(pid) -# end + periodic_metric_reader.shutdown + snapshot = JSON.parse(read.gets.chomp) + _(snapshot.size).must_equal(1) + _(snapshot[0]).must_equal('name' => 'counter') + _(periodic_metric_reader.instance_variable_get(:@thread).alive?).must_equal false + end + end -# periodic_metric_reader.shutdown -# snapshot = JSON.parse(read.gets.chomp) -# _(snapshot.size).must_equal(1) -# _(snapshot[0]).must_equal('name' => 'counter') -# _(periodic_metric_reader.instance_variable_get(:@thread).alive?).must_equal false -# end -# end + it 'shutdown break the export interval cycle' do + OpenTelemetry::SDK.configure -# it 'shutdown break the export interval cycle' do -# OpenTelemetry::SDK.configure + metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new + periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 1_000_000, export_timeout_millis: 10_000, exporter: metric_exporter) -# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new -# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new(export_interval_millis: 1_000_000, export_timeout_millis: 10_000, exporter: metric_exporter) + OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) -# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) + _(periodic_metric_reader.alive?).must_equal true -# _(periodic_metric_reader.alive?).must_equal true + sleep 5 # make sure the work thread start -# sleep 5 # make sure the work thread start + Timeout.timeout(2) do # Fail if this block takes more than 2 seconds + periodic_metric_reader.shutdown + end -# Timeout.timeout(2) do # Fail if this block takes more than 2 seconds -# periodic_metric_reader.shutdown -# end + _(periodic_metric_reader.alive?).must_equal false + end -# _(periodic_metric_reader.alive?).must_equal false -# end + describe 'cardinality limit' do + it 'accepts cardinality_limit parameter on initialization' do + OpenTelemetry::SDK.configure -# describe 'cardinality limit' do -# it 'accepts cardinality_limit parameter on initialization' do -# OpenTelemetry::SDK.configure + metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new + periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new( + export_interval_millis: 100, + export_timeout_millis: 1000, + exporter: metric_exporter, + aggregation_cardinality_limit: 50 + ) -# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new -# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new( -# export_interval_millis: 100, -# export_timeout_millis: 1000, -# exporter: metric_exporter, -# cardinality_limit: 50 -# ) + OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) -# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) + _(periodic_metric_reader.alive?).must_equal true + periodic_metric_reader.shutdown + end -# _(periodic_metric_reader.alive?).must_equal true -# periodic_metric_reader.shutdown -# end + it 'passes cardinality limit to metric collection' do + OpenTelemetry::SDK.configure -# it 'passes cardinality limit to metric collection' do -# OpenTelemetry::SDK.configure + metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new + periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new( + export_interval_millis: 100, + export_timeout_millis: 1000, + exporter: metric_exporter, + aggregation_cardinality_limit: 2 + ) -# metric_exporter = OpenTelemetry::SDK::Metrics::Export::InMemoryMetricPullExporter.new -# periodic_metric_reader = OpenTelemetry::SDK::Metrics::Export::PeriodicMetricReader.new( -# export_interval_millis: 100, -# export_timeout_millis: 1000, -# exporter: metric_exporter, -# cardinality_limit: 2 -# ) + OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) -# OpenTelemetry.meter_provider.add_metric_reader(periodic_metric_reader) + meter = OpenTelemetry.meter_provider.meter('test') + counter = meter.create_counter('test_counter') -# meter = OpenTelemetry.meter_provider.meter('test') -# counter = meter.create_counter('test_counter') + # Add more data points than the cardinality limit + counter.add(1, attributes: { 'key' => 'a' }) + counter.add(2, attributes: { 'key' => 'b' }) + counter.add(3, attributes: { 'key' => 'c' }) # Should trigger overflow + counter.add(4, attributes: { 'key' => 'e' }) -# # Add more data points than the cardinality limit -# counter.add(1, attributes: { 'key' => 'a' }) -# counter.add(2, attributes: { 'key' => 'b' }) -# counter.add(3, attributes: { 'key' => 'c' }) # Should trigger overflow + # Wait for collection + sleep(0.2) -# # Wait for collection -# sleep(0.2) + periodic_metric_reader.shutdown + snapshot = metric_exporter.metric_snapshots -# periodic_metric_reader.shutdown -# snapshot = metric_exporter.metric_snapshots + _(snapshot).wont_be_empty + _(snapshot[0].data_points.size).must_equal(3) # Limited by cardinality -# _(snapshot).wont_be_empty -# _(snapshot[0].data_points.size).must_equal(2) # Limited by cardinality - -# # Find overflow data point -# overflow_point = snapshot[0].data_points.find do |dp| -# dp.attributes == { 'otel.metric.overflow' => true } -# end -# _(overflow_point).wont_be_nil -# end -# end -# end -# end + # Find overflow data point + overflow_point = snapshot[0].data_points.find do |dp| + dp.attributes == { 'otel.metric.overflow' => true } + end + _(overflow_point).wont_be_nil + end + end + end +end diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb index 96f99d4817..c129169b67 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/drop_test.rb @@ -21,7 +21,7 @@ it 'sets the timestamps' do drop_aggregation.update(0, {}, data_points, cardinality_limit) - ndp = drop_aggregation.collect(start_time, end_time, data_points, cardinality_limit)[0] + ndp = drop_aggregation.collect(start_time, end_time, data_points)[0] _(ndp.start_time_unix_nano).must_equal(0) _(ndp.time_unix_nano).must_equal(0) end @@ -33,7 +33,7 @@ drop_aggregation.update(2, { 'foo' => 'bar' }, data_points, cardinality_limit) drop_aggregation.update(2, { 'foo' => 'bar' }, data_points, cardinality_limit) - ndps = drop_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + ndps = drop_aggregation.collect(start_time, end_time, data_points) _(ndps.size).must_equal(2) _(ndps[0].value).must_equal(0) @@ -44,47 +44,19 @@ end describe 'cardinality limit' do - let(:cardinality_limit) { 2 } - it 'respects cardinality limit but still drops all values' do + cardinality_limit = 2 drop_aggregation.update(10, { 'key' => 'a' }, data_points, cardinality_limit) drop_aggregation.update(20, { 'key' => 'b' }, data_points, cardinality_limit) drop_aggregation.update(30, { 'key' => 'c' }, data_points, cardinality_limit) # Should be limited drop_aggregation.update(40, { 'key' => 'd' }, data_points, cardinality_limit) # Should be limited - ndps = drop_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + ndps = drop_aggregation.collect(start_time, end_time, data_points) # All values should be 0 regardless of cardinality limit ndps.each do |ndp| _(ndp.value).must_equal(0) end end - - describe 'edge cases' do - it 'handles cardinality limit of 0 gracefully' do - cardinality_limit = 0 - drop_aggregation.update(100, { 'key' => 'value' }, data_points, cardinality_limit) - - ndps = drop_aggregation.collect(start_time, end_time, data_points, cardinality_limit) - - # Drop aggregation should still produce data points even with 0 limit - _(ndps).wont_be_empty - ndps.each { |ndp| _(ndp.value).must_equal(0) } - end - - it 'maintains drop behavior with very high cardinality' do - cardinality_limit = 1000 - - 100.times do |i| - drop_aggregation.update(i * 10, { 'key' => "value_#{i}" }, data_points, cardinality_limit) - end - - ndps = drop_aggregation.collect(start_time, end_time, data_points, cardinality_limit) - - # All values should still be 0 - ndps.each { |ndp| _(ndp.value).must_equal(0) } - _(ndps.size).must_equal(100) # All data points should be present - end - end end end diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb index d7ab848606..44b0644b0b 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/explicit_bucket_histogram_test.rb @@ -104,7 +104,7 @@ ebh.update(55, { 'foo' => 'bar' }, data_points, cardinality_limit) ebh.update(80, { 'foo' => 'bar' }, data_points, cardinality_limit) - hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + hdps = ebh.collect(start_time, end_time, data_points) _(hdps.size).must_equal(2) _(hdps[0].attributes).must_equal({}) _(hdps[0].count).must_equal(5) @@ -125,7 +125,7 @@ it 'sets the timestamps' do ebh.update(0, {}, data_points, cardinality_limit) - hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] + hdp = ebh.collect(start_time, end_time, data_points)[0] _(hdp.start_time_unix_nano).must_equal(start_time) _(hdp.time_unix_nano).must_equal(end_time) end @@ -135,7 +135,7 @@ ebh.update(0, {}, data_points, cardinality_limit) ebh.update(0, {}, data_points, cardinality_limit) ebh.update(0, {}, data_points, cardinality_limit) - hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] + hdp = ebh.collect(start_time, end_time, data_points)[0] _(hdp.count).must_equal(4) end @@ -145,7 +145,7 @@ ebh.update(5, {}, data_points, cardinality_limit) ebh.update(6, {}, data_points, cardinality_limit) ebh.update(10, {}, data_points, cardinality_limit) - hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + hdps = ebh.collect(start_time, end_time, data_points) ebh.update(0, {}, data_points, cardinality_limit) ebh.update(1, {}, data_points, cardinality_limit) @@ -160,7 +160,7 @@ _(hdps[0].max).must_equal(10) _(hdps[0].bucket_counts).must_equal([1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0]) - hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + hdps = ebh.collect(start_time, end_time, data_points) # Assert that we are not accumulating values # between calls to collect _(hdps[0].count).must_equal(5) @@ -179,7 +179,7 @@ ebh.update(5, {}, data_points, cardinality_limit) ebh.update(6, {}, data_points, cardinality_limit) ebh.update(10, {}, data_points, cardinality_limit) - hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + hdps = ebh.collect(start_time, end_time, data_points) ebh.update(0, {}, data_points, cardinality_limit) ebh.update(1, {}, data_points, cardinality_limit) @@ -194,7 +194,7 @@ _(hdps[0].max).must_equal(10) _(hdps[0].bucket_counts).must_equal([1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0]) - hdps1 = ebh.collect(start_time, end_time, data_points, cardinality_limit) + hdps1 = ebh.collect(start_time, end_time, data_points) # Assert that we are accumulating values # and not just capturing the delta since # the previous collect call @@ -248,7 +248,7 @@ ebh.update(1001, {}, data_points, cardinality_limit) - hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] + hdp = ebh.collect(start_time, end_time, data_points)[0] _(hdp.bucket_counts).must_equal([1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]) _(hdp.sum).must_equal(4040) _(hdp.min).must_equal(0) @@ -260,7 +260,7 @@ it 'sorts it' do ebh.update(0, {}, data_points, cardinality_limit) - _(ebh.collect(start_time, end_time, data_points, cardinality_limit)[0].explicit_bounds).must_equal([1, 2, 4]) + _(ebh.collect(start_time, end_time, data_points)[0].explicit_bounds).must_equal([1, 2, 4]) end end @@ -269,7 +269,7 @@ it 'does not record min max values' do ebh.update(-1, {}, data_points, cardinality_limit) - hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] + hdp = ebh.collect(start_time, end_time, data_points)[0] _(hdp.min).must_be_nil _(hdp.min).must_be_nil end @@ -286,7 +286,7 @@ ebh.update(3, {}, data_points, cardinality_limit) ebh.update(4, {}, data_points, cardinality_limit) ebh.update(5, {}, data_points, cardinality_limit) - hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] + hdp = ebh.collect(start_time, end_time, data_points)[0] _(hdp.bucket_counts).must_equal([2, 2, 2, 1]) end @@ -298,7 +298,7 @@ it 'aggregates' do ebh.update(-1, {}, data_points, cardinality_limit) ebh.update(1, {}, data_points, cardinality_limit) - hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] + hdp = ebh.collect(start_time, end_time, data_points)[0] _(hdp.bucket_counts).must_equal([1, 1]) end @@ -310,7 +310,7 @@ it 'aggregates but does not record bucket counts' do ebh.update(-1, {}, data_points, cardinality_limit) ebh.update(3, {}, data_points, cardinality_limit) - hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] + hdp = ebh.collect(start_time, end_time, data_points)[0] _(hdp.bucket_counts).must_be_nil _(hdp.explicit_bounds).must_be_nil @@ -327,7 +327,7 @@ it 'aggregates but does not record bucket counts' do ebh.update(-1, {}, data_points, cardinality_limit) ebh.update(3, {}, data_points, cardinality_limit) - hdp = ebh.collect(start_time, end_time, data_points, cardinality_limit)[0] + hdp = ebh.collect(start_time, end_time, data_points)[0] _(hdp.bucket_counts).must_be_nil _(hdp.explicit_bounds).must_be_nil @@ -340,107 +340,72 @@ end describe 'cardinality limit' do - let(:cardinality_limit) { 2 } - - it 'creates overflow data point when cardinality limit is exceeded' do + it 'handles overflow scenarios and merges measurements correctly' do + cardinality_limit = 2 + # Test basic overflow behavior and multiple overflow merging in one flow ebh.update(1, { 'key' => 'a' }, data_points, cardinality_limit) ebh.update(5, { 'key' => 'b' }, data_points, cardinality_limit) ebh.update(10, { 'key' => 'c' }, data_points, cardinality_limit) # This should overflow - - hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) - - _(hdps.size).must_equal(3) - - overflow_point = hdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } - _(overflow_point).wont_be_nil - _(overflow_point.count).must_equal(1) - _(overflow_point.sum).must_equal(10) - end - - it 'merges multiple overflow measurements correctly' do - ebh.update(1, { 'key' => 'a' }, data_points, cardinality_limit) - ebh.update(5, { 'key' => 'b' }, data_points, cardinality_limit) - ebh.update(10, { 'key' => 'c' }, data_points, cardinality_limit) # Overflow ebh.update(15, { 'key' => 'd' }, data_points, cardinality_limit) # Also overflow - hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + hdps = ebh.collect(start_time, end_time, data_points) - _(hdps.size).must_equal(3) + _(hdps.size).must_equal(3) # 2 regular + 1 overflow overflow_point = hdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } - _(overflow_point.count).must_equal(2) + _(overflow_point).wont_be_nil + _(overflow_point.count).must_equal(2) # Both overflow measurements merged _(overflow_point.sum).must_equal(25) # 10 + 15 end describe 'edge cases' do - it 'handles cardinality limit of 0' do + it 'handles cardinality limit 0' do + # Test cardinality limit of 0 - everything overflows cardinality_limit = 0 ebh.update(5, { 'key' => 'value' }, data_points, cardinality_limit) - hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + hdps = ebh.collect(start_time, end_time, data_points) _(hdps.size).must_equal(1) overflow_point = hdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } _(overflow_point).wont_be_nil _(overflow_point.count).must_equal(1) _(overflow_point.sum).must_equal(5) + _(overflow_point.min).must_equal(5) + _(overflow_point.max).must_equal(5) end - it 'handles very large cardinality scenarios' do - cardinality_limit = 100 - - # Add 150 unique attribute sets - 150.times do |i| - ebh.update(i, { 'unique_key' => "value_#{i}" }, data_points, cardinality_limit) - end - - hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) - - _(hdps.size).must_equal(101) # Should be limited to cardinality_limit - overflow_point = hdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } - _(overflow_point).wont_be_nil - _(overflow_point.count).must_equal(50) # 150 - 100 - end - - it 'preserves bucket counts in overflow histogram' do + it 'handles cardinality limit 1' do + # Test cardinality limit of 1 with bucket counts preservation cardinality_limit = 1 ebh.update(1, { 'key' => 'a' }, data_points, cardinality_limit) ebh.update(10, { 'key' => 'b' }, data_points, cardinality_limit) # Overflow ebh.update(100, { 'key' => 'c' }, data_points, cardinality_limit) # More overflow - hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) + hdps = ebh.collect(start_time, end_time, data_points) overflow_point = hdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } # Check bucket counts are properly merged _(overflow_point.bucket_counts).wont_be_nil _(overflow_point.bucket_counts.sum).must_equal(overflow_point.count) + _(overflow_point.min).must_equal(10) + _(overflow_point.max).must_equal(100) end - it 'handles min/max correctly in overflow scenarios' do - cardinality_limit = 1 - ebh.update(50, { 'key' => 'a' }, data_points, cardinality_limit) - ebh.update(5, { 'key' => 'b' }, data_points, cardinality_limit) # Overflow with smaller value - ebh.update(500, { 'key' => 'c' }, data_points, cardinality_limit) # Overflow with larger value - - hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) - overflow_point = hdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } - - _(overflow_point.min).must_equal(5) - _(overflow_point.max).must_equal(500) - end + it 'handles very large cardinality scenarios' do + cardinality_limit = 100 - describe 'with record_min_max disabled' do - it 'does not track min/max in overflow' do - cardinality_limit = 1 - ebh.update(50, { 'key' => 'a' }, data_points, cardinality_limit) - ebh.update(5, { 'key' => 'b' }, data_points, cardinality_limit) + # Add 150 unique attribute sets + 150.times do |i| + ebh.update(i, { 'unique_key' => "value_#{i}" }, data_points, cardinality_limit) + end - hdps = ebh.collect(start_time, end_time, data_points, cardinality_limit) - overflow_point = hdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } + hdps = ebh.collect(start_time, end_time, data_points) - _(overflow_point.min).must_equal 5 - _(overflow_point.max).must_equal 5 - end + _(hdps.size).must_equal(101) # 100 + 1 + overflow_point = hdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } + _(overflow_point).wont_be_nil + _(overflow_point.count).must_equal(50) # 150 - 100 end end end diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb index e2a07cd979..623d540f6b 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb @@ -36,7 +36,7 @@ expbh.update(1.45, { 'foo' => 'bar' }, data_points, cardinality_limit) expbh.update(1.67, { 'foo' => 'bar' }, data_points, cardinality_limit) - exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) + exphdps = expbh.collect(start_time, end_time, data_points) _(exphdps.size).must_equal(2) _(exphdps[0].attributes).must_equal({}) @@ -85,7 +85,7 @@ expbh.update(4, {}, data_points, cardinality_limit) expbh.update(1, {}, data_points, cardinality_limit) - exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) + exphdps = expbh.collect(start_time, end_time, data_points) _(exphdps.size).must_equal(1) _(exphdps[0].attributes).must_equal({}) @@ -121,7 +121,7 @@ expbh.update(8, {}, data_points, cardinality_limit) expbh.update(0.5, {}, data_points, cardinality_limit) - exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) + exphdps = expbh.collect(start_time, end_time, data_points) _(exphdps.size).must_equal(1) _(exphdps[0].attributes).must_equal({}) @@ -185,7 +185,7 @@ expbh.update(value, {}, data_points, cardinality_limit) end - exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) + exphdps = expbh.collect(start_time, end_time, data_points) assert_equal expected[:scale], exphdps[0].scale assert_equal expected[:offset], exphdps[0].positive.offset @@ -209,7 +209,7 @@ expbh.update(1, {}, data_points, cardinality_limit) expbh.update(2**-1074, {}, data_points, cardinality_limit) - exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) + exphdps = expbh.collect(start_time, end_time, data_points) assert_equal Float::MAX, exphdps[0].sum assert_equal 3, exphdps[0].count @@ -232,7 +232,7 @@ expbh.update(value, {}, data_points, cardinality_limit) end - exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) + exphdps = expbh.collect(start_time, end_time, data_points) assert_equal 1, exphdps[0].min assert_equal 9, exphdps[0].max @@ -247,7 +247,7 @@ expbh.update(value, {}, data_points, cardinality_limit) end - exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) + exphdps = expbh.collect(start_time, end_time, data_points) assert_equal(-9, exphdps[0].min) assert_equal(-1, exphdps[0].max) @@ -280,34 +280,30 @@ end describe 'cardinality limit' do - let(:cardinality_limit) { 2 } + it 'handles overflow data points and merges overflow measurements correctly' do + cardinality_limit = 2 - it 'creates overflow data point when cardinality limit is exceeded' do expbh.update(1.5, { 'key' => 'a' }, data_points, cardinality_limit) expbh.update(2.5, { 'key' => 'b' }, data_points, cardinality_limit) - expbh.update(3.5, { 'key' => 'c' }, data_points, cardinality_limit) # This should overflow + expbh.update(3.5, { 'key' => 'c' }, data_points, cardinality_limit) # This should start overflow + expbh.update(4.5, { 'key' => 'd' }, data_points, cardinality_limit) + expbh.update(0, { 'key' => 'e' }, data_points, cardinality_limit) - exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) + exphdps = expbh.collect(start_time, end_time, data_points) _(exphdps.size).must_equal(3) overflow_point = exphdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } - _(overflow_point).wont_be_nil - _(overflow_point.count).must_equal(1) - _(overflow_point.sum).must_equal(3.5) - end - - it 'merges multiple overflow measurements correctly' do - expbh.update(1.5, { 'key' => 'a' }, data_points, cardinality_limit) - expbh.update(2.5, { 'key' => 'b' }, data_points, cardinality_limit) - expbh.update(3.5, { 'key' => 'c' }, data_points, cardinality_limit) # Overflow - expbh.update(4.5, { 'key' => 'd' }, data_points, cardinality_limit) # Also overflow - - exphdps = expbh.collect(start_time, end_time, data_points, cardinality_limit) - overflow_point = exphdps.find { |hdp| hdp.attributes == { 'otel.metric.overflow' => true } } - _(overflow_point.count).must_equal(2) - _(overflow_point.sum).must_equal(8.0) # 3.5 + 4.5 + _(overflow_point).wont_be_nil + _(overflow_point.count).must_equal(3) + _(overflow_point.sum).must_equal(8) + _(overflow_point.scale).must_equal(5) + _(overflow_point.min).must_equal(0) + _(overflow_point.max).must_equal(4.5) + _(overflow_point.positive.counts).must_equal([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]) + _(overflow_point.negative.counts).must_equal([0]) + _(overflow_point.zero_count).must_equal(1) end end end diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb index 6456a7fe65..59d07f67e3 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/last_value_test.rb @@ -16,7 +16,7 @@ it 'sets the timestamps' do last_value_aggregation.update(0, {}, data_points, cardinality_limit) - ndp = last_value_aggregation.collect(start_time, end_time, data_points, cardinality_limit)[0] + ndp = last_value_aggregation.collect(start_time, end_time, data_points)[0] _(ndp.start_time_unix_nano).must_equal(start_time) _(ndp.time_unix_nano).must_equal(end_time) end @@ -28,7 +28,7 @@ last_value_aggregation.update(2, { 'foo' => 'bar' }, data_points, cardinality_limit) last_value_aggregation.update(2, { 'foo' => 'bar' }, data_points, cardinality_limit) - ndps = last_value_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + ndps = last_value_aggregation.collect(start_time, end_time, data_points) _(ndps[0].value).must_equal(2) _(ndps[0].attributes).must_equal({}, data_points) @@ -41,16 +41,23 @@ it 'creates overflow data point when cardinality limit is exceeded' do last_value_aggregation.update(10, { 'key' => 'a' }, data_points, cardinality_limit) + last_value_aggregation.update(20, { 'key' => 'a' }, data_points, cardinality_limit) last_value_aggregation.update(20, { 'key' => 'b' }, data_points, cardinality_limit) last_value_aggregation.update(30, { 'key' => 'c' }, data_points, cardinality_limit) # This should overflow + last_value_aggregation.update(40, { 'key' => 'e' }, data_points, cardinality_limit) # This should overflow - ndps = last_value_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + ndps = last_value_aggregation.collect(start_time, end_time, data_points) - _(ndps.size).must_equal(2) + _(ndps.size).must_equal(3) + + assert_equal(ndps[0].attributes, { 'key' => 'a' }) + _(ndps[0].value).must_equal 20 + + assert_equal(ndps[1].attributes, { 'key' => 'b' }) + _(ndps[1].value).must_equal 20 overflow_point = ndps.find { |ndp| ndp.attributes == { 'otel.metric.overflow' => true } } - _(overflow_point).wont_be_nil - _(overflow_point.value).must_equal(30) + _(overflow_point.value).must_equal(40) end it 'updates existing attribute sets without triggering overflow' do @@ -64,39 +71,15 @@ end describe 'edge cases' do - it 'handles cardinality limit of 0' do + it 'handles cardinality limits 0' do + # Test cardinality limit of 0 - everything overflows cardinality_limit = 0 last_value_aggregation.update(42, { 'key' => 'value' }, data_points, cardinality_limit) + last_value_aggregation.update(30, { 'key' => 'value' }, data_points, cardinality_limit) _(data_points.size).must_equal(1) overflow_point = data_points[{ 'otel.metric.overflow' => true }] - _(overflow_point.value).must_equal(42) - end - - it 'replaces overflow value with latest when multiple overflow updates' do - cardinality_limit = 1 - last_value_aggregation.update(10, { 'key' => 'a' }, data_points, cardinality_limit) - last_value_aggregation.update(20, { 'key' => 'b' }, data_points, cardinality_limit) # Overflow - last_value_aggregation.update(30, { 'key' => 'c' }, data_points, cardinality_limit) # Replace overflow - - _(data_points.size).must_equal(2) - assert_equal(data_points.keys[0], { 'key' => 'a' }) - _(data_points.values[0].value).must_equal 10 - - overflow_point = data_points[{ 'otel.metric.overflow' => true }] - _(overflow_point.value).must_equal(30) # LastValue behavior - only keep latest - end - - it 'handles negative values in overflow' do - cardinality_limit = 1 - last_value_aggregation.update(10, { 'key' => 'a' }, data_points, cardinality_limit) - last_value_aggregation.update(-5, { 'key' => 'b' }, data_points, cardinality_limit) # Negative overflow - - assert_equal(data_points.keys[0], { 'key' => 'a' }) - _(data_points.values[0].value).must_equal 10 - - overflow_point = data_points[{ 'otel.metric.overflow' => true }] - _(overflow_point.value).must_equal(-5) + _(overflow_point.value).must_equal(30) end end end diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb index c4122d10d2..ee5c3ff0a0 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/sum_test.rb @@ -86,7 +86,7 @@ it 'sets the timestamps' do sum_aggregation.update(0, {}, data_points, cardinality_limit) - ndp = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit)[0] + ndp = sum_aggregation.collect(start_time, end_time, data_points)[0] _(ndp.start_time_unix_nano).must_equal(start_time) _(ndp.time_unix_nano).must_equal(end_time) end @@ -98,7 +98,7 @@ sum_aggregation.update(2, { 'foo' => 'bar' }, data_points, cardinality_limit) sum_aggregation.update(2, { 'foo' => 'bar' }, data_points, cardinality_limit) - ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + ndps = sum_aggregation.collect(start_time, end_time, data_points) _(ndps[0].value).must_equal(3) _(ndps[0].attributes).must_equal({}, data_points) @@ -110,21 +110,21 @@ sum_aggregation.update(1, {}, data_points, cardinality_limit) sum_aggregation.update(-2, {}, data_points, cardinality_limit) - ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + ndps = sum_aggregation.collect(start_time, end_time, data_points) _(ndps[0].value).must_equal(-1) end it 'does not aggregate between collects' do sum_aggregation.update(1, {}, data_points, cardinality_limit) sum_aggregation.update(2, {}, data_points, cardinality_limit) - ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + ndps = sum_aggregation.collect(start_time, end_time, data_points) sum_aggregation.update(1, {}, data_points, cardinality_limit) # Assert that the recent update does not # impact the already collected metrics _(ndps[0].value).must_equal(3) - ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + ndps = sum_aggregation.collect(start_time, end_time, data_points) # Assert that we are not accumulating values # between calls to collect _(ndps[0].value).must_equal(1) @@ -136,14 +136,14 @@ it 'allows metrics to accumulate' do sum_aggregation.update(1, {}, data_points, cardinality_limit) sum_aggregation.update(2, {}, data_points, cardinality_limit) - ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + ndps = sum_aggregation.collect(start_time, end_time, data_points) sum_aggregation.update(1, {}, data_points, cardinality_limit) # Assert that the recent update does not # impact the already collected metrics _(ndps[0].value).must_equal(3) - ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + ndps = sum_aggregation.collect(start_time, end_time, data_points) # Assert that we are accumulating values # and not just capturing the delta since # the previous collect call @@ -158,101 +158,42 @@ it 'does not allow negative values to accumulate' do sum_aggregation.update(1, {}, data_points, cardinality_limit) sum_aggregation.update(-2, {}, data_points, cardinality_limit) - ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) + ndps = sum_aggregation.collect(start_time, end_time, data_points) _(ndps[0].value).must_equal(1) end end describe 'cardinality limit' do - let(:cardinality_limit) { 3 } - - it 'creates overflow data point when cardinality limit is exceeded' do - sum_aggregation.update(1, { 'key' => 'a' }, data_points, cardinality_limit) - sum_aggregation.update(2, { 'key' => 'b' }, data_points, cardinality_limit) - sum_aggregation.update(3, { 'key' => 'c' }, data_points, cardinality_limit) - sum_aggregation.update(4, { 'key' => 'd' }, data_points, cardinality_limit) # This should overflow - - ndps = sum_aggregation.collect(start_time, end_time, data_points, cardinality_limit) - - _(ndps.size).must_equal(4) - - overflow_point = ndps.find { |ndp| ndp.attributes == { 'otel.metric.overflow' => true } } - _(overflow_point).wont_be_nil - _(overflow_point.value).must_equal(4) - end - describe 'with cumulative aggregation' do it 'preserves pre-overflow attributes after overflow starts' do + cardinality_limit = 3 sum_aggregation.update(1, { 'key' => 'a' }, data_points, cardinality_limit) sum_aggregation.update(2, { 'key' => 'b' }, data_points, cardinality_limit) sum_aggregation.update(3, { 'key' => 'c' }, data_points, cardinality_limit) sum_aggregation.update(4, { 'key' => 'd' }, data_points, cardinality_limit) # This should overflow - - # Add more to a pre-overflow attribute + sum_aggregation.update(5, { 'key' => 'e' }, data_points, cardinality_limit) sum_aggregation.update(5, { 'key' => 'a' }, data_points, cardinality_limit) _(data_points.size).must_equal(4) # 3 original + 1 overflow _(data_points[{ 'key' => 'a' }].value).must_equal(6) # 1 + 5 + _(data_points[{ 'key' => 'b' }].value).must_equal(2) # 1 + 5 + _(data_points[{ 'otel.metric.overflow' => true }].value).must_equal(9) + _(data_points[{ 'key' => 'd' }]).must_be_nil + _(data_points[{ 'key' => 'e' }]).must_be_nil end end describe 'edge cases' do - it 'handles cardinality limit of 1' do - cardinality_limit = 1 - sum_aggregation.update(10, { 'key' => 'a' }, data_points, cardinality_limit) - sum_aggregation.update(20, { 'key' => 'b' }, data_points, cardinality_limit) # Should overflow immediately - - _(data_points.size).must_equal(2) - overflow_point = data_points[{ 'otel.metric.overflow' => true }] - _(overflow_point).wont_be_nil - _(overflow_point.value).must_equal(20) - end - - it 'handles cardinality limit of 0' do + it 'handles cardinality limits 0' do + # Test cardinality limit of 0 - everything overflows cardinality_limit = 0 sum_aggregation.update(10, { 'key' => 'a' }, data_points, cardinality_limit) + sum_aggregation.update(12, { 'key' => 'd' }, data_points, cardinality_limit) _(data_points.size).must_equal(1) overflow_point = data_points[{ 'otel.metric.overflow' => true }] - _(overflow_point).wont_be_nil - _(overflow_point.value).must_equal(10) - end - - it 'accumulates multiple overflow values correctly' do - cardinality_limit = 2 - sum_aggregation.update(1, { 'key' => 'a' }, data_points, cardinality_limit) - sum_aggregation.update(2, { 'key' => 'b' }, data_points, cardinality_limit) - sum_aggregation.update(3, { 'key' => 'c' }, data_points, cardinality_limit) # Overflow - sum_aggregation.update(4, { 'key' => 'd' }, data_points, cardinality_limit) # More overflow - sum_aggregation.update(5, { 'key' => 'e' }, data_points, cardinality_limit) # Even more overflow - - _(data_points.size).must_equal(3) # 2 regular + 1 overflow - overflow_point = data_points[{ 'otel.metric.overflow' => true }] - _(overflow_point.value).must_equal(12) # 3 + 4 + 5 - end - - it 'handles empty attributes correctly with cardinality limit' do - cardinality_limit = 1 - sum_aggregation.update(10, {}, data_points, cardinality_limit) - sum_aggregation.update(20, { 'key' => 'value' }, data_points, cardinality_limit) # Should overflow - - _(data_points.size).must_equal(2) - overflow_point = data_points[{ 'otel.metric.overflow' => true }] - _(overflow_point).wont_be_nil - _(overflow_point.value).must_equal(20) - end - - it 'handles identical attribute sets correctly' do - cardinality_limit = 2 - attrs = { 'service' => 'test', 'endpoint' => '/api' } - sum_aggregation.update(1, attrs, data_points, cardinality_limit) - sum_aggregation.update(2, attrs, data_points, cardinality_limit) # Same attributes, should accumulate - sum_aggregation.update(3, { 'different' => 'attrs' }, data_points, cardinality_limit) - sum_aggregation.update(4, { 'another' => 'set' }, data_points, cardinality_limit) # Should overflow - - _(data_points.size).must_equal(3) # 2 unique + 1 overflow - _(data_points[attrs].value).must_equal(3) # 1 + 2 + _(overflow_point.value).must_equal(22) end end end From 240ff8adec63b7d464d81681be1cf8e76c45da59 Mon Sep 17 00:00:00 2001 From: xuan-cao-swi Date: Tue, 26 Aug 2025 13:05:01 -0400 Subject: [PATCH 07/13] fix --- .../lib/opentelemetry/sdk/metrics/state/metric_stream.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb index 6a69cbfb4f..af1a2d8a6e 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb @@ -74,7 +74,7 @@ def collect(start_time, end_time) # then we need to sort the entire data_points (~ 2000) based on time, which is time-consuming def update(value, attributes) if @registered_views.empty? - resolved_cardinality_limit = @cardinality_limit || DEFAULT_CARDINALITY_LIMIT + resolved_cardinality_limit = resolve_cardinality_limit(nil) @mutex.synchronize { @default_aggregation.update(value, attributes, @data_points, resolved_cardinality_limit) } else @@ -116,7 +116,7 @@ def find_registered_view end def resolve_cardinality_limit(view) - cardinality_limit = view.aggregation_cardinality_limit || @cardinality_limit || DEFAULT_CARDINALITY_LIMIT + cardinality_limit = view&.aggregation_cardinality_limit || @cardinality_limit || DEFAULT_CARDINALITY_LIMIT [cardinality_limit, 0].max # if cardinality_limit is negative, then give it 0 end From 9ec35e185e11e5dc0054e07aea4064a61a638be7 Mon Sep 17 00:00:00 2001 From: xuan-cao-swi Date: Tue, 26 Aug 2025 14:17:18 -0400 Subject: [PATCH 08/13] update otlp metrics exporter for cardinality --- .../opentelemetry/exporter/otlp/metrics/metrics_exporter.rb | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/exporter/otlp-metrics/lib/opentelemetry/exporter/otlp/metrics/metrics_exporter.rb b/exporter/otlp-metrics/lib/opentelemetry/exporter/otlp/metrics/metrics_exporter.rb index eacf0b3dc4..1940d44772 100644 --- a/exporter/otlp-metrics/lib/opentelemetry/exporter/otlp/metrics/metrics_exporter.rb +++ b/exporter/otlp-metrics/lib/opentelemetry/exporter/otlp/metrics/metrics_exporter.rb @@ -52,12 +52,13 @@ def initialize(endpoint: OpenTelemetry::Common::Utilities.config_opt('OTEL_EXPOR ssl_verify_mode: MetricsExporter.ssl_verify_mode, headers: OpenTelemetry::Common::Utilities.config_opt('OTEL_EXPORTER_OTLP_METRICS_HEADERS', 'OTEL_EXPORTER_OTLP_HEADERS', default: {}), compression: OpenTelemetry::Common::Utilities.config_opt('OTEL_EXPORTER_OTLP_METRICS_COMPRESSION', 'OTEL_EXPORTER_OTLP_COMPRESSION', default: 'gzip'), - timeout: OpenTelemetry::Common::Utilities.config_opt('OTEL_EXPORTER_OTLP_METRICS_TIMEOUT', 'OTEL_EXPORTER_OTLP_TIMEOUT', default: 10)) + timeout: OpenTelemetry::Common::Utilities.config_opt('OTEL_EXPORTER_OTLP_METRICS_TIMEOUT', 'OTEL_EXPORTER_OTLP_TIMEOUT', default: 10), + aggregation_cardinality_limit: nil) raise ArgumentError, "invalid url for OTLP::MetricsExporter #{endpoint}" unless OpenTelemetry::Common::Utilities.valid_url?(endpoint) raise ArgumentError, "unsupported compression key #{compression}" unless compression.nil? || %w[gzip none].include?(compression) # create the MetricStore object - super() + super(aggregation_cardinality_limit: aggregation_cardinality_limit) @uri = if endpoint == ENV['OTEL_EXPORTER_OTLP_ENDPOINT'] URI.join(endpoint, 'v1/metrics') From 90aab7eb74a93726c90b4b02d118ba7274df9e1f Mon Sep 17 00:00:00 2001 From: xuan-cao-swi Date: Tue, 26 Aug 2025 15:10:45 -0400 Subject: [PATCH 09/13] fix test case --- .../metrics/aggregation/exponential_bucket_histogram.rb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb index f1d40607d1..c528e110fc 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb @@ -84,6 +84,10 @@ def update(amount, attributes, data_points, cardinality_limit) nil end + def aggregation_temporality + @aggregation_temporality.temporality + end + private def create_new_data_point(attributes, data_points) @@ -186,10 +190,6 @@ def grow_buckets(span, buckets) buckets.grow(span + 1, @size) end - def aggregation_temporality - @aggregation_temporality.temporality - end - def new_mapping(scale) scale = validate_scale(scale) scale <= 0 ? ExponentialHistogram::ExponentMapping.new(scale) : ExponentialHistogram::LogarithmMapping.new(scale) From 07b1c87951ca08c537d652b6c4e51f410d5dd7a7 Mon Sep 17 00:00:00 2001 From: xuan-cao-swi Date: Tue, 26 Aug 2025 15:13:02 -0400 Subject: [PATCH 10/13] remove testing comments --- .../sdk/metrics/aggregation/exponential_bucket_histogram.rb | 3 --- 1 file changed, 3 deletions(-) diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb index c528e110fc..b3664d8537 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram.rb @@ -200,9 +200,6 @@ def empty_counts end def get_scale_change(low, high) - # puts "get_scale_change: low: #{low}, high: #{high}, @size: #{@size}" - # python code also produce 18 with 0,1048575, the high is little bit off - # just checked, the mapping is also ok, produce the 1048575 change = 0 while high - low >= @size high >>= 1 From 595e4c6ebb7b8ac44e0defa01e97220d78807ea8 Mon Sep 17 00:00:00 2001 From: xuan-cao-swi Date: Tue, 2 Sep 2025 15:30:31 -0400 Subject: [PATCH 11/13] merge --- .../metrics/aggregation/exponential_bucket_histogram_test.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb index fb0fbd7f8d..cc322434ea 100644 --- a/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb +++ b/metrics_sdk/test/opentelemetry/sdk/metrics/aggregation/exponential_bucket_histogram_test.rb @@ -144,8 +144,8 @@ zero_threshold: 0 ) - expbh.update(0, {}, data_points) - expbh.update(10_000, {}, data_points) + expbh.update(0, {}, data_points, cardinality_limit) + expbh.update(10_000, {}, data_points, cardinality_limit) exphdps = expbh.collect(start_time, end_time, data_points) From 4022e045e966e9f370fe936bf591ceaa3f48be27 Mon Sep 17 00:00:00 2001 From: xuan-cao-swi Date: Tue, 28 Oct 2025 13:43:15 -0400 Subject: [PATCH 12/13] resolve merge issue --- .../sdk/metrics/state/asynchronous_metric_stream.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/asynchronous_metric_stream.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/asynchronous_metric_stream.rb index 8cca8b6830..61301d5a5f 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/asynchronous_metric_stream.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/asynchronous_metric_stream.rb @@ -54,7 +54,7 @@ def invoke_callback(timeout, attributes) @mutex.synchronize do @callback.each do |cb| value = safe_guard_callback(cb, timeout: timeout) - @default_aggregation.update(value, attributes, @data_points) if value.is_a?(Numeric) + @default_aggregation.update(value, attributes, @data_points, resolved_cardinality_limit) if value.is_a?(Numeric) end end else @@ -68,7 +68,7 @@ def invoke_callback(timeout, attributes) merged_attributes = attributes || {} merged_attributes.merge!(view.attribute_keys) - view.aggregation.update(value, merged_attributes, @data_points) if view.valid_aggregation? + view.aggregation.update(value, merged_attributes, @data_points, resolved_cardinality_limit) if view.valid_aggregation? end end end From f6323f3617ddba94ea6ff59663dcc4c7fbf1039b Mon Sep 17 00:00:00 2001 From: xuan-cao-swi Date: Mon, 3 Nov 2025 14:26:11 -0500 Subject: [PATCH 13/13] resolve merge issue --- .../sdk/metrics/state/asynchronous_metric_stream.rb | 4 ++-- .../opentelemetry/sdk/metrics/state/metric_stream.rb | 12 ++++++++---- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/asynchronous_metric_stream.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/asynchronous_metric_stream.rb index 19123a5881..89f2838aeb 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/asynchronous_metric_stream.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/asynchronous_metric_stream.rb @@ -58,7 +58,7 @@ def invoke_callback(timeout, attributes) end end else - @registered_views.each do |view| + @registered_views.each do |view, data_points| resolved_cardinality_limit = resolve_cardinality_limit(view) @mutex.synchronize do @@ -68,7 +68,7 @@ def invoke_callback(timeout, attributes) merged_attributes = attributes || {} merged_attributes.merge!(view.attribute_keys) - view.aggregation.update(value, merged_attributes, @data_points, resolved_cardinality_limit) if view.valid_aggregation? + view.aggregation.update(value, merged_attributes, data_points, resolved_cardinality_limit) if view.valid_aggregation? end end end diff --git a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb index d5de4d979f..cb2cb942b0 100644 --- a/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb +++ b/metrics_sdk/lib/opentelemetry/sdk/metrics/state/metric_stream.rb @@ -12,6 +12,8 @@ module State # # The MetricStream class provides SDK internal functionality that is not a part of the # public API. + # + # rubocop:disable Metrics/ClassLength class MetricStream attr_reader :name, :description, :unit, :instrument_kind, :instrumentation_scope, :data_points attr_writer :cardinality_limit @@ -56,10 +58,11 @@ def collect(start_time, end_time) metric_data << aggregate_metric_data(start_time, end_time) else - @registered_views.each do |view| + @registered_views.each do |view, data_points| metric_data << aggregate_metric_data(start_time, end_time, - aggregation: view.aggregation) + aggregation: view.aggregation, + data_points: data_points) end end @@ -79,12 +82,12 @@ def update(value, attributes) @mutex.synchronize { @default_aggregation.update(value, attributes, @data_points, resolved_cardinality_limit) } else - @registered_views.each do |view| + @registered_views.each do |view, data_points| resolved_cardinality_limit = resolve_cardinality_limit(view) @mutex.synchronize do attributes ||= {} attributes.merge!(view.attribute_keys) - view.aggregation.update(value, attributes, @data_points, resolved_cardinality_limit) if view.valid_aggregation? + view.aggregation.update(value, attributes, data_points, resolved_cardinality_limit) if view.valid_aggregation? end end end @@ -146,6 +149,7 @@ def to_s end.join("\n") end end + # rubocop:enable Metrics/ClassLength end end end