diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/CHANGELOG.md b/sdk/monitor/azure-monitor-opentelemetry-exporter/CHANGELOG.md index 8bd99bcece52..ff7fdd72501f 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/CHANGELOG.md +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/CHANGELOG.md @@ -5,6 +5,8 @@ ### Features Added ### Breaking Changes +- Fix to accommodate breaking log changes from Otel + ([#43626](https://github.com/Azure/azure-sdk-for-python/pull/43626)) - Pin OpenTelemetry versions to guard against upstream logging breaking changes ([#44220](https://github.com/Azure/azure-sdk-for-python/pull/44220)) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_manager.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_manager.py index ec7c0d289d66..34d53c376a39 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_manager.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_manager.py @@ -9,7 +9,7 @@ from opentelemetry import metrics from opentelemetry.metrics import CallbackOptions, Observation -from opentelemetry.sdk._logs import LogData +from opentelemetry.sdk._logs import ReadableLogRecord from opentelemetry.sdk.trace import ReadableSpan from opentelemetry.semconv.attributes.exception_attributes import ( EXCEPTION_MESSAGE, @@ -632,13 +632,13 @@ def _record_span(self, span: ReadableSpan) -> None: except Exception: # pylint: disable=broad-except _logger.exception("Exception occurred while recording span.") # pylint: disable=C4769 - def _record_log_record(self, log_data: LogData) -> None: + def _record_log_record(self, readable_log_record: ReadableLogRecord) -> None: try: # pylint: disable=global-statement global _EXCEPTIONS_COUNT - if log_data.log_record: + if readable_log_record.log_record: exc_type = None - log_record = log_data.log_record + log_record = readable_log_record.log_record if log_record.attributes: exc_type = log_record.attributes.get(EXCEPTION_TYPE) exc_message = log_record.attributes.get(EXCEPTION_MESSAGE) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_processor.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_processor.py index 8c42fc7b379f..d629535f4d5b 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_processor.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_processor.py @@ -1,7 +1,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
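The renamed _record_log_record above keeps the exception-counting behaviour and only swaps the wrapper type. As a quick aid for reviewers, here is a minimal sketch of the attribute check it performs, using the same semconv keys imported at the top of _manager.py; the looks_like_exception helper name is illustrative and not part of this PR, and it only roughly mirrors the (partially elided) counting condition. Assumes opentelemetry-semantic-conventions is installed.

    from opentelemetry.semconv.attributes.exception_attributes import (
        EXCEPTION_MESSAGE,
        EXCEPTION_TYPE,
    )

    def looks_like_exception(attributes) -> bool:
        # A record is treated as an exception when either semconv exception
        # key is present and non-empty on its attributes.
        if not attributes:
            return False
        return bool(attributes.get(EXCEPTION_TYPE) or attributes.get(EXCEPTION_MESSAGE))

    assert looks_like_exception({EXCEPTION_TYPE: "ZeroDivisionError"})
    assert not looks_like_exception({"normal": "attribute"})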
-from opentelemetry.sdk._logs import LogData, LogRecordProcessor +from opentelemetry.sdk._logs import ReadableLogRecord, LogRecordProcessor from opentelemetry.sdk.trace import ReadableSpan, SpanProcessor from azure.monitor.opentelemetry.exporter._performance_counters._manager import _PerformanceCountersManager @@ -13,18 +13,18 @@ def __init__(self): super().__init__() self.call_on_emit = hasattr(super(), 'on_emit') - def on_emit(self, log_data: LogData) -> None: # type: ignore + def on_emit(self, readable_log_record: ReadableLogRecord) -> None: # type: ignore # pylint: disable=arguments-renamed pcm = _PerformanceCountersManager() if pcm: - pcm._record_log_record(log_data) + pcm._record_log_record(readable_log_record) if self.call_on_emit: - super().on_emit(log_data) # type: ignore[safe-super] + super().on_emit(readable_log_record) # type: ignore[safe-super] else: # this method was removed in opentelemetry-sdk and replaced with on_emit - super().emit(log_data) # type: ignore[safe-super,misc] # pylint: disable=no-member + super().emit(readable_log_record) # type: ignore[safe-super,misc] # pylint: disable=no-member - def emit(self, log_data: LogData) -> None: - self.on_emit(log_data) + def emit(self, readable_log_record: ReadableLogRecord) -> None: + self.on_emit(readable_log_record) def shutdown(self): pass diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_manager.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_manager.py index 533cdc83247a..e6d5c8aa4077 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_manager.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_manager.py @@ -9,7 +9,7 @@ import psutil -from opentelemetry.sdk._logs import LogData +from opentelemetry.sdk._logs import ReadableLogRecord from opentelemetry.sdk.metrics import MeterProvider, Meter from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import ReadableSpan @@ -353,7 +353,7 @@ def _record_span(self, span: ReadableSpan) -> None: except Exception as e: # pylint: disable=broad-except _logger.exception("Exception occurred while recording span: %s", e) # pylint: disable=C4769 - def _record_log_record(self, log_data: LogData) -> None: + def _record_log_record(self, readable_log_record: ReadableLogRecord) -> None: # Only record if in post state and manager is initialized if not (_is_post_state() and self.is_initialized()): return @@ -364,9 +364,9 @@ def _record_log_record(self, log_data: LogData) -> None: return try: - if log_data.log_record: + if readable_log_record.log_record: exc_type = None - log_record = log_data.log_record + log_record = readable_log_record.log_record if log_record.attributes: exc_type = log_record.attributes.get(SpanAttributes.EXCEPTION_TYPE) exc_message = log_record.attributes.get(SpanAttributes.EXCEPTION_MESSAGE) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_processor.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_processor.py index 97414109ea29..ac06802c3327 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_processor.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_processor.py @@ -1,7 +1,7 @@ # 
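Both the performance-counter and Quickpulse log record processors rely on the same hasattr-based dispatch, so one implementation works whether the installed opentelemetry-sdk still uses emit() or the newer on_emit(). A dependency-free sketch of that pattern, with hypothetical class names standing in for the LogRecordProcessor hierarchy:

    class BaseProcessor:
        # Stand-in for an SDK base class that already exposes on_emit.
        def on_emit(self, record) -> None:
            print("base on_emit:", record)

    class CompatProcessor(BaseProcessor):
        def __init__(self):
            super().__init__()
            # True when the base class exposes the post-rename entry point.
            self.call_on_emit = hasattr(super(), "on_emit")

        def on_emit(self, record) -> None:
            # ... record metrics for `record` here ...
            if self.call_on_emit:
                super().on_emit(record)
            else:
                # Older SDKs only provide emit(); on_emit does not exist there.
                super().emit(record)

        def emit(self, record) -> None:
            # Legacy entry point kept for older SDKs; forwards to the new name.
            self.on_emit(record)

    CompatProcessor().emit("hello")  # prints: base on_emit: hello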
Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. -from opentelemetry.sdk._logs import LogData, LogRecordProcessor +from opentelemetry.sdk._logs import ReadableLogRecord, LogRecordProcessor from opentelemetry.sdk.trace import ReadableSpan, SpanProcessor from azure.monitor.opentelemetry.exporter._quickpulse._state import get_quickpulse_manager @@ -13,18 +13,18 @@ def __init__(self): super().__init__() self.call_on_emit = hasattr(super(), 'on_emit') - def on_emit(self, log_data: LogData) -> None: # type: ignore + def on_emit(self, readable_log_record: ReadableLogRecord) -> None: # type: ignore # pylint: disable=arguments-renamed qpm = get_quickpulse_manager() if qpm: - qpm._record_log_record(log_data) + qpm._record_log_record(readable_log_record) if self.call_on_emit: - super().on_emit(log_data) # type: ignore[safe-super] + super().on_emit(readable_log_record) # type: ignore[safe-super] else: # this method was removed in opentelemetry-sdk and replaced with on_emit - super().emit(log_data) # type: ignore[safe-super,misc] # pylint: disable=no-member + super().emit(readable_log_record) # type: ignore[safe-super,misc] # pylint: disable=no-member - def emit(self, log_data: LogData) -> None: - self.on_emit(log_data) + def emit(self, readable_log_record: ReadableLogRecord) -> None: # pylint: disable=arguments-renamed + self.on_emit(readable_log_record) def shutdown(self): pass diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_types.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_types.py index 810c3f91fe3b..fe0b0e94ec2f 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_types.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_types.py @@ -4,8 +4,8 @@ from dataclasses import dataclass, fields from typing import Dict, no_type_check -from opentelemetry.sdk._logs import LogRecord -from opentelemetry.sdk.trace import Event, ReadableSpan +from opentelemetry._logs import LogRecord +from opentelemetry.sdk.trace import ReadableSpan from opentelemetry.semconv._incubating.attributes import gen_ai_attributes from opentelemetry.semconv.attributes.http_attributes import ( HTTP_REQUEST_METHOD, @@ -177,7 +177,7 @@ def _from_log_record(log_record: LogRecord): @staticmethod @no_type_check - def _from_span_event(span_event: Event): + def _from_span_event(span_event: LogRecord): return _ExceptionData( message=str(span_event.attributes.get(SpanAttributes.EXCEPTION_MESSAGE, "")), stack_trace=str(span_event.attributes.get(SpanAttributes.EXCEPTION_STACKTRACE, "")), diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_exporter.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_exporter.py index ad83e55bb809..b1101f39597f 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_exporter.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_exporter.py @@ -11,8 +11,8 @@ EXCEPTION_STACKTRACE, EXCEPTION_TYPE, ) -from opentelemetry.sdk._logs import LogData -from opentelemetry.sdk._logs.export import LogExporter, LogExportResult +from opentelemetry.sdk._logs import ReadableLogRecord +from opentelemetry.sdk._logs.export import 
LogRecordExporter, LogRecordExportResult from azure.monitor.opentelemetry.exporter import _utils from azure.monitor.opentelemetry.exporter._constants import ( @@ -53,16 +53,16 @@ __all__ = ["AzureMonitorLogExporter"] -class AzureMonitorLogExporter(BaseExporter, LogExporter): +class AzureMonitorLogExporter(BaseExporter, LogRecordExporter): """Azure Monitor Log exporter for OpenTelemetry.""" - def export(self, batch: Sequence[LogData], **kwargs: Any) -> LogExportResult: # pylint: disable=unused-argument + def export(self, batch: Sequence[ReadableLogRecord], **kwargs: Any) -> LogRecordExportResult: # pylint: disable=unused-argument """Export log data. - :param batch: OpenTelemetry LogData(s) to export. - :type batch: ~typing.Sequence[~opentelemetry._logs.LogData] + :param batch: OpenTelemetry ReadableLogRecord(s) to export. + :type batch: ~typing.Sequence[~opentelemetry._logs.ReadableLogRecord] :return: The result of the export. - :rtype: ~opentelemetry.sdk._logs.export.LogData + :rtype: ~opentelemetry.sdk._logs.export.ReadableLogRecord """ envelopes = [self._log_to_envelope(log) for log in batch] try: @@ -81,8 +81,8 @@ def shutdown(self) -> None: if self.storage: self.storage.close() - def _log_to_envelope(self, log_data: LogData) -> TelemetryItem: - envelope = _convert_log_to_envelope(log_data) + def _log_to_envelope(self, readable_log_record: ReadableLogRecord) -> TelemetryItem: + envelope = _convert_log_to_envelope(readable_log_record) envelope.instrumentation_key = self._instrumentation_key return envelope @@ -106,8 +106,8 @@ def from_connection_string(cls, conn_str: str, **kwargs: Any) -> "AzureMonitorLo return cls(connection_string=conn_str, **kwargs) -def _log_data_is_event(log_data: LogData) -> bool: - log_record = log_data.log_record +def _log_data_is_event(readable_log_record: ReadableLogRecord) -> bool: + log_record = readable_log_record.log_record is_event = None if log_record.attributes: is_event = log_record.attributes.get(_MICROSOFT_CUSTOM_EVENT_NAME) or \ @@ -117,11 +117,11 @@ def _log_data_is_event(log_data: LogData) -> bool: # pylint: disable=protected-access # pylint: disable=too-many-statements -def _convert_log_to_envelope(log_data: LogData) -> TelemetryItem: - log_record = log_data.log_record +def _convert_log_to_envelope(readable_log_record: ReadableLogRecord) -> TelemetryItem: + log_record = readable_log_record.log_record time_stamp = log_record.timestamp if log_record.timestamp is not None else log_record.observed_timestamp envelope = _utils._create_telemetry_item(time_stamp) - envelope.tags.update(_utils._populate_part_a_fields(log_record.resource)) # type: ignore + envelope.tags.update(_utils._populate_part_a_fields(readable_log_record.resource)) # type: ignore envelope.tags[ContextTagKeys.AI_OPERATION_ID] = "{:032x}".format( # type: ignore log_record.trace_id or _DEFAULT_TRACE_ID ) @@ -177,7 +177,7 @@ def _convert_log_to_envelope(log_data: LogData) -> TelemetryItem: exceptions=[exc_details], ) envelope.data = MonitorBase(base_data=data, base_type="ExceptionData") - elif _log_data_is_event(log_data): # Event telemetry + elif _log_data_is_event(readable_log_record): # Event telemetry _set_statsbeat_custom_events_feature() envelope.name = "Microsoft.ApplicationInsights.Event" event_name = "" @@ -207,10 +207,10 @@ def _convert_log_to_envelope(log_data: LogData) -> TelemetryItem: return envelope -def _get_log_export_result(result: ExportResult) -> LogExportResult: +def _get_log_export_result(result: ExportResult) -> LogRecordExportResult: if result == 
ExportResult.SUCCESS: - return LogExportResult.SUCCESS - return LogExportResult.FAILURE + return LogRecordExportResult.SUCCESS + return LogRecordExportResult.FAILURE # pylint: disable=line-too-long diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_processor.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_processor.py index 382f5d4f948d..52ac651eab2d 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_processor.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_processor.py @@ -3,7 +3,7 @@ from typing import Optional, Dict, Any -from opentelemetry.sdk._logs import LogData +from opentelemetry.sdk._logs import ReadableLogRecord from opentelemetry.sdk._logs.export import BatchLogRecordProcessor, LogExporter from opentelemetry.trace import get_current_span @@ -26,7 +26,7 @@ def __init__( self._options = options or {} self._enable_trace_based_sampling_for_logs = self._options.get("enable_trace_based_sampling_for_logs") - def on_emit(self, log_data: LogData) -> None: + def on_emit(self, readable_log_record: ReadableLogRecord) -> None: # pylint: disable=arguments-renamed # cspell: disable """ Determines whether the logger should drop log records associated with unsampled traces. If `trace_based_sampling` is `true`, log records associated with unsampled traces are dropped by the `Logger`. @@ -34,16 +34,16 @@ def on_emit(self, log_data: LogData) -> None: `TraceFlags` indicate that the trace is unsampled. A log record that isn't associated with a trace context is not affected by this parameter and therefore bypasses trace based sampling filtering. 
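The docstring above describes the behaviour gated by enable_trace_based_sampling_for_logs: records tied to an unsampled trace are dropped, while records with no trace context pass through. A minimal sketch of that check, built only from opentelemetry-api primitives already used elsewhere in this PR; the should_drop helper and the literal trace/span ids are illustrative, not part of the exporter:

    from opentelemetry.trace import (
        NonRecordingSpan,
        SpanContext,
        TraceFlags,
        get_current_span,
        set_span_in_context,
    )

    def should_drop(log_context) -> bool:
        # No trace context: trace-based sampling does not apply.
        if log_context is None:
            return False
        span_context = get_current_span(log_context).get_span_context()
        # Drop only when the context is valid and the trace was not sampled.
        return span_context.is_valid and not span_context.trace_flags.sampled

    unsampled_ctx = set_span_in_context(
        NonRecordingSpan(
            SpanContext(
                trace_id=0x1,
                span_id=0x1,
                is_remote=False,
                trace_flags=TraceFlags(TraceFlags.DEFAULT),  # not sampled
            )
        )
    )
    assert should_drop(unsampled_ctx)
    assert not should_drop(None)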
- :param log_data: Contains the log record to be exported - :type log_data: LogData + :param readable_log_record: Contains the log record to be exported + :type readable_log_record: ReadableLogRecord """ # cspell: enable if self._enable_trace_based_sampling_for_logs: - if hasattr(log_data, "log_record") and log_data.log_record is not None: - if hasattr(log_data.log_record, "context") and log_data.log_record.context is not None: - span = get_current_span(log_data.log_record.context) + if hasattr(readable_log_record, "log_record") and readable_log_record.log_record is not None: + if hasattr(readable_log_record.log_record, "context") and readable_log_record.log_record.context is not None: # pylint: disable=line-too-long + span = get_current_span(readable_log_record.log_record.context) span_context = span.get_span_context() if span_context.is_valid and not span_context.trace_flags.sampled: return - super().on_emit(log_data) + super().on_emit(readable_log_record) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/setup.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/setup.py index 55d6c46f891b..db3a12c7136d 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/setup.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/setup.py @@ -85,8 +85,8 @@ "azure-core<2.0.0,>=1.28.0", "azure-identity~=1.17", "msrest>=0.6.10", - "opentelemetry-api==1.38", - "opentelemetry-sdk==1.38", + "opentelemetry-api==1.39", + "opentelemetry-sdk==1.39", "psutil>=5.9,<8", ], entry_points={ diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_logs.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_logs.py index 5f09533b1b24..51946a92599e 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_logs.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_logs.py @@ -15,9 +15,10 @@ EXCEPTION_TYPE, ) from opentelemetry.sdk import _logs +from opentelemetry._logs import LogRecord from opentelemetry.sdk.util.instrumentation import InstrumentationScope from opentelemetry.sdk.resources import Resource -from opentelemetry.sdk._logs.export import LogExportResult +from opentelemetry.sdk._logs.export import LogRecordExportResult from opentelemetry._logs.severity import SeverityNumber from opentelemetry.trace import set_span_in_context, SpanContext, NonRecordingSpan @@ -72,146 +73,145 @@ def setUpClass(cls): ) span = NonRecordingSpan(span_context) ctx = set_span_in_context(span) - cls._log_data = _logs.LogData( - _logs.LogRecord( + cls._log_data = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="WARNING", severity_number=SeverityNumber.WARN, body="Test message", - resource=Resource.create(attributes={"asd": "test_resource"}), attributes={"test": "attribute", "ai.operation.name": "TestOperationName"}, ), - InstrumentationScope("test_name"), + resource=Resource.create(attributes={"asd": "test_resource"}), + instrumentation_scope=InstrumentationScope("test_name"), ) - cls._log_data_empty = _logs.LogData( - _logs.LogRecord( + cls._log_data_empty = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="WARNING", severity_number=SeverityNumber.WARN, body="", - resource=Resource.create(attributes={"asd": "test_resource"}), attributes={"test": "attribute", "ai.operation.name": "TestOperationName"}, ), - InstrumentationScope("test_name"), + resource=Resource.create(attributes={"asd": "test_resource"}), + 
instrumentation_scope=InstrumentationScope("test_name"), ) - cls._log_data_none = _logs.LogData( - _logs.LogRecord( + cls._log_data_none = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="WARNING", severity_number=SeverityNumber.WARN, body=None, - resource=Resource.create(attributes={"asd": "test_resource"}), attributes={"test": "attribute"}, ), - InstrumentationScope("test_name"), + resource=Resource.create(attributes={"asd": "test_resource"}), + instrumentation_scope=InstrumentationScope("test_name"), ) - cls._log_data_complex_body = _logs.LogData( - _logs.LogRecord( + cls._log_data_complex_body = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="WARNING", severity_number=SeverityNumber.WARN, body={"foo": {"bar": "baz", "qux": 42}}, - resource=Resource.create(attributes={"asd": "test_resource"}), attributes={"test": "attribute", "ai.operation.name": "TestOperationName"}, ), - InstrumentationScope("test_name"), + resource=Resource.create(attributes={"asd": "test_resource"}), + instrumentation_scope=InstrumentationScope("test_name"), ) - cls._log_data_complex_body_not_serializeable = _logs.LogData( - _logs.LogRecord( + cls._log_data_complex_body_not_serializeable = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="WARNING", severity_number=SeverityNumber.WARN, body=NotSerializeableClass(), - resource=Resource.create(attributes={"asd": "test_resource"}), attributes={"test": "attribute"}, ), - InstrumentationScope("test_name"), + resource=Resource.create(attributes={"asd": "test_resource"}), + instrumentation_scope=InstrumentationScope("test_name"), ) - cls._log_data_empty_with_whitespaces = _logs.LogData( - _logs.LogRecord( + cls._log_data_empty_with_whitespaces = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="WARNING", severity_number=SeverityNumber.WARN, body=" ", - resource=Resource.create(attributes={"asd": "test_resource"}), attributes={"test": "attribute"}, ), - InstrumentationScope("test_name"), + resource=Resource.create(attributes={"asd": "test_resource"}), + instrumentation_scope=InstrumentationScope("test_name"), ) - cls._log_data_event = _logs.LogData( - _logs.LogRecord( + cls._log_data_event = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="INFO", severity_number=SeverityNumber.INFO, body="Test Event", - resource=Resource.create(attributes={"asd": "test_resource"}), attributes={ "event_key": "event_attribute", _APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE: True, }, ), - InstrumentationScope("test_name"), + resource=Resource.create(attributes={"asd": "test_resource"}), + instrumentation_scope=InstrumentationScope("test_name"), ) - cls._log_data_event_complex_body = _logs.LogData( - _logs.LogRecord( + cls._log_data_event_complex_body = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="INFO", severity_number=SeverityNumber.INFO, body={"foo": {"bar": "baz", "qux": 42}}, - resource=Resource.create(attributes={"asd": "test_resource"}), attributes={ "event_key": "event_attribute", _APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE: True, }, ), - InstrumentationScope("test_name"), + resource=Resource.create(attributes={"asd": "test_resource"}), + instrumentation_scope=InstrumentationScope("test_name"), ) - cls._log_data_event_complex_body_not_serializeable = _logs.LogData( - _logs.LogRecord( + 
cls._log_data_event_complex_body_not_serializeable = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, severity_text="INFO", severity_number=SeverityNumber.INFO, body=NotSerializeableClass(), - resource=Resource.create(attributes={"asd": "test_resource"}), attributes={ "event_key": "event_attribute", _APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE: True, }, ), - InstrumentationScope("test_name"), + resource=Resource.create(attributes={"asd": "test_resource"}), + instrumentation_scope=InstrumentationScope("test_name"), ) - cls._log_data_custom_event = _logs.LogData( - _logs.LogRecord( + cls._log_data_custom_event = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="INFO", severity_number=SeverityNumber.INFO, body="Test Event", - resource=Resource.create(attributes={"asd": "test_resource"}), attributes={ "event_key": "event_attribute", _MICROSOFT_CUSTOM_EVENT_NAME: "event_name", "client.address": "192.168.1.1", }, ), - InstrumentationScope("test_name"), + resource=Resource.create(attributes={"asd": "test_resource"}), + instrumentation_scope=InstrumentationScope("test_name"), ) - cls._exc_data = _logs.LogData( - _logs.LogRecord( + cls._exc_data = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="EXCEPTION", severity_number=SeverityNumber.FATAL, body="Test message", - resource=Resource.create(attributes={"asd": "test_resource"}), attributes={ "test": "attribute", EXCEPTION_TYPE: "ZeroDivisionError", @@ -219,16 +219,16 @@ def setUpClass(cls): EXCEPTION_STACKTRACE: 'Traceback (most recent call last):\n File "test.py", line 38, in \n raise ZeroDivisionError()\nZeroDivisionError\n', }, ), - InstrumentationScope("test_name"), + resource=Resource.create(attributes={"asd": "test_resource"}), + instrumentation_scope=InstrumentationScope("test_name"), ) - cls._exc_data_with_exc_body = _logs.LogData( - _logs.LogRecord( + cls._exc_data_with_exc_body = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="EXCEPTION", severity_number=SeverityNumber.FATAL, body=Exception("test exception message"), - resource=Resource.create(attributes={"asd": "test_resource"}), attributes={ "test": "attribute", EXCEPTION_TYPE: "ZeroDivisionError", @@ -236,31 +236,32 @@ def setUpClass(cls): EXCEPTION_STACKTRACE: 'Traceback (most recent call last):\n File "test.py", line 38, in \n raise ZeroDivisionError()\nZeroDivisionError\n', }, ), - InstrumentationScope("test_name"), + resource=Resource.create(attributes={"asd": "test_resource"}), + instrumentation_scope=InstrumentationScope("test_name"), ) - cls._exc_data_blank_exception = _logs.LogData( - _logs.LogRecord( + cls._exc_data_blank_exception = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="EXCEPTION", severity_number=SeverityNumber.FATAL, body="test exception", - resource=Resource.create(attributes={"asd": "test_resource"}), attributes={"test": "attribute", EXCEPTION_TYPE: "", EXCEPTION_MESSAGE: "", EXCEPTION_STACKTRACE: ""}, ), - InstrumentationScope("test_name"), + resource=Resource.create(attributes={"asd": "test_resource"}), + instrumentation_scope=InstrumentationScope("test_name"), ) - cls._exc_data_empty = _logs.LogData( - _logs.LogRecord( + cls._exc_data_empty = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="EXCEPTION", severity_number=SeverityNumber.FATAL, body="", - resource=Resource.create(attributes={"asd": 
"test_resource"}), attributes={"test": "attribute", EXCEPTION_TYPE: "", EXCEPTION_MESSAGE: "", EXCEPTION_STACKTRACE: ""}, ), - InstrumentationScope("test_name"), + resource=Resource.create(attributes={"asd": "test_resource"}), + instrumentation_scope=InstrumentationScope("test_name"), ) @classmethod @@ -291,7 +292,7 @@ def test_from_connection_string(self): def test_export_empty(self): exporter = self._exporter result = exporter.export([]) - self.assertEqual(result, LogExportResult.SUCCESS) + self.assertEqual(result, LogRecordExportResult.SUCCESS) def test_export_failure(self): exporter = self._exporter @@ -302,7 +303,7 @@ def test_export_failure(self): storage_mock = mock.Mock() exporter.storage.put = storage_mock result = exporter.export([self._log_data]) - self.assertEqual(result, LogExportResult.FAILURE) + self.assertEqual(result, LogRecordExportResult.FAILURE) self.assertEqual(storage_mock.call_count, 1) def test_export_success(self): @@ -314,7 +315,7 @@ def test_export_success(self): storage_mock = mock.Mock() exporter._transmit_from_storage = storage_mock result = exporter.export([self._log_data]) - self.assertEqual(result, LogExportResult.SUCCESS) + self.assertEqual(result, LogRecordExportResult.SUCCESS) self.assertEqual(storage_mock.call_count, 1) @mock.patch("azure.monitor.opentelemetry.exporter.export.logs._exporter._logger") @@ -325,7 +326,7 @@ def test_export_exception(self, logger_mock): throw(Exception), ): # noqa: E501 result = exporter.export([self._log_data]) - self.assertEqual(result, LogExportResult.FAILURE) + self.assertEqual(result, LogRecordExportResult.FAILURE) self.assertEqual(logger_mock.exception.called, True) def test_export_not_retryable(self): @@ -335,11 +336,11 @@ def test_export_not_retryable(self): ) as transmit: # noqa: E501 transmit.return_value = ExportResult.FAILED_NOT_RETRYABLE result = exporter.export([self._log_data]) - self.assertEqual(result, LogExportResult.FAILURE) + self.assertEqual(result, LogRecordExportResult.FAILURE) def test_log_to_envelope_partA(self): exporter = self._exporter - old_resource = self._log_data.log_record.resource + old_resource = self._log_data.resource resource = Resource( { "service.name": "testServiceName", @@ -347,7 +348,7 @@ def test_log_to_envelope_partA(self): "service.instance.id": "testServiceInstanceId", } ) - self._log_data.log_record.resource = resource + self._log_data.resource = resource envelope = exporter._log_to_envelope(self._log_data) self.assertEqual(envelope.instrumentation_key, "1234abcd-5678-4efa-8abc-1234567890ab") @@ -373,14 +374,14 @@ def test_log_to_envelope_partA(self): self.assertEqual(envelope.tags.get(ContextTagKeys.AI_OPERATION_ID), "{:032x}".format(trace_id)) span_id = self._log_data.log_record.span_id self.assertEqual(envelope.tags.get(ContextTagKeys.AI_OPERATION_PARENT_ID), "{:016x}".format(span_id)) - self._log_data.log_record.resource = old_resource + self._log_data.resource = old_resource self.assertEqual(envelope.tags.get(ContextTagKeys.AI_OPERATION_NAME), "TestOperationName") def test_log_to_envelope_partA_default(self): exporter = self._exporter - old_resource = self._log_data.log_record.resource + old_resource = self._log_data.resource resource = Resource({"service.name": "testServiceName"}) - self._log_data.log_record.resource = resource + self._log_data.resource = resource envelope = exporter._log_to_envelope(self._log_data) self.assertEqual(envelope.tags.get(ContextTagKeys.AI_CLOUD_ROLE), "testServiceName") 
self.assertEqual(envelope.tags.get(ContextTagKeys.AI_CLOUD_ROLE_INSTANCE), platform.node()) @@ -388,7 +389,7 @@ def test_log_to_envelope_partA_default(self): envelope.tags.get(ContextTagKeys.AI_INTERNAL_NODE_NAME), envelope.tags.get(ContextTagKeys.AI_CLOUD_ROLE_INSTANCE), ) - self._log_data.log_record.resource = old_resource + self._log_data.resource = old_resource def test_log_to_envelope_log(self): exporter = self._exporter @@ -574,20 +575,20 @@ def test_log_to_envelope_synthetic_source(self): ) span = NonRecordingSpan(span_context) ctx = set_span_in_context(span) - log_data = _logs.LogData( - _logs.LogRecord( + log_data = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="WARNING", severity_number=SeverityNumber.WARN, body="Test message", - resource=resource, attributes={ "test": "attribute", "user_agent.synthetic.type": "bot", }, ), - InstrumentationScope("test_name"), + resource=resource, + instrumentation_scope=InstrumentationScope("test_name"), ) envelope = exporter._log_to_envelope(log_data) @@ -612,20 +613,20 @@ def test_log_to_envelope_synthetic_load_always_on(self): ) span = NonRecordingSpan(span_context) ctx = set_span_in_context(span) - log_data = _logs.LogData( - _logs.LogRecord( + log_data = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="WARNING", severity_number=SeverityNumber.WARN, body="Test message", - resource=resource, attributes={ "test": "attribute", "http.user_agent": "Azure-Load-Testing/1.0 AlwaysOn", }, ), - InstrumentationScope("test_name"), + resource=resource, + instrumentation_scope=InstrumentationScope("test_name"), ) envelope = exporter._log_to_envelope(log_data) @@ -667,7 +668,7 @@ def test_export_failure(self): transmit_from_storage_mock = mock.Mock() exporter._handle_transmit_from_storage = transmit_from_storage_mock result = exporter.export([self._log_data]) - self.assertEqual(result, LogExportResult.FAILURE) + self.assertEqual(result, LogRecordExportResult.FAILURE) self.assertEqual(exporter.storage, None) self.assertEqual(transmit_from_storage_mock.call_count, 1) @@ -680,7 +681,7 @@ def test_export_success(self): storage_mock = mock.Mock() exporter._transmit_from_storage = storage_mock result = exporter.export([self._log_data]) - self.assertEqual(result, LogExportResult.SUCCESS) + self.assertEqual(result, LogRecordExportResult.SUCCESS) self.assertEqual(storage_mock.call_count, 0) @@ -688,17 +689,17 @@ class TestAzureLogExporterUtils(unittest.TestCase): def test_get_log_export_result(self): self.assertEqual( _get_log_export_result(ExportResult.SUCCESS), - LogExportResult.SUCCESS, + LogRecordExportResult.SUCCESS, ) self.assertEqual( _get_log_export_result(ExportResult.FAILED_NOT_RETRYABLE), - LogExportResult.FAILURE, + LogRecordExportResult.FAILURE, ) self.assertEqual( _get_log_export_result(ExportResult.FAILED_RETRYABLE), - LogExportResult.FAILURE, + LogRecordExportResult.FAILURE, ) - self.assertEqual(_get_log_export_result(None), LogExportResult.FAILURE) + self.assertEqual(_get_log_export_result(None), LogRecordExportResult.FAILURE) def test_get_severity_level(self): for sev_num in SeverityNumber: diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_processor.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_processor.py index d48c76d12596..ed9dace6b025 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_processor.py +++ 
b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_processor.py @@ -3,6 +3,7 @@ from unittest import mock from opentelemetry.sdk import _logs +from opentelemetry._logs import LogRecord from opentelemetry.sdk.util.instrumentation import InstrumentationScope from opentelemetry._logs.severity import SeverityNumber from opentelemetry.trace import TraceFlags, set_span_in_context, SpanContext, NonRecordingSpan @@ -71,8 +72,8 @@ def test_on_emit_with_trace_based_sampling_disabled(self): span = NonRecordingSpan(span_context) ctx = set_span_in_context(span) - log_record = _logs.LogData( - _logs.LogRecord( + log_record = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="INFO", @@ -111,8 +112,8 @@ def test_on_emit_with_trace_based_sampling_enabled_and_unsampled_trace(self): # span = NonRecordingSpan(span_context) ctx = set_span_in_context(span) - log_record = _logs.LogData( - _logs.LogRecord( + log_record = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="INFO", @@ -152,8 +153,8 @@ def test_on_emit_with_trace_based_sampling_enabled_and_sampled_trace(self): span = NonRecordingSpan(span_context) ctx = set_span_in_context(span) - log_record = _logs.LogData( - _logs.LogRecord( + log_record = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="INFO", @@ -191,8 +192,8 @@ def test_on_emit_with_trace_based_sampling_enabled_and_invalid_span_context(self span = NonRecordingSpan(span_context) ctx = set_span_in_context(span) - log_record = _logs.LogData( - _logs.LogRecord( + log_record = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="INFO", @@ -215,8 +216,8 @@ def test_on_emit_with_trace_based_sampling_enabled_and_no_context(self): options={"enable_trace_based_sampling_for_logs": True} ) - log_record = _logs.LogData( - _logs.LogRecord( + log_record = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419456, context=None, severity_text="INFO", @@ -263,8 +264,8 @@ def test_on_emit_integration_with_multiple_log_records(self): span = NonRecordingSpan(span_context) ctx = set_span_in_context(span) - log_record_unsampled = _logs.LogData( # cspell:disable-line - _logs.LogRecord( + log_record_unsampled = _logs.ReadWriteLogRecord( # cspell:disable-line + LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="INFO", @@ -283,8 +284,8 @@ def test_on_emit_integration_with_multiple_log_records(self): span = NonRecordingSpan(span_context) ctx = set_span_in_context(span) - log_record_sampled = _logs.LogData( - _logs.LogRecord( + log_record_sampled = _logs.ReadWriteLogRecord( + LogRecord( timestamp=1646865018558419457, context=ctx, severity_text="INFO", diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_performance_counters.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_performance_counters.py index de32608846d8..0c15af74cf54 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_performance_counters.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_performance_counters.py @@ -14,7 +14,7 @@ ) from opentelemetry.trace import SpanKind from opentelemetry.sdk.trace import ReadableSpan -from opentelemetry.sdk._logs import LogData +from opentelemetry.sdk._logs import ReadableLogRecord from opentelemetry.sdk.metrics import MeterProvider from 
opentelemetry.sdk.metrics.export import InMemoryMetricReader @@ -568,14 +568,14 @@ def test_record_log_record_with_exception(self, mock_get_meter_provider): EXCEPTION_MESSAGE: "Test exception" } - mock_log_data = MagicMock(spec=LogData) - mock_log_data.log_record = mock_log_record - + mock_readable_log_record = MagicMock(spec=ReadableLogRecord) + mock_readable_log_record.log_record = mock_log_record + # Import to access global counter import azure.monitor.opentelemetry.exporter._performance_counters._manager as manager_module initial_exceptions = manager_module._EXCEPTIONS_COUNT - - manager._record_log_record(mock_log_data) + + manager._record_log_record(mock_readable_log_record) # Check that exception was counted self.assertEqual(manager_module._EXCEPTIONS_COUNT, initial_exceptions + 1) @@ -587,15 +587,15 @@ def test_record_log_record_without_exception(self): # Create a mock log data without exception attributes mock_log_record = MagicMock() mock_log_record.attributes = {"normal": "attribute"} - - mock_log_data = MagicMock(spec=LogData) - mock_log_data.log_record = mock_log_record - + + mock_readable_log_record = MagicMock(spec=ReadableLogRecord) + mock_readable_log_record.log_record = mock_log_record + # Import to access global counter import azure.monitor.opentelemetry.exporter._performance_counters._manager as manager_module initial_exceptions = manager_module._EXCEPTIONS_COUNT - - manager._record_log_record(mock_log_data) + + manager._record_log_record(mock_readable_log_record) # Exception count should not change self.assertEqual(manager_module._EXCEPTIONS_COUNT, initial_exceptions) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_processor.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_processor.py index e7602048f7dd..132c06b6763d 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_processor.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_processor.py @@ -5,7 +5,7 @@ from unittest import mock from unittest.mock import MagicMock -from opentelemetry.sdk._logs import LogData +from opentelemetry.sdk._logs import ReadableLogRecord from opentelemetry.sdk.trace import ReadableSpan from azure.monitor.opentelemetry.exporter._performance_counters._processor import ( @@ -42,13 +42,13 @@ def test_on_emit_with_manager(self, mock_manager_class): processor = _PerformanceCountersLogRecordProcessor() # Create mock log data - mock_log_data = MagicMock(spec=LogData) - - processor.on_emit(mock_log_data) - + mock_readable_log_record = MagicMock(spec=ReadableLogRecord) + + processor.on_emit(mock_readable_log_record) + # Verify manager was called mock_manager_class.assert_called_once() - mock_manager._record_log_record.assert_called_once_with(mock_log_data) + mock_manager._record_log_record.assert_called_once_with(mock_readable_log_record) def test_emit_calls_on_emit(self): """Test emit method calls on_emit.""" @@ -58,12 +58,12 @@ def test_emit_calls_on_emit(self): processor.on_emit = MagicMock() # Create mock log data - mock_log_data = MagicMock(spec=LogData) - - processor.emit(mock_log_data) - + mock_readable_log_record = MagicMock(spec=ReadableLogRecord) + + processor.emit(mock_readable_log_record) + # Verify on_emit was called - processor.on_emit.assert_called_once_with(mock_log_data) + processor.on_emit.assert_called_once_with(mock_readable_log_record) def test_shutdown(self): """Test shutdown method.""" @@ -91,12 +91,11 @@ def 
test_exception_propagation_in_on_emit(self, mock_manager_class): processor = _PerformanceCountersLogRecordProcessor() # Create mock log data - mock_log_data = MagicMock(spec=LogData) + mock_readable_log_record = MagicMock(spec=ReadableLogRecord) # Exception should be propagated with self.assertRaises(Exception) as context: - processor.on_emit(mock_log_data) - + processor.on_emit(mock_readable_log_record) self.assertEqual(str(context.exception), "Test error") diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_manager.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_manager.py index 4e31bae3ac68..235c553c77d1 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_manager.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_manager.py @@ -956,11 +956,11 @@ def test_record_log_record_exception_handling(self, post_state_mock, data_mock, @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._TelemetryData") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._is_post_state") def test_record_log_record_no_log_record(self, post_state_mock, data_mock, metric_derive_mock, doc_mock): - """Test _record_log_record when log_data.log_record is None.""" + """Test _record_log_record when readable_log_record.log_record is None.""" post_state_mock.return_value = True log_data_mock = mock.Mock() log_data_mock.log_record = None - + qpm = _QuickpulseManager() qpm.initialize(connection_string=self.connection_string, resource=self.resource) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_processor.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_processor.py index 6fb8f0439318..cc6a890c403e 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_processor.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_processor.py @@ -28,11 +28,11 @@ def test_emit(self, mock_get_manager): mock_get_manager.return_value = mock_manager processor = _QuickpulseLogRecordProcessor() - log_data = mock.Mock() - processor.on_emit(log_data) - + readable_log_record = mock.Mock() + processor.on_emit(readable_log_record) + mock_get_manager.assert_called_once() - mock_manager._record_log_record.assert_called_once_with(log_data) + mock_manager._record_log_record.assert_called_once_with(readable_log_record) class TestQuickpulseSpanProcessor(unittest.TestCase): diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_types.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_types.py index 15edcda6cdd5..9979f69f6be6 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_types.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_types.py @@ -6,8 +6,8 @@ import unittest from unittest.mock import patch, Mock -from opentelemetry.sdk._logs import LogRecord -from opentelemetry.sdk.trace import Event, ReadableSpan +from opentelemetry._logs import LogRecord +from opentelemetry.sdk.trace import ReadableSpan from opentelemetry.semconv._incubating.attributes import gen_ai_attributes from opentelemetry.semconv.attributes.http_attributes import ( HTTP_REQUEST_METHOD, @@ -248,7 +248,7 @@ def setUp(self): SpanAttributes.EXCEPTION_STACKTRACE: "Test stack trace" } - self.span_event = Mock(spec=Event) + self.span_event = Mock(spec=LogRecord) self.span_event.attributes = { 
SpanAttributes.EXCEPTION_MESSAGE: "Test span event message", SpanAttributes.EXCEPTION_STACKTRACE: "Test span event stack trace"
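For anyone updating similar tests, the renamed unit tests above stub the new wrapper the same way the old LogData was stubbed: a MagicMock specced to ReadableLogRecord whose log_record attribute carries the attributes under test. A condensed sketch, assuming the pinned opentelemetry-sdk exposes ReadableLogRecord as imported throughout this diff:

    from unittest.mock import MagicMock

    from opentelemetry.sdk._logs import ReadableLogRecord
    from opentelemetry.semconv.attributes.exception_attributes import EXCEPTION_TYPE

    mock_log_record = MagicMock()
    mock_log_record.attributes = {EXCEPTION_TYPE: "ValueError"}

    # Same stubbing shape used by the updated tests: the managers under test
    # only read .log_record.attributes from the wrapper.
    mock_readable_log_record = MagicMock(spec=ReadableLogRecord)
    mock_readable_log_record.log_record = mock_log_record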