Skip to content
Open
Show file tree
Hide file tree
Changes from 20 commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
48e858a
added changes
yiphei Jan 7, 2026
5e431dc
added changes
yiphei Jan 7, 2026
06ce262
added changes
yiphei Jan 7, 2026
5a2b35d
fix attribute name
yiphei Jan 7, 2026
85c711f
Merge branch 'main' into support-custom-system
yiphei Jan 8, 2026
8df4eef
added changes
yiphei Jan 8, 2026
dd9702d
added changes
yiphei Jan 8, 2026
33c83d1
added changes
yiphei Jan 8, 2026
cd656b3
added changes
yiphei Jan 8, 2026
7f3d01f
added changes
yiphei Jan 8, 2026
3140257
added changes
yiphei Jan 8, 2026
0e079b2
Merge branch 'main' into support-custom-system
yiphei Jan 10, 2026
1bbd297
added changes
yiphei Jan 10, 2026
e8cbbaa
added changes
yiphei Jan 10, 2026
f33ddf3
added changes
yiphei Jan 10, 2026
36bba2f
refactored tests
yiphei Jan 10, 2026
fb2d2c0
Merge branch 'main' into support-custom-system
yiphei Jan 12, 2026
3cb7f36
added changes
yiphei Jan 12, 2026
d78496d
added changes
yiphei Jan 12, 2026
8b6085c
added changes
yiphei Jan 12, 2026
190fb88
Merge branch 'main' into support-custom-system
yiphei Jan 13, 2026
a200de6
added changes
yiphei Jan 13, 2026
fc506ab
added changes
yiphei Jan 13, 2026
ca42b29
added changes
yiphei Jan 13, 2026
85186ae
added changes
yiphei Jan 13, 2026
28e7ee0
Merge branch 'main' into support-custom-system
yiphei Jan 15, 2026
3f9255a
added changes
yiphei Jan 15, 2026
c702827
added changes
yiphei Jan 15, 2026
c0d1f2d
Merge branch 'main' into support-custom-system
yiphei Jan 19, 2026
a2aa776
Merge pull request #7 from yiphei/support-custom-system-pt2
yiphei Jan 19, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions logfire/_internal/integrations/llm_providers/llm_provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ def instrument_llm_provider(
get_endpoint_config_fn: Callable[[Any], EndpointConfig],
on_response_fn: Callable[[Any, LogfireSpan], Any],
is_async_client_fn: Callable[[type[Any]], bool],
override_provider: str | None = None,
) -> AbstractContextManager[None]:
"""Instruments the provided `client` (or clients) with `logfire`.

Expand Down Expand Up @@ -93,6 +94,8 @@ def _instrumentation_setup(*args: Any, **kwargs: Any) -> Any:
return None, None, kwargs

span_data['async'] = is_async
if override_provider is not None:
span_data['gen_ai.system'] = override_provider

if kwargs.get('stream') and stream_state_cls:
stream_cls = kwargs['stream_cls']
Expand Down
3 changes: 2 additions & 1 deletion logfire/_internal/integrations/llm_providers/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,8 @@ def on_response(response: ResponseT, span: LogfireSpan) -> ResponseT:
on_response(response.parse(), span) # type: ignore
return cast('ResponseT', response)

span.set_attribute('gen_ai.system', 'openai')
if (getattr(span, 'attributes', {}) or {}).get('gen_ai.system', None) is None:
span.set_attribute('gen_ai.system', 'openai')

if isinstance(response_model := getattr(response, 'model', None), str):
span.set_attribute('gen_ai.response.model', response_model)
Expand Down
9 changes: 9 additions & 0 deletions logfire/_internal/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -1171,6 +1171,7 @@ def instrument_openai(
| None = None,
*,
suppress_other_instrumentation: bool = True,
override_provider: None | str = None,
) -> AbstractContextManager[None]:
"""Instrument an OpenAI client so that spans are automatically created for each request.

Expand Down Expand Up @@ -1219,6 +1220,13 @@ def instrument_openai(
enabled. In reality, this means the HTTPX instrumentation, which could otherwise be called since
OpenAI uses HTTPX to make HTTP requests.

override_provider: If provided, override the provider name for the instrumented client, e.g. 'openrouter'.
Do this to get:
- Correct attribution in span attributes like `gen_ai.system`
- Cost calculation in the span attribute `operation.cost`, subject to `genai_prices` package support
- Cost calculation in the Logfire UI
The default provider is 'openai'.

Returns:
A context manager that will revert the instrumentation when exited.
Use of this context manager is optional.
Expand All @@ -1237,6 +1245,7 @@ def instrument_openai(
get_endpoint_config,
on_response,
is_async_client,
override_provider,
)

def instrument_openai_agents(self) -> None:
Expand Down
137 changes: 137 additions & 0 deletions tests/otel_integrations/test_openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from collections.abc import AsyncIterator, Iterator
from io import BytesIO
from typing import Any
from unittest.mock import MagicMock

import httpx
import openai
Expand All @@ -26,6 +27,7 @@
from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor

import logfire
from logfire._internal.integrations.llm_providers.openai import on_response
from logfire._internal.utils import get_version, suppress_instrumentation
from logfire.testing import TestExporter

Expand Down Expand Up @@ -2492,3 +2494,138 @@ def test_openrouter_streaming_reasoning(exporter: TestExporter) -> None:
},
]
)


def test_override_provider_sync(exporter: TestExporter) -> None:
    """Sync client: `override_provider` should control the `gen_ai.system` span attribute."""
    with httpx.Client(transport=MockTransport(request_handler)) as http_client:
        instrumented = openai.Client(api_key='foobar', http_client=http_client)
        logfire.instrument_openai(instrumented, override_provider='openrouter')

        completion = instrumented.chat.completions.create(
            model='gpt-4',
            messages=[
                {'role': 'system', 'content': 'You are a helpful assistant.'},
                {'role': 'user', 'content': 'What is four plus five?'},
            ],
        )

        # The mocked transport always answers 'Nine'; the interesting part is the span.
        assert completion.choices[0].message.content == 'Nine'
        exported = exporter.exported_spans_as_dict(parse_json_attributes=True)
        assert len(exported) == 1
        assert exported[0]['attributes']['gen_ai.system'] == 'openrouter'


async def test_override_provider_async(exporter: TestExporter) -> None:
    """Async client: `override_provider` should control the `gen_ai.system` span attribute."""
    async with httpx.AsyncClient(transport=MockTransport(request_handler)) as http_client:
        instrumented = openai.AsyncClient(api_key='foobar', http_client=http_client)
        logfire.instrument_openai(instrumented, override_provider='custom-provider')

        completion = await instrumented.chat.completions.create(
            model='gpt-4',
            messages=[
                {'role': 'system', 'content': 'You are a helpful assistant.'},
                {'role': 'user', 'content': 'What is four plus five?'},
            ],
        )

        # The mocked transport always answers 'Nine'; the interesting part is the span.
        assert completion.choices[0].message.content == 'Nine'
        exported = exporter.exported_spans_as_dict(parse_json_attributes=True)
        assert len(exported) == 1
        assert exported[0]['attributes']['gen_ai.system'] == 'custom-provider'


def test_override_provider_streaming(exporter: TestExporter) -> None:
    """Streaming responses should also carry the overridden `gen_ai.system` value."""
    with httpx.Client(transport=MockTransport(request_handler)) as http_client:
        instrumented = openai.Client(api_key='foobar', http_client=http_client)
        logfire.instrument_openai(instrumented, override_provider='openrouter')

        stream = instrumented.chat.completions.create(
            model='gpt-4',
            messages=[
                {'role': 'system', 'content': 'You are a helpful assistant.'},
                {'role': 'user', 'content': 'What is four plus five?'},
            ],
            stream=True,
        )

        # Drain the stream so the spans are finished and exported.
        for _chunk in stream:
            pass

        exported = exporter.exported_spans_as_dict(parse_json_attributes=True)
        # A streaming span is exported alongside the request span; pick out the latter.
        request_span = next(span for span in exported if 'Chat Completion' in span['name'])
        assert request_span['attributes']['gen_ai.system'] == 'openrouter'


def test_default_provider_is_openai(exporter: TestExporter) -> None:
    """Test that when override_provider is not set, gen_ai.system defaults to 'openai'.

    The default is applied by `on_response`, which only sets the attribute when no
    override was recorded on the span by `instrument_llm_provider`.
    """
    with httpx.Client(transport=MockTransport(request_handler)) as httpx_client:
        openai_client = openai.Client(api_key='foobar', http_client=httpx_client)
        # Not passing override_provider, so it should default to 'openai'
        logfire.instrument_openai(openai_client)

        response = openai_client.chat.completions.create(
            model='gpt-4',
            messages=[
                {'role': 'system', 'content': 'You are a helpful assistant.'},
                {'role': 'user', 'content': 'What is four plus five?'},
            ],
        )

        assert response.choices[0].message.content == 'Nine'
        spans = exporter.exported_spans_as_dict(parse_json_attributes=True)
        # Guard before indexing (mirrors the sibling override_provider tests), so a
        # missing span fails with a clear assertion rather than an IndexError.
        assert len(spans) == 1
        assert spans[0]['attributes']['gen_ai.system'] == 'openai'


@pytest.mark.parametrize(
    ('span_attributes', 'should_set_gen_ai_system'),
    [
        pytest.param({}, True, id='empty_attributes_sets_openai'),
        pytest.param(None, True, id='none_attributes_sets_openai'),
        pytest.param({'gen_ai.system': 'openrouter'}, False, id='existing_value_not_overwritten'),
    ],
)
def test_on_response_gen_ai_system_behavior(
    span_attributes: dict[str, str] | None, should_set_gen_ai_system: bool
) -> None:
    """Test that on_response sets gen_ai.system to 'openai' only when not already present."""
    # A MagicMock stands in for the LogfireSpan; `on_response` reads its
    # `attributes` mapping to decide whether the default provider must be set.
    mock_span = MagicMock()
    mock_span.attributes = span_attributes

    # Minimal but structurally valid ChatCompletion payload — the content values
    # are irrelevant here; only the set_attribute calls are inspected below.
    response = chat_completion.ChatCompletion(
        id='test_id',
        choices=[
            chat_completion.Choice(
                finish_reason='stop',
                index=0,
                message=chat_completion_message.ChatCompletionMessage(
                    content='Test response',
                    role='assistant',
                ),
            ),
        ],
        created=1634720000,
        model='gpt-4',
        object='chat.completion',
        usage=completion_usage.CompletionUsage(
            completion_tokens=1,
            prompt_tokens=2,
            total_tokens=3,
        ),
    )

    on_response(response, mock_span)

    # Keep only the set_attribute calls whose first positional arg targeted
    # 'gen_ai.system'; other attributes (model, usage, ...) are out of scope.
    gen_ai_system_calls = [call for call in mock_span.set_attribute.call_args_list if call[0][0] == 'gen_ai.system']
    if should_set_gen_ai_system:
        assert any(call[0] == ('gen_ai.system', 'openai') for call in gen_ai_system_calls), (
            f"Expected set_attribute('gen_ai.system', 'openai') to be called, got {gen_ai_system_calls}"
        )
    else:
        assert len(gen_ai_system_calls) == 0, (
            f"Expected no calls to set_attribute with 'gen_ai.system', got {gen_ai_system_calls}"
        )
71 changes: 71 additions & 0 deletions tests/test_llm_provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
from typing import Any
from unittest.mock import Mock

import pytest
from opentelemetry import trace

import logfire
Expand Down Expand Up @@ -175,3 +176,73 @@ async def test_async_streaming_preserves_original_context(exporter: TestExporter
assert streaming['context']['trace_id'] == expected_trace_id
assert request['parent']['span_id'] == expected_span_id
assert streaming['parent']['span_id'] == expected_span_id


@pytest.mark.parametrize(
    ('override_provider', 'expected_gen_ai_system'),
    [
        pytest.param('openrouter', 'openrouter', id='sets_custom_provider'),
        pytest.param(None, None, id='none_does_not_set_attribute'),
    ],
)
def test_override_provider_sync(
    exporter: TestExporter, override_provider: str | None, expected_gen_ai_system: str | None
) -> None:
    """Sync path: `override_provider` decides whether `gen_ai.system` is stamped on the request span."""
    mock_client = MockSyncClient()
    instrument_llm_provider(
        logfire=logfire.DEFAULT_LOGFIRE_INSTANCE,
        client=mock_client,
        suppress_otel=False,
        scope_suffix='test',
        get_endpoint_config_fn=get_endpoint_config,
        on_response_fn=on_response,
        is_async_client_fn=is_async_client,
        override_provider=override_provider,
    )

    mock_client.request(options=MockOptions())

    request_span = next(s for s in exporter.exported_spans_as_dict() if 'Test with' in s['name'])

    if expected_gen_ai_system is not None:
        assert request_span['attributes']['gen_ai.system'] == expected_gen_ai_system
    else:
        # With no override, instrument_llm_provider leaves gen_ai.system unset;
        # a provider-specific on_response hook (e.g. OpenAI's) would set it later.
        assert 'gen_ai.system' not in request_span['attributes']


@pytest.mark.parametrize(
    ('override_provider', 'expected_gen_ai_system'),
    [
        pytest.param('openrouter', 'openrouter', id='sets_custom_provider'),
        pytest.param(None, None, id='none_does_not_set_attribute'),
    ],
)
async def test_override_provider_async(
    exporter: TestExporter, override_provider: str | None, expected_gen_ai_system: str | None
) -> None:
    """Async path: `override_provider` decides whether `gen_ai.system` is stamped on the request span."""
    mock_client = MockAsyncClient()
    instrument_llm_provider(
        logfire=logfire.DEFAULT_LOGFIRE_INSTANCE,
        client=mock_client,
        suppress_otel=False,
        scope_suffix='test',
        get_endpoint_config_fn=get_endpoint_config,
        on_response_fn=on_response,
        is_async_client_fn=is_async_client,
        override_provider=override_provider,
    )

    await mock_client.request(options=MockOptions())

    request_span = next(s for s in exporter.exported_spans_as_dict() if 'Test with' in s['name'])

    if expected_gen_ai_system is not None:
        assert request_span['attributes']['gen_ai.system'] == expected_gen_ai_system
    else:
        # With no override, gen_ai.system is deliberately left unset here.
        assert 'gen_ai.system' not in request_span['attributes']
Loading