Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions logfire-api/logfire_api/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -201,6 +201,8 @@ def instrument_system_metrics(self, *args, **kwargs) -> None: ...

def instrument_mcp(self, *args, **kwargs) -> None: ...

# No-op API shim: mirrors `Logfire.url_from_eval`; real behavior lives in the `logfire` package.
def url_from_eval(self, *args, **kwargs) -> None: ...

def shutdown(self, *args, **kwargs) -> None: ...

DEFAULT_LOGFIRE_INSTANCE = Logfire()
Expand Down Expand Up @@ -254,6 +256,7 @@ def shutdown(self, *args, **kwargs) -> None: ...
instrument_mcp = DEFAULT_LOGFIRE_INSTANCE.instrument_mcp
shutdown = DEFAULT_LOGFIRE_INSTANCE.shutdown
suppress_scopes = DEFAULT_LOGFIRE_INSTANCE.suppress_scopes
url_from_eval = DEFAULT_LOGFIRE_INSTANCE.url_from_eval

def loguru_handler() -> dict[str, Any]:
    """Return an empty dict (no-op stand-in for the real loguru handler config)."""
    return {}
Expand Down
2 changes: 2 additions & 0 deletions logfire/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@
with_tags = DEFAULT_LOGFIRE_INSTANCE.with_tags
# with_trace_sample_rate = DEFAULT_LOGFIRE_INSTANCE.with_trace_sample_rate
with_settings = DEFAULT_LOGFIRE_INSTANCE.with_settings
url_from_eval = DEFAULT_LOGFIRE_INSTANCE.url_from_eval

# Logging
log = DEFAULT_LOGFIRE_INSTANCE.log
Expand Down Expand Up @@ -176,4 +177,5 @@ def loguru_handler() -> Any:
'set_baggage',
'get_context',
'attach_context',
'url_from_eval',
)
12 changes: 6 additions & 6 deletions logfire/_internal/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -696,6 +696,7 @@ def _load_configuration(
self.advanced = advanced

self.additional_span_processors = additional_span_processors
self.project_url: str | None = None

if metrics is None:
metrics = MetricsOptions()
Expand Down Expand Up @@ -969,6 +970,7 @@ def add_span_processor(span_processor: SpanProcessor) -> None:
# This means that e.g. a token in an env var takes priority over a token in a creds file.
self.token = self.token or credentials.token
self.advanced.base_url = self.advanced.base_url or credentials.logfire_api_url
self.project_url = self.project_url or credentials.project_url

if self.token:
# Convert to list for iteration (handles both str and list[str])
Expand All @@ -994,12 +996,10 @@ def check_tokens():
with suppress_instrumentation():
for token in token_list:
validated_credentials = self._initialize_credentials_from_token(token)
if (
validated_credentials is not None
and show_project_link
and token not in printed_tokens
):
validated_credentials.print_token_summary()
if validated_credentials is not None:
self.project_url = self.project_url or validated_credentials.project_url
if show_project_link and token not in printed_tokens:
validated_credentials.print_token_summary()

if emscripten: # pragma: no cover
check_tokens()
Expand Down
17 changes: 17 additions & 0 deletions logfire/_internal/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,7 @@
from flask.app import Flask
from opentelemetry.instrumentation.asgi.types import ClientRequestHook, ClientResponseHook, ServerRequestHook
from opentelemetry.metrics import _Gauge as Gauge
from pydantic_evals.reporting import EvaluationReport
from pymongo.monitoring import CommandFailedEvent, CommandStartedEvent, CommandSucceededEvent
from sqlalchemy import Engine
from sqlalchemy.ext.asyncio import AsyncEngine
Expand Down Expand Up @@ -876,6 +877,22 @@ def force_flush(self, timeout_millis: int = 3_000) -> bool: # pragma: no cover
"""
return self._config.force_flush(timeout_millis)

def url_from_eval(self, report: EvaluationReport[Any, Any, Any]) -> str | None:
    """Generate a Logfire URL to view an evaluation report.

    Args:
        report: An evaluation report from `pydantic_evals`.

    Returns:
        The URL string, or `None` if the project URL or trace/span IDs are not available.
    """
    project_url = self._config.project_url
    trace_id = report.trace_id
    span_id = report.span_id
    # Falsy check also covers empty-string IDs, not just `None`.
    if not project_url or not trace_id or not span_id:
        return None
    # Normalize any trailing slash on the configured project URL (it may come from
    # credentials or user config) so the result never contains a double slash.
    base = project_url.rstrip('/')
    return f'{base}/evals/compare?experiment={trace_id}-{span_id}'
Copy link

Copilot AI Feb 9, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

project_url may come from credentials and could plausibly include a trailing / (e.g. if provided by a user or backend). Appending /{...} unconditionally can yield a double slash in the final URL. Consider normalizing with project_url = project_url.rstrip('/') before building the path (and ideally use urllib.parse.urlencode for the query string).

Copilot uses AI. Check for mistakes.

def log_slow_async_callbacks(self, slow_duration: float = 0.1) -> AbstractContextManager[None]:
"""Log a warning whenever a function running in the asyncio event loop blocks for too long.

Expand Down
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,7 @@ dev = [
"pytest-xdist>=3.6.1",
"openai-agents[voice]>=0.0.7",
"pydantic-ai-slim>=0.0.39",
"pydantic-evals>=0.0.39",
"langchain>=0.0.27",
"langchain-openai>=0.3.17",
"langgraph >= 0",
Expand Down
4 changes: 4 additions & 0 deletions tests/test_logfire_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -289,6 +289,10 @@ def func() -> None: ...
pass
logfire__all__.remove('attach_context')

assert hasattr(logfire_api, 'url_from_eval')
logfire_api.url_from_eval(MagicMock(trace_id='abc', span_id='def'))
logfire__all__.remove('url_from_eval')

# If it's not empty, it means that some of the __all__ members are not tested.
assert logfire__all__ == set(), logfire__all__

Expand Down
64 changes: 64 additions & 0 deletions tests/test_url_from_eval.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
from __future__ import annotations

import pytest

try:
from pydantic_evals.reporting import EvaluationReport
except Exception:
pytest.skip('pydantic_evals not importable (likely pydantic < 2.8)', allow_module_level=True)

import logfire
from logfire._internal.config import LogfireConfig


def _make_report(trace_id: str | None = None, span_id: str | None = None) -> EvaluationReport:
    """Build a minimal, case-less `EvaluationReport` carrying the given IDs."""
    fields = {'name': 'test', 'cases': [], 'trace_id': trace_id, 'span_id': span_id}
    return EvaluationReport(**fields)


def test_url_from_eval_with_project_url() -> None:
    """A configured project URL plus both IDs yields the compare-experiment link."""
    cfg = LogfireConfig(send_to_logfire=False, console=False)
    cfg.project_url = 'https://logfire.pydantic.dev/my-org/my-project'
    url = logfire.Logfire(config=cfg).url_from_eval(_make_report(trace_id='abc123', span_id='def456'))
    assert url == 'https://logfire.pydantic.dev/my-org/my-project/evals/compare?experiment=abc123-def456'


def test_url_from_eval_no_project_url() -> None:
    """Without a project URL the helper returns None even when both IDs are set."""
    cfg = LogfireConfig(send_to_logfire=False, console=False)
    report = _make_report(trace_id='abc123', span_id='def456')
    assert logfire.Logfire(config=cfg).url_from_eval(report) is None


def test_url_from_eval_no_trace_id() -> None:
    """A missing trace id makes the helper bail out with None."""
    cfg = LogfireConfig(send_to_logfire=False, console=False)
    cfg.project_url = 'https://logfire.pydantic.dev/my-org/my-project'
    assert logfire.Logfire(config=cfg).url_from_eval(_make_report(span_id='def456')) is None


def test_url_from_eval_no_span_id() -> None:
    """A missing span id makes the helper bail out with None."""
    cfg = LogfireConfig(send_to_logfire=False, console=False)
    cfg.project_url = 'https://logfire.pydantic.dev/my-org/my-project'
    assert logfire.Logfire(config=cfg).url_from_eval(_make_report(trace_id='abc123')) is None


def test_url_from_eval_no_ids() -> None:
    """With neither trace nor span id the helper returns None."""
    cfg = LogfireConfig(send_to_logfire=False, console=False)
    cfg.project_url = 'https://logfire.pydantic.dev/my-org/my-project'
    assert logfire.Logfire(config=cfg).url_from_eval(_make_report()) is None
73 changes: 65 additions & 8 deletions uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading