Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions logfire-api/logfire_api/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -201,6 +201,8 @@ def instrument_system_metrics(self, *args, **kwargs) -> None: ...

def instrument_mcp(self, *args, **kwargs) -> None: ...

# API shim mirroring `Logfire.url_from_eval` in the real `logfire` package.
# NOTE(review): the real method returns `str | None`; `-> None` matches the
# sibling shims here but understates the actual return type — confirm intent.
def url_from_eval(self, *args, **kwargs) -> None: ...

def shutdown(self, *args, **kwargs) -> None: ...

DEFAULT_LOGFIRE_INSTANCE = Logfire()
Expand Down Expand Up @@ -254,6 +256,7 @@ def shutdown(self, *args, **kwargs) -> None: ...
instrument_mcp = DEFAULT_LOGFIRE_INSTANCE.instrument_mcp
shutdown = DEFAULT_LOGFIRE_INSTANCE.shutdown
suppress_scopes = DEFAULT_LOGFIRE_INSTANCE.suppress_scopes
url_from_eval = DEFAULT_LOGFIRE_INSTANCE.url_from_eval

def loguru_handler() -> dict[str, Any]:
    """Return an empty handler config; the shim has no real loguru integration."""
    return dict()
Expand Down
2 changes: 2 additions & 0 deletions logfire/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@
with_tags = DEFAULT_LOGFIRE_INSTANCE.with_tags
# with_trace_sample_rate = DEFAULT_LOGFIRE_INSTANCE.with_trace_sample_rate
with_settings = DEFAULT_LOGFIRE_INSTANCE.with_settings
url_from_eval = DEFAULT_LOGFIRE_INSTANCE.url_from_eval

# Logging
log = DEFAULT_LOGFIRE_INSTANCE.log
Expand Down Expand Up @@ -176,4 +177,5 @@ def loguru_handler() -> Any:
'set_baggage',
'get_context',
'attach_context',
'url_from_eval',
)
49 changes: 31 additions & 18 deletions logfire/_internal/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -696,6 +696,8 @@ def _load_configuration(
self.advanced = advanced

self.additional_span_processors = additional_span_processors
self.project_url: str | None = None
self._check_tokens_thread: Thread | None = None
Comment on lines +699 to +700
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🟡 Stale project_url race when configure() is called multiple times due to orphaned background thread

When configure() is called a second time, _load_configuration() resets self._check_tokens_thread = None and self.project_url = None without joining the previous background thread. The orphaned thread from the first configure() can still write a stale project_url after the reset, and because self.project_url or ... is used, the new thread will then see it as already set and won't overwrite it.

Root cause and scenario
logfire.configure(token='token-A') → _load_configuration resets state, initialize() starts thread T1 which calls self._initialize_credentials_from_token('token-A') (an HTTP request)
  2. Before T1 finishes, logfire.configure(token='token-B') is called → _load_configuration() at logfire/_internal/config.py:699-700 resets self.project_url = None and self._check_tokens_thread = None (losing the T1 reference), then initialize() starts thread T2 for token-B
  3. T1 finishes its HTTP request and executes self.project_url = self.project_url or validated_credentials.project_url at logfire/_internal/config.py:1004, setting project_url to project-A's URL
  4. T2 finishes and runs the same line, but since self.project_url is already set (by T1), the or short-circuits and project-B's URL is never stored
  5. url_from_eval() at logfire/_internal/main.py:891 calls wait_for_token_validation() which only joins T2 (T1's reference was lost), reads self.project_url, and returns a URL pointing to the wrong project

Impact: url_from_eval could return a URL for a previous project configuration's dashboard instead of the current one. This is limited to the uncommon case of calling configure() multiple times with different tokens where the first token validation thread hasn't finished.

Prompt for agents
In _load_configuration (logfire/_internal/config.py around line 699), before resetting self._check_tokens_thread = None, join the old thread (if it exists) to ensure it has finished writing. This prevents the orphaned thread from writing a stale project_url after the reset. For example:

    self.additional_span_processors = additional_span_processors
    self.project_url: str | None = None
    if hasattr(self, '_check_tokens_thread') and self._check_tokens_thread is not None:
        self._check_tokens_thread.join(timeout=0)  # non-blocking, just try
    self._check_tokens_thread: Thread | None = None

Alternatively, a more robust approach is to use a generation counter: increment a counter in _load_configuration, capture it in the check_tokens closure, and only write self.project_url if the generation still matches. This avoids blocking configure() on an HTTP request.
Open in Devin Review

Was this helpful? React with 👍 or 👎 to provide feedback.


if metrics is None:
metrics = MetricsOptions()
Expand Down Expand Up @@ -943,19 +945,22 @@ def add_span_processor(span_processor: SpanProcessor) -> None:
if isinstance(self.metrics, MetricsOptions):
metric_readers = list(self.metrics.additional_readers)

# Try loading credentials from a file.
# We do this before checking send_to_logfire so that project_url
# is available for url_from_eval even when not sending data.
try:
credentials = LogfireCredentials.load_creds_file(self.data_dir)
except Exception:
# If we have tokens configured by other means, e.g. the env, no need to worry about the creds file.
if self.send_to_logfire and not self.token:
raise
credentials = None
if credentials is not None:
self.project_url = self.project_url or credentials.project_url

if self.send_to_logfire:
show_project_link: bool = self.console and self.console.show_project_link or False

# Try loading credentials from a file.
# If that works, we can use it to immediately print the project link.
try:
credentials = LogfireCredentials.load_creds_file(self.data_dir)
except Exception:
# If we have tokens configured by other means, e.g. the env, no need to worry about the creds file.
if not self.token:
raise
credentials = None

if not self.token and self.send_to_logfire is True and credentials is None:
# If we don't have tokens or credentials from a file,
# try initializing a new project and writing a new creds file.
Expand All @@ -969,6 +974,7 @@ def add_span_processor(span_processor: SpanProcessor) -> None:
# This means that e.g. a token in an env var takes priority over a token in a creds file.
self.token = self.token or credentials.token
self.advanced.base_url = self.advanced.base_url or credentials.logfire_api_url
self.project_url = self.project_url or credentials.project_url

if self.token:
# Convert to list for iteration (handles both str and list[str])
Expand All @@ -994,18 +1000,16 @@ def check_tokens():
with suppress_instrumentation():
for token in token_list:
validated_credentials = self._initialize_credentials_from_token(token)
if (
validated_credentials is not None
and show_project_link
and token not in printed_tokens
):
validated_credentials.print_token_summary()
if validated_credentials is not None:
self.project_url = self.project_url or validated_credentials.project_url
if show_project_link and token not in printed_tokens:
validated_credentials.print_token_summary()

if emscripten: # pragma: no cover
check_tokens()
else:
thread = Thread(target=check_tokens, name='check_logfire_token')
thread.start()
self._check_tokens_thread = Thread(target=check_tokens, name='check_logfire_token')
self._check_tokens_thread.start()

# Create exporters for each token
for token in token_list:
Expand Down Expand Up @@ -1227,6 +1231,15 @@ def warn_if_not_initialized(self, message: str):
category=LogfireNotConfiguredWarning,
)

def wait_for_token_validation(self) -> None:
    """Block until the background token-validation thread (if any) finishes.

    Ensures `project_url` has been populated in the case where the token was
    supplied directly (e.g. via environment variable) rather than through a
    credentials file.
    """
    thread = self._check_tokens_thread
    if thread is None:
        return
    thread.join()

def _initialize_credentials_from_token(self, token: str) -> LogfireCredentials | None:
    """Validate `token` against the backend and return the resulting credentials, if any."""
    session = requests.Session()
    base_url = self.advanced.generate_base_url(token)
    return LogfireCredentials.from_token(token, session, base_url)

Expand Down
20 changes: 20 additions & 0 deletions logfire/_internal/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,7 @@
from flask.app import Flask
from opentelemetry.instrumentation.asgi.types import ClientRequestHook, ClientResponseHook, ServerRequestHook
from opentelemetry.metrics import _Gauge as Gauge
from pydantic_evals.reporting import EvaluationReport
from pymongo.monitoring import CommandFailedEvent, CommandStartedEvent, CommandSucceededEvent
from sqlalchemy import Engine
from sqlalchemy.ext.asyncio import AsyncEngine
Expand Down Expand Up @@ -876,6 +877,25 @@ def force_flush(self, timeout_millis: int = 3_000) -> bool: # pragma: no cover
"""
return self._config.force_flush(timeout_millis)

def url_from_eval(self, report: EvaluationReport[Any, Any, Any]) -> str | None:
    """Generate a Logfire URL to view an evaluation report.

    Args:
        report: An evaluation report from `pydantic_evals`.

    Returns:
        The URL string, or `None` if the project URL or trace/span IDs are not available.
    """
    # The background token-validation thread may be the only thing that sets
    # project_url (e.g. when no credentials file exists), so wait for it first.
    self._config.wait_for_token_validation()
    base = self._config.project_url
    if not (base and report.trace_id and report.span_id):
        return None
    experiment = f'{report.trace_id}-{report.span_id}'
    return f'{base.rstrip("/")}/evals/compare?experiment={experiment}'

def log_slow_async_callbacks(self, slow_duration: float = 0.1) -> AbstractContextManager[None]:
"""Log a warning whenever a function running in the asyncio event loop blocks for too long.

Expand Down
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,7 @@ dev = [
"pytest-xdist>=3.6.1",
"openai-agents[voice]>=0.0.7",
"pydantic-ai-slim>=0.0.39",
"pydantic-evals>=0.0.39",
"langchain>=0.0.27",
"langchain-openai>=0.3.17",
"langgraph >= 0",
Expand Down
4 changes: 4 additions & 0 deletions tests/test_logfire_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -294,6 +294,10 @@ def func() -> None: ...
pass
logfire__all__.remove('attach_context')

assert hasattr(logfire_api, 'url_from_eval')
logfire_api.url_from_eval(MagicMock(trace_id='abc', span_id='def'))
logfire__all__.remove('url_from_eval')

# If it's not empty, it means that some of the __all__ members are not tested.
assert logfire__all__ == set(), logfire__all__

Expand Down
98 changes: 98 additions & 0 deletions tests/test_url_from_eval.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
from __future__ import annotations

import pytest
import requests_mock

try:
from pydantic_evals.reporting import EvaluationReport
except Exception:
pytest.skip('pydantic_evals not importable (likely pydantic < 2.8)', allow_module_level=True)

import logfire
from logfire._internal.config import LogfireConfig


def _make_report(trace_id: str | None = None, span_id: str | None = None) -> EvaluationReport:
    """Build a minimal report (no cases) carrying the given trace/span IDs."""
    return EvaluationReport(
        name='test',
        cases=[],
        trace_id=trace_id,
        span_id=span_id,
    )


def test_url_from_eval_with_project_url() -> None:
    """A configured project_url plus both IDs yields the compare URL."""
    cfg = LogfireConfig(send_to_logfire=False, console=False)
    cfg.project_url = 'https://logfire.pydantic.dev/my-org/my-project'
    got = logfire.Logfire(config=cfg).url_from_eval(_make_report(trace_id='abc123', span_id='def456'))
    assert got == 'https://logfire.pydantic.dev/my-org/my-project/evals/compare?experiment=abc123-def456'


def test_url_from_eval_no_project_url() -> None:
    """Without a project_url, url_from_eval returns None."""
    instance = logfire.Logfire(config=LogfireConfig(send_to_logfire=False, console=False))
    assert instance.url_from_eval(_make_report(trace_id='abc123', span_id='def456')) is None


def test_url_from_eval_no_trace_id() -> None:
    """A report missing its trace_id cannot produce a URL."""
    cfg = LogfireConfig(send_to_logfire=False, console=False)
    cfg.project_url = 'https://logfire.pydantic.dev/my-org/my-project'
    assert logfire.Logfire(config=cfg).url_from_eval(_make_report(span_id='def456')) is None


def test_url_from_eval_no_span_id() -> None:
    """A report missing its span_id cannot produce a URL."""
    cfg = LogfireConfig(send_to_logfire=False, console=False)
    cfg.project_url = 'https://logfire.pydantic.dev/my-org/my-project'
    assert logfire.Logfire(config=cfg).url_from_eval(_make_report(trace_id='abc123')) is None


def test_url_from_eval_trailing_slash() -> None:
    """A trailing slash on project_url is stripped before the path is appended."""
    cfg = LogfireConfig(send_to_logfire=False, console=False)
    cfg.project_url = 'https://logfire.pydantic.dev/my-org/my-project/'
    got = logfire.Logfire(config=cfg).url_from_eval(_make_report(trace_id='abc123', span_id='def456'))
    assert got == 'https://logfire.pydantic.dev/my-org/my-project/evals/compare?experiment=abc123-def456'


def test_url_from_eval_no_ids() -> None:
    """With neither trace_id nor span_id set, url_from_eval returns None."""
    cfg = LogfireConfig(send_to_logfire=False, console=False)
    cfg.project_url = 'https://logfire.pydantic.dev/my-org/my-project'
    assert logfire.Logfire(config=cfg).url_from_eval(_make_report()) is None


def test_url_from_eval_waits_for_token_validation() -> None:
    """url_from_eval must join the background token-validation thread so that
    project_url is available when the token was provided directly (no creds file)."""
    with requests_mock.Mocker() as mocker:
        mocker.get(
            'https://logfire-us.pydantic.dev/v1/info',
            json={
                'project_name': 'myproject',
                'project_url': 'https://logfire-us.pydantic.dev/my-org/my-project',
            },
        )
        logfire.configure(
            send_to_logfire=True,
            token='fake-token',
            console=False,
        )

        # The call should block until the validation thread has populated
        # project_url, then return the full compare URL.
        got = logfire.url_from_eval(_make_report(trace_id='abc123', span_id='def456'))
        assert got == 'https://logfire-us.pydantic.dev/my-org/my-project/evals/compare?experiment=abc123-def456'
61 changes: 59 additions & 2 deletions uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading