-
Notifications
You must be signed in to change notification settings - Fork 214
feat: add logfire.url_from_eval(report) method
#1694
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
d654bed
17de3ed
7d6cc6e
9e09e23
0b2ad3e
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -696,6 +696,8 @@ def _load_configuration( | |
| self.advanced = advanced | ||
|
|
||
| self.additional_span_processors = additional_span_processors | ||
| self.project_url: str | None = None | ||
| self._check_tokens_thread: Thread | None = None | ||
|
Comment on lines
+699
to
+700
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. 🟡 Stale — Root cause and scenario
Impact: Prompt for agents. Was this helpful? React with 👍 or 👎 to provide feedback. |
||
|
|
||
| if metrics is None: | ||
| metrics = MetricsOptions() | ||
|
|
@@ -943,19 +945,22 @@ def add_span_processor(span_processor: SpanProcessor) -> None: | |
| if isinstance(self.metrics, MetricsOptions): | ||
| metric_readers = list(self.metrics.additional_readers) | ||
|
|
||
| # Try loading credentials from a file. | ||
| # We do this before checking send_to_logfire so that project_url | ||
| # is available for url_from_eval even when not sending data. | ||
| try: | ||
| credentials = LogfireCredentials.load_creds_file(self.data_dir) | ||
| except Exception: | ||
| # If we have tokens configured by other means, e.g. the env, no need to worry about the creds file. | ||
| if self.send_to_logfire and not self.token: | ||
| raise | ||
| credentials = None | ||
| if credentials is not None: | ||
| self.project_url = self.project_url or credentials.project_url | ||
|
|
||
| if self.send_to_logfire: | ||
| show_project_link: bool = self.console and self.console.show_project_link or False | ||
|
|
||
| # Try loading credentials from a file. | ||
| # If that works, we can use it to immediately print the project link. | ||
| try: | ||
| credentials = LogfireCredentials.load_creds_file(self.data_dir) | ||
| except Exception: | ||
| # If we have tokens configured by other means, e.g. the env, no need to worry about the creds file. | ||
| if not self.token: | ||
| raise | ||
| credentials = None | ||
|
|
||
| if not self.token and self.send_to_logfire is True and credentials is None: | ||
| # If we don't have tokens or credentials from a file, | ||
| # try initializing a new project and writing a new creds file. | ||
|
|
@@ -969,6 +974,7 @@ def add_span_processor(span_processor: SpanProcessor) -> None: | |
| # This means that e.g. a token in an env var takes priority over a token in a creds file. | ||
| self.token = self.token or credentials.token | ||
| self.advanced.base_url = self.advanced.base_url or credentials.logfire_api_url | ||
| self.project_url = self.project_url or credentials.project_url | ||
|
|
||
| if self.token: | ||
| # Convert to list for iteration (handles both str and list[str]) | ||
|
|
@@ -994,18 +1000,16 @@ def check_tokens(): | |
| with suppress_instrumentation(): | ||
| for token in token_list: | ||
| validated_credentials = self._initialize_credentials_from_token(token) | ||
| if ( | ||
| validated_credentials is not None | ||
| and show_project_link | ||
| and token not in printed_tokens | ||
| ): | ||
| validated_credentials.print_token_summary() | ||
| if validated_credentials is not None: | ||
| self.project_url = self.project_url or validated_credentials.project_url | ||
| if show_project_link and token not in printed_tokens: | ||
| validated_credentials.print_token_summary() | ||
|
|
||
| if emscripten: # pragma: no cover | ||
| check_tokens() | ||
devin-ai-integration[bot] marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| else: | ||
| thread = Thread(target=check_tokens, name='check_logfire_token') | ||
| thread.start() | ||
| self._check_tokens_thread = Thread(target=check_tokens, name='check_logfire_token') | ||
| self._check_tokens_thread.start() | ||
|
|
||
| # Create exporters for each token | ||
| for token in token_list: | ||
|
|
@@ -1227,6 +1231,15 @@ def warn_if_not_initialized(self, message: str): | |
| category=LogfireNotConfiguredWarning, | ||
| ) | ||
|
|
||
| def wait_for_token_validation(self) -> None: | ||
| """Wait for the background token validation thread to complete. | ||
|
|
||
| This ensures that `project_url` is populated when the token is provided | ||
| via environment variable rather than a credentials file. | ||
| """ | ||
| if self._check_tokens_thread is not None: | ||
| self._check_tokens_thread.join() | ||
|
|
||
| def _initialize_credentials_from_token(self, token: str) -> LogfireCredentials | None: | ||
| return LogfireCredentials.from_token(token, requests.Session(), self.advanced.generate_base_url(token)) | ||
|
|
||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,98 @@ | ||
| from __future__ import annotations | ||
|
|
||
| import pytest | ||
| import requests_mock | ||
|
|
||
| try: | ||
| from pydantic_evals.reporting import EvaluationReport | ||
| except Exception: | ||
| pytest.skip('pydantic_evals not importable (likely pydantic < 2.8)', allow_module_level=True) | ||
|
|
||
| import logfire | ||
| from logfire._internal.config import LogfireConfig | ||
|
|
||
|
|
||
| def _make_report(trace_id: str | None = None, span_id: str | None = None) -> EvaluationReport: | ||
| return EvaluationReport(name='test', cases=[], trace_id=trace_id, span_id=span_id) | ||
|
|
||
|
|
||
| def test_url_from_eval_with_project_url() -> None: | ||
| config = LogfireConfig(send_to_logfire=False, console=False) | ||
| config.project_url = 'https://logfire.pydantic.dev/my-org/my-project' | ||
| instance = logfire.Logfire(config=config) | ||
|
|
||
| report = _make_report(trace_id='abc123', span_id='def456') | ||
| result = instance.url_from_eval(report) | ||
| assert result == 'https://logfire.pydantic.dev/my-org/my-project/evals/compare?experiment=abc123-def456' | ||
|
|
||
|
|
||
| def test_url_from_eval_no_project_url() -> None: | ||
| config = LogfireConfig(send_to_logfire=False, console=False) | ||
| instance = logfire.Logfire(config=config) | ||
|
|
||
| report = _make_report(trace_id='abc123', span_id='def456') | ||
| result = instance.url_from_eval(report) | ||
| assert result is None | ||
|
|
||
|
|
||
| def test_url_from_eval_no_trace_id() -> None: | ||
| config = LogfireConfig(send_to_logfire=False, console=False) | ||
| config.project_url = 'https://logfire.pydantic.dev/my-org/my-project' | ||
| instance = logfire.Logfire(config=config) | ||
|
|
||
| report = _make_report(span_id='def456') | ||
| result = instance.url_from_eval(report) | ||
| assert result is None | ||
|
|
||
|
|
||
| def test_url_from_eval_no_span_id() -> None: | ||
| config = LogfireConfig(send_to_logfire=False, console=False) | ||
| config.project_url = 'https://logfire.pydantic.dev/my-org/my-project' | ||
| instance = logfire.Logfire(config=config) | ||
|
|
||
| report = _make_report(trace_id='abc123') | ||
| result = instance.url_from_eval(report) | ||
| assert result is None | ||
|
|
||
|
|
||
| def test_url_from_eval_trailing_slash() -> None: | ||
| config = LogfireConfig(send_to_logfire=False, console=False) | ||
| config.project_url = 'https://logfire.pydantic.dev/my-org/my-project/' | ||
| instance = logfire.Logfire(config=config) | ||
|
|
||
| report = _make_report(trace_id='abc123', span_id='def456') | ||
| result = instance.url_from_eval(report) | ||
| assert result == 'https://logfire.pydantic.dev/my-org/my-project/evals/compare?experiment=abc123-def456' | ||
|
|
||
|
|
||
| def test_url_from_eval_no_ids() -> None: | ||
| config = LogfireConfig(send_to_logfire=False, console=False) | ||
| config.project_url = 'https://logfire.pydantic.dev/my-org/my-project' | ||
| instance = logfire.Logfire(config=config) | ||
|
|
||
| report = _make_report() | ||
| result = instance.url_from_eval(report) | ||
| assert result is None | ||
|
|
||
|
|
||
| def test_url_from_eval_waits_for_token_validation() -> None: | ||
| """Test that url_from_eval waits for the background token validation thread | ||
| to populate project_url when the token is provided directly (no creds file).""" | ||
| with requests_mock.Mocker() as mocker: | ||
| mocker.get( | ||
| 'https://logfire-us.pydantic.dev/v1/info', | ||
| json={ | ||
| 'project_name': 'myproject', | ||
| 'project_url': 'https://logfire-us.pydantic.dev/my-org/my-project', | ||
| }, | ||
| ) | ||
| logfire.configure( | ||
| send_to_logfire=True, | ||
| token='fake-token', | ||
| console=False, | ||
| ) | ||
|
|
||
| report = _make_report(trace_id='abc123', span_id='def456') | ||
| # url_from_eval should wait for the background thread and return the URL | ||
| result = logfire.url_from_eval(report) | ||
| assert result == 'https://logfire-us.pydantic.dev/my-org/my-project/evals/compare?experiment=abc123-def456' | ||
alexmojaki marked this conversation as resolved.
Show resolved
Hide resolved
|
||
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Uh oh!
There was an error while loading. Please reload this page.