Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 1 addition & 3 deletions camel/utils/langfuse.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,9 +141,7 @@ def get_current_agent_session_id() -> Optional[str]:
Returns:
Optional[str]: The session ID for the current agent.
"""
if is_langfuse_available():
return _agent_session_id_var.get()
return None
return _agent_session_id_var.get()


def update_langfuse_trace(
Expand Down
37 changes: 37 additions & 0 deletions test/models/test_openai_compatible_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
from unittest.mock import MagicMock

import pytest
from httpx import URL
from pytest import MonkeyPatch
Expand Down Expand Up @@ -61,3 +63,38 @@ def test_openai_compatible_model_apikey_from_env(monkeypatch: MonkeyPatch):
assert model._client.base_url == URL(url)
assert model._async_client.api_key == api_key
assert model._async_client.base_url == URL(url)


def test_openai_compatible_responses_uses_agent_session_scope_when_langfuse_is_disabled(
    monkeypatch,
):
    """Chain state must be keyed by the current agent session even when
    Langfuse is not configured: state saved under agent-a must not leak
    into a lookup performed under agent-b."""
    from camel.utils.langfuse import set_current_agent_session_id

    # Force the "Langfuse disabled" code path.
    monkeypatch.setattr("camel.utils.langfuse._langfuse_configured", False)

    model = OpenAICompatibleModel(
        model_type="dummy-model",
        api_key="dummy",
        url="https://example.invalid",
        api_mode="responses",
        client=MagicMock(),
        async_client=MagicMock(),
    )

    # Persist chain state while agent-a is the active session.
    set_current_agent_session_id("agent-a")
    session_key = model._get_response_chain_session_key()
    model._save_response_chain_state(session_key, "resp_a", 2)

    # Switch sessions; agent-b must see a fresh (empty) chain.
    set_current_agent_session_id("agent-b")
    conversation = [
        {"role": "system", "content": "system-b"},
        {"role": "user", "content": "hello-b"},
        {"role": "assistant", "content": "reply-b"},
    ]
    chain_state = model._prepare_responses_input_and_chain(
        conversation,
        chain_enabled=True,
    )

    assert chain_state["session_key"] == "agent-b"
    assert chain_state["previous_response_id"] is None
53 changes: 53 additions & 0 deletions test/models/test_openai_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -372,6 +372,59 @@ def test_responses_mode_uses_previous_response_id_and_delta_input():
assert second_call_kwargs["input"][-1]["content"] == "Continue"


def test_responses_mode_uses_agent_session_scope_when_langfuse_is_disabled(
    monkeypatch,
):
    """With Langfuse disabled, response-chain state stored for one agent
    session must not be picked up when a different agent session prepares
    its input chain."""
    from camel.utils.langfuse import set_current_agent_session_id

    # Simulate a deployment where Langfuse was never configured.
    monkeypatch.setattr("camel.utils.langfuse._langfuse_configured", False)

    canned_response = {
        "id": "resp_any",
        "created_at": 1741294021,
        "usage": {
            "input_tokens": 1,
            "output_tokens": 1,
            "total_tokens": 2,
        },
        "output": [
            {
                "type": "message",
                "role": "assistant",
                "content": [{"type": "output_text", "text": "ok"}],
            }
        ],
    }

    with patch("camel.models.openai_model.OpenAI") as mock_openai:
        mock_client = MagicMock()
        mock_client.responses.create.return_value = canned_response
        mock_openai.return_value = mock_client

        model = OpenAIModel(
            model_type=ModelType.GPT_4O_MINI,
            api_mode="responses",
            api_key="dummy",
        )

        # Save chain state under the first agent's session key.
        set_current_agent_session_id("agent-a")
        session_key = model._get_response_chain_session_key()
        model._save_response_chain_state(session_key, "resp_a", 2)

        # A different agent session must start with an empty chain.
        set_current_agent_session_id("agent-b")
        conversation = [
            {"role": "system", "content": "system-b"},
            {"role": "user", "content": "hello-b"},
            {"role": "assistant", "content": "reply-b"},
        ]
        chain_state = model._prepare_responses_input_and_chain(
            conversation,
            chain_enabled=True,
        )

        assert chain_state["session_key"] == "agent-b"
        assert chain_state["previous_response_id"] is None


def test_responses_mode_normalizes_function_tools_schema():
with patch("camel.models.openai_model.OpenAI") as mock_openai:
mock_client = MagicMock()
Expand Down
Loading