7 changes: 7 additions & 0 deletions sentry_sdk/integrations/langchain.py
@@ -432,6 +432,13 @@ def on_chat_model_start(
                     SPANDATA.GEN_AI_AGENT_NAME, agent_metadata["lc_agent_name"]
                 )
 
+            run_name = kwargs.get("name")
+            if run_name:
+                span.set_data(
+                    SPANDATA.GEN_AI_PIPELINE_NAME,
+                    run_name,
+                )
+
             for key, attribute in DATA_FIELDS.items():
                 if key in all_params and all_params[key] is not None:
                     set_data_normalized(span, attribute, all_params[key], unpack=False)
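For context, a minimal sketch of what this hunk enables (the DSN and model are placeholder assumptions; the config={"run_name": ...} pattern and the resulting gen_ai.pipeline.name attribute are exactly what the new test below exercises):

    import sentry_sdk
    from langchain_openai import ChatOpenAI

    sentry_sdk.init(dsn="...", traces_sample_rate=1.0)  # placeholder DSN

    llm = ChatOpenAI(model_name="gpt-3.5-turbo")  # placeholder model/credentials
    with sentry_sdk.start_transaction(name="demo"):
        # LangChain forwards the configured run name to its callbacks as
        # kwargs["name"]; the integration now records it on the chat span
        # as gen_ai.pipeline.name.
        llm.invoke("Hello", config={"run_name": "my-snazzy-pipeline"})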
24 changes: 24 additions & 0 deletions tests/conftest.py
@@ -1102,6 +1102,30 @@ def nonstreaming_responses_model_response():
     )
 
 
+@pytest.fixture
+def nonstreaming_chat_completions_model_response():
+    return openai.types.chat.ChatCompletion(
+        id="chat-id",
+        choices=[
+            openai.types.chat.chat_completion.Choice(
+                index=0,
+                finish_reason="stop",
+                message=openai.types.chat.ChatCompletionMessage(
+                    role="assistant", content="the model response"
+                ),
+            )
+        ],
+        created=10000000,
+        model="response-model-id",
+        object="chat.completion",
+        usage=openai.types.CompletionUsage(
+            completion_tokens=10,
+            prompt_tokens=20,
+            total_tokens=30,
+        ),
+    )
+
+
 @pytest.fixture
 def responses_tool_call_model_responses():
     def inner(
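This fixture reproduces the canned response that tests/integrations/openai/test_openai.py previously kept as a module-level EXAMPLE_CHAT_COMPLETION constant (removed below), so the OpenAI and LangChain suites can share it. The consumption pattern, as used throughout the updated tests (test_example is a hypothetical name):

    def test_example(sentry_init, nonstreaming_chat_completions_model_response):
        client = OpenAI(api_key="z")
        # Patch the client's private _post so no network request is made;
        # the call returns the canned ChatCompletion from the fixture.
        client.chat.completions._post = mock.Mock(
            return_value=nonstreaming_chat_completions_model_response
        )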
53 changes: 53 additions & 0 deletions tests/integrations/langchain/test_langchain.py
@@ -67,6 +67,7 @@
 )
 
 LANGCHAIN_VERSION = package_version("langchain")
+LANGCHAIN_OPENAI_VERSION = package_version("langchain-openai")
 
 
 @tool
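package_version returns the installed version as a tuple of ints (or None when the package is not installed), which is what makes the tuple comparison in the new test below work. A sketch, assuming sentry_sdk's helper:

    from sentry_sdk.utils import package_version

    v = package_version("langchain-openai")  # e.g. (0, 3, 32); None if missing
    if v is not None and v >= (0, 3, 32):
        pass  # newer langchain-openai requests raw responses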
@@ -170,6 +171,58 @@ def test_langchain_text_completion(
     assert llm_span["data"]["gen_ai.usage.output_tokens"] == 15
 
 
+def test_langchain_chat(
+    sentry_init,
+    capture_events,
+    get_model_response,
+    nonstreaming_chat_completions_model_response,
+):
+    sentry_init(
+        integrations=[
+            LangchainIntegration(
+                include_prompts=True,
+            )
+        ],
+        traces_sample_rate=1.0,
+        send_default_pii=True,
+    )
+    events = capture_events()
+
+    request_headers = {}
+    # Changed in https://github.com/langchain-ai/langchain/pull/32655
+    if LANGCHAIN_OPENAI_VERSION >= (0, 3, 32):
+        request_headers["X-Stainless-Raw-Response"] = "True"
+
+    model_response = get_model_response(
+        nonstreaming_chat_completions_model_response,
+        serialize_pydantic=True,
+        request_headers=request_headers,
+    )
+
+    llm = ChatOpenAI(
+        model_name="gpt-3.5-turbo",
+        temperature=0,
+        openai_api_key="badkey",
+    )
+
+    with patch.object(
+        llm.client._client._client,
+        "send",
+        return_value=model_response,
+    ) as _:
+        with start_transaction():
+            llm.invoke(
+                "How many letters in the word eudca",
+                config={"run_name": "my-snazzy-pipeline"},
+            )
+
+    tx = events[0]
+
+    chat_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.chat")
+    assert len(chat_spans) == 1
+    assert chat_spans[0]["data"]["gen_ai.pipeline.name"] == "my-snazzy-pipeline"
+
+
 @pytest.mark.skipif(
     LANGCHAIN_VERSION < (1,),
     reason="LangChain 1.0+ required (ONE AGENT refactor)",
109 changes: 68 additions & 41 deletions tests/integrations/openai/test_openai.py
@@ -15,9 +15,8 @@
 Omit = None
 
 from openai import AsyncOpenAI, OpenAI, AsyncStream, Stream, OpenAIError
-from openai.types import CompletionUsage, CreateEmbeddingResponse, Embedding
-from openai.types.chat import ChatCompletion, ChatCompletionMessage, ChatCompletionChunk
-from openai.types.chat.chat_completion import Choice
+from openai.types import CreateEmbeddingResponse, Embedding
+from openai.types.chat import ChatCompletionChunk
 from openai.types.chat.chat_completion_chunk import ChoiceDelta, Choice as DeltaChoice
 from openai.types.create_embedding_response import Usage as EmbeddingTokenUsage
 
@@ -60,26 +59,6 @@ async def __call__(self, *args, **kwargs):
 
 
 OPENAI_VERSION = package_version("openai")
-EXAMPLE_CHAT_COMPLETION = ChatCompletion(
-    id="chat-id",
-    choices=[
-        Choice(
-            index=0,
-            finish_reason="stop",
-            message=ChatCompletionMessage(
-                role="assistant", content="the model response"
-            ),
-        )
-    ],
-    created=10000000,
-    model="response-model-id",
-    object="chat.completion",
-    usage=CompletionUsage(
-        completion_tokens=10,
-        prompt_tokens=20,
-        total_tokens=30,
-    ),
-)
 
 
 if SKIP_RESPONSES_TESTS:
@@ -131,7 +110,11 @@ async def __call__(self, *args, **kwargs):
     ],
 )
 def test_nonstreaming_chat_completion_no_prompts(
-    sentry_init, capture_events, send_default_pii, include_prompts
+    sentry_init,
+    capture_events,
+    send_default_pii,
+    include_prompts,
+    nonstreaming_chat_completions_model_response,
 ):
     sentry_init(
         integrations=[OpenAIIntegration(include_prompts=include_prompts)],
@@ -141,7 +124,9 @@ def test_nonstreaming_chat_completion_no_prompts(
     events = capture_events()
 
     client = OpenAI(api_key="z")
-    client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION)
+    client.chat.completions._post = mock.Mock(
+        return_value=nonstreaming_chat_completions_model_response
+    )
 
     with start_transaction(name="openai tx"):
         response = (
@@ -228,7 +213,13 @@ def test_nonstreaming_chat_completion_no_prompts(
         ),
     ],
 )
-def test_nonstreaming_chat_completion(sentry_init, capture_events, messages, request):
+def test_nonstreaming_chat_completion(
+    sentry_init,
+    capture_events,
+    messages,
+    request,
+    nonstreaming_chat_completions_model_response,
+):
     sentry_init(
         integrations=[OpenAIIntegration(include_prompts=True)],
         traces_sample_rate=1.0,
@@ -237,7 +228,9 @@ def test_nonstreaming_chat_completion(sentry_init, capture_events, messages, request):
     events = capture_events()
 
     client = OpenAI(api_key="z")
-    client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION)
+    client.chat.completions._post = mock.Mock(
+        return_value=nonstreaming_chat_completions_model_response
+    )
 
     with start_transaction(name="openai tx"):
         response = (
@@ -307,7 +300,11 @@
     ],
 )
 async def test_nonstreaming_chat_completion_async_no_prompts(
-    sentry_init, capture_events, send_default_pii, include_prompts
+    sentry_init,
+    capture_events,
+    send_default_pii,
+    include_prompts,
+    nonstreaming_chat_completions_model_response,
 ):
     sentry_init(
         integrations=[OpenAIIntegration(include_prompts=include_prompts)],
@@ -317,7 +314,9 @@ async def test_nonstreaming_chat_completion_async_no_prompts(
     events = capture_events()
 
     client = AsyncOpenAI(api_key="z")
-    client.chat.completions._post = mock.AsyncMock(return_value=EXAMPLE_CHAT_COMPLETION)
+    client.chat.completions._post = mock.AsyncMock(
+        return_value=nonstreaming_chat_completions_model_response
+    )
 
     with start_transaction(name="openai tx"):
         response = await client.chat.completions.create(
@@ -403,7 +402,11 @@
     ],
 )
 async def test_nonstreaming_chat_completion_async(
-    sentry_init, capture_events, messages, request
+    sentry_init,
+    capture_events,
+    messages,
+    request,
+    nonstreaming_chat_completions_model_response,
 ):
     sentry_init(
         integrations=[OpenAIIntegration(include_prompts=True)],
@@ -413,7 +416,9 @@ async def test_nonstreaming_chat_completion_async(
     events = capture_events()
 
     client = AsyncOpenAI(api_key="z")
-    client.chat.completions._post = AsyncMock(return_value=EXAMPLE_CHAT_COMPLETION)
+    client.chat.completions._post = AsyncMock(
+        return_value=nonstreaming_chat_completions_model_response
+    )
 
     with start_transaction(name="openai tx"):
         response = await client.chat.completions.create(
@@ -1551,15 +1556,19 @@ async def test_embeddings_create_raises_error_async(
     assert event["level"] == "error"
 
 
-def test_span_origin_nonstreaming_chat(sentry_init, capture_events):
+def test_span_origin_nonstreaming_chat(
+    sentry_init, capture_events, nonstreaming_chat_completions_model_response
+):
     sentry_init(
         integrations=[OpenAIIntegration()],
         traces_sample_rate=1.0,
     )
     events = capture_events()
 
     client = OpenAI(api_key="z")
-    client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION)
+    client.chat.completions._post = mock.Mock(
+        return_value=nonstreaming_chat_completions_model_response
+    )
 
     with start_transaction(name="openai tx"):
         client.chat.completions.create(
@@ -1573,15 +1582,19 @@ def test_span_origin_nonstreaming_chat(sentry_init, capture_events):
 
 
 @pytest.mark.asyncio
-async def test_span_origin_nonstreaming_chat_async(sentry_init, capture_events):
+async def test_span_origin_nonstreaming_chat_async(
+    sentry_init, capture_events, nonstreaming_chat_completions_model_response
+):
     sentry_init(
         integrations=[OpenAIIntegration()],
         traces_sample_rate=1.0,
     )
     events = capture_events()
 
     client = AsyncOpenAI(api_key="z")
-    client.chat.completions._post = AsyncMock(return_value=EXAMPLE_CHAT_COMPLETION)
+    client.chat.completions._post = AsyncMock(
+        return_value=nonstreaming_chat_completions_model_response
+    )
 
     with start_transaction(name="openai tx"):
         await client.chat.completions.create(
@@ -3125,15 +3138,19 @@ async def test_streaming_responses_api_async(
     "tools",
     [[], None, NOT_GIVEN, omit],
 )
-def test_empty_tools_in_chat_completion(sentry_init, capture_events, tools):
+def test_empty_tools_in_chat_completion(
+    sentry_init, capture_events, tools, nonstreaming_chat_completions_model_response
+):
     sentry_init(
         integrations=[OpenAIIntegration()],
         traces_sample_rate=1.0,
     )
     events = capture_events()
 
     client = OpenAI(api_key="z")
-    client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION)
+    client.chat.completions._post = mock.Mock(
+        return_value=nonstreaming_chat_completions_model_response
+    )
 
     with start_transaction(name="openai tx"):
         client.chat.completions.create(
@@ -3164,7 +3181,11 @@ def test_empty_tools_in_chat_completion(sentry_init, capture_events, tools):
     ],
 )
 def test_openai_message_role_mapping(
-    sentry_init, capture_events, test_message, expected_role
+    sentry_init,
+    capture_events,
+    test_message,
+    expected_role,
+    nonstreaming_chat_completions_model_response,
 ):
     """Test that OpenAI integration properly maps message roles like 'ai' to 'assistant'"""
 
@@ -3176,7 +3197,9 @@ def test_openai_message_role_mapping(
     events = capture_events()
 
     client = OpenAI(api_key="z")
-    client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION)
+    client.chat.completions._post = mock.Mock(
+        return_value=nonstreaming_chat_completions_model_response
+    )
 
     test_messages = [test_message]
 
@@ -3197,7 +3220,9 @@
     assert stored_messages[0]["role"] == expected_role
 
 
-def test_openai_message_truncation(sentry_init, capture_events):
+def test_openai_message_truncation(
+    sentry_init, capture_events, nonstreaming_chat_completions_model_response
+):
     """Test that large messages are truncated properly in OpenAI integration."""
     sentry_init(
         integrations=[OpenAIIntegration(include_prompts=True)],
@@ -3207,7 +3232,9 @@ def test_openai_message_truncation(sentry_init, capture_events):
     events = capture_events()
 
     client = OpenAI(api_key="z")
-    client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION)
+    client.chat.completions._post = mock.Mock(
+        return_value=nonstreaming_chat_completions_model_response
+    )
 
     large_content = (
         "This is a very long message that will exceed our size limits. " * 1000