Skip to content
4 changes: 2 additions & 2 deletions sentry_sdk/integrations/langchain.py
Original file line number Diff line number Diff line change
Expand Up @@ -975,7 +975,7 @@ def new_invoke(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
origin=LangchainIntegration.origin,
) as span:
if run_name:
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, run_name)
span.set_data(SPANDATA.GEN_AI_FUNCTION_ID, run_name)

span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False)
Expand Down Expand Up @@ -1035,7 +1035,7 @@ def new_stream(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
span.__enter__()

if run_name:
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, run_name)
span.set_data(SPANDATA.GEN_AI_FUNCTION_ID, run_name)

span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
Expand Down
169 changes: 169 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -1398,6 +1398,175 @@ def nonstreaming_google_genai_model_response():
)


@pytest.fixture
def streaming_chat_completions_model_responses():
    """Factory for two turns of streamed OpenAI chat-completion chunks.

    The returned ``inner`` is a generator function: each iteration of
    ``inner()`` yields the full list of ``ChatCompletionChunk`` objects for
    one model turn, so a mocked streaming client can serve consecutive turns.

    * Turn 1: assistant role delta, a ``get_word_length`` tool call split
      across two chunks (name first, then arguments), a short content chunk,
      a ``finish_reason="function_call"`` chunk, and a usage-only chunk.
    * Turn 2: assistant role delta, the final answer content, a
      ``finish_reason="stop"`` chunk, and a usage-only chunk.

    NOTE(review): this fixture is specific to the langchain streaming tests;
    consider moving it into a conftest.py under tests/integrations (or the
    langchain test folder) instead of the global conftest.
    """

    def inner():
        # Turn 1 — the model decides to call the get_word_length tool.
        yield [
            openai.types.chat.chat_completion_chunk.ChatCompletionChunk(
                id="chatcmpl-turn-1",
                object="chat.completion.chunk",
                created=10000000,
                model="gpt-3.5-turbo",
                choices=[
                    openai.types.chat.chat_completion_chunk.Choice(
                        index=0,
                        delta=openai.types.chat.chat_completion_chunk.ChoiceDelta(
                            role="assistant"
                        ),
                        finish_reason=None,
                    ),
                ],
            ),
            # Tool-call chunk carrying the call id and function name; the
            # arguments arrive incrementally in the next chunk.
            openai.types.chat.chat_completion_chunk.ChatCompletionChunk(
                id="chatcmpl-turn-1",
                object="chat.completion.chunk",
                created=10000000,
                model="gpt-3.5-turbo",
                choices=[
                    openai.types.chat.chat_completion_chunk.Choice(
                        index=0,
                        delta=openai.types.chat.chat_completion_chunk.ChoiceDelta(
                            tool_calls=[
                                openai.types.chat.chat_completion_chunk.ChoiceDeltaToolCall(
                                    index=0,
                                    id="call_BbeyNhCKa6kYLYzrD40NGm3b",
                                    type="function",
                                    function=openai.types.chat.chat_completion_chunk.ChoiceDeltaToolCallFunction(
                                        name="get_word_length",
                                        arguments="",
                                    ),
                                ),
                            ],
                        ),
                        finish_reason=None,
                    ),
                ],
            ),
            # Continuation chunk streaming the tool-call arguments.
            openai.types.chat.chat_completion_chunk.ChatCompletionChunk(
                id="chatcmpl-turn-1",
                object="chat.completion.chunk",
                created=10000000,
                model="gpt-3.5-turbo",
                choices=[
                    openai.types.chat.chat_completion_chunk.Choice(
                        index=0,
                        delta=openai.types.chat.chat_completion_chunk.ChoiceDelta(
                            tool_calls=[
                                openai.types.chat.chat_completion_chunk.ChoiceDeltaToolCall(
                                    index=0,
                                    function=openai.types.chat.chat_completion_chunk.ChoiceDeltaToolCallFunction(
                                        arguments='{"word": "eudca"}',
                                    ),
                                ),
                            ],
                        ),
                        finish_reason=None,
                    ),
                ],
            ),
            openai.types.chat.chat_completion_chunk.ChatCompletionChunk(
                id="chatcmpl-turn-1",
                object="chat.completion.chunk",
                created=10000000,
                model="gpt-3.5-turbo",
                choices=[
                    openai.types.chat.chat_completion_chunk.Choice(
                        index=0,
                        delta=openai.types.chat.chat_completion_chunk.ChoiceDelta(
                            content="5"
                        ),
                        finish_reason=None,
                    ),
                ],
            ),
            openai.types.chat.chat_completion_chunk.ChatCompletionChunk(
                id="chatcmpl-turn-1",
                object="chat.completion.chunk",
                created=10000000,
                model="gpt-3.5-turbo",
                choices=[
                    openai.types.chat.chat_completion_chunk.Choice(
                        index=0,
                        delta=openai.types.chat.chat_completion_chunk.ChoiceDelta(),
                        finish_reason="function_call",
                    ),
                ],
            ),
            # Final usage-only chunk (empty choices) reporting token counts.
            openai.types.chat.chat_completion_chunk.ChatCompletionChunk(
                id="chatcmpl-turn-1",
                object="chat.completion.chunk",
                created=10000000,
                model="gpt-3.5-turbo",
                choices=[],
                usage=openai.types.chat.chat_completion_chunk.CompletionUsage(
                    prompt_tokens=142,
                    completion_tokens=50,
                    total_tokens=192,
                ),
            ),
        ]

        # Turn 2 — the model produces the final natural-language answer.
        yield [
            openai.types.chat.chat_completion_chunk.ChatCompletionChunk(
                id="chatcmpl-turn-2",
                object="chat.completion.chunk",
                created=10000000,
                model="gpt-3.5-turbo",
                choices=[
                    openai.types.chat.chat_completion_chunk.Choice(
                        index=0,
                        delta=openai.types.chat.chat_completion_chunk.ChoiceDelta(
                            role="assistant"
                        ),
                        finish_reason=None,
                    ),
                ],
            ),
            openai.types.chat.chat_completion_chunk.ChatCompletionChunk(
                id="chatcmpl-turn-2",
                object="chat.completion.chunk",
                created=10000000,
                model="gpt-3.5-turbo",
                choices=[
                    openai.types.chat.chat_completion_chunk.Choice(
                        index=0,
                        delta=openai.types.chat.chat_completion_chunk.ChoiceDelta(
                            content="The word eudca has 5 letters."
                        ),
                        finish_reason=None,
                    ),
                ],
            ),
            openai.types.chat.chat_completion_chunk.ChatCompletionChunk(
                id="chatcmpl-turn-2",
                object="chat.completion.chunk",
                created=10000000,
                model="gpt-3.5-turbo",
                choices=[
                    openai.types.chat.chat_completion_chunk.Choice(
                        index=0,
                        delta=openai.types.chat.chat_completion_chunk.ChoiceDelta(),
                        finish_reason="stop",
                    ),
                ],
            ),
            # Usage-only chunk closing out the second turn.
            openai.types.chat.chat_completion_chunk.ChatCompletionChunk(
                id="chatcmpl-turn-2",
                object="chat.completion.chunk",
                created=10000000,
                model="gpt-3.5-turbo",
                choices=[],
                usage=openai.types.chat.chat_completion_chunk.CompletionUsage(
                    prompt_tokens=89,
                    completion_tokens=28,
                    total_tokens=117,
                ),
            ),
        ]

    return inner


@pytest.fixture
def responses_tool_call_model_responses():
def inner(
Expand Down
Loading
Loading