Skip to content
Open
Show file tree
Hide file tree
Changes from 8 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
77 changes: 76 additions & 1 deletion camel/models/_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
import re
import textwrap
from typing import Type
from typing import Any, Dict, Optional, Type

from pydantic import BaseModel

Expand Down Expand Up @@ -100,3 +100,78 @@ def try_modify_message_with_format(
""" # noqa: E501
)
message["content"] = updated_prompt


def pydantic_to_json_schema_response_format(
    response_format: Type[BaseModel],
) -> Dict[str, Any]:
    r"""Build a ``json_schema`` ``response_format`` payload from a Pydantic
    model class, as accepted by ``chat.completions.create()``.

    The returned dict has the shape::

        {
            "type": "json_schema",
            "json_schema": {
                "name": "<ModelClassName>",
                "schema": { ... }
            }
        }

    Args:
        response_format (Type[BaseModel]): The Pydantic model class.

    Returns:
        Dict[str, Any]: The response_format dict for the API call.
    """
    json_schema = response_format.model_json_schema()
    # Strict OpenAI-compatible backends reject object schemas that omit
    # additionalProperties, so normalize the schema tree in place first.
    _enforce_object_additional_properties_false(json_schema)
    payload: Dict[str, Any] = {
        "type": "json_schema",
        "json_schema": {
            "name": response_format.__name__,
            "schema": json_schema,
        },
    }
    return payload


def _enforce_object_additional_properties_false(schema: Any) -> None:
r"""Recursively enforce strict object schemas.

OpenAI-compatible structured-output backends frequently reject object
schemas that omit ``additionalProperties``. Mirror the stricter OpenAI
Responses handling so the json_schema fallback remains usable for nested
Pydantic models.
"""
if isinstance(schema, dict):
if (
schema.get("type") == "object"
and "additionalProperties" not in schema
):
schema["additionalProperties"] = False

for value in schema.values():
_enforce_object_additional_properties_false(value)
elif isinstance(schema, list):
for item in schema:
_enforce_object_additional_properties_false(item)


def parse_json_response_to_pydantic(
    content: Optional[str],
    response_format: Type[BaseModel],
) -> Optional[BaseModel]:
    r"""Validate a raw JSON string from the model against a Pydantic class.

    Args:
        content (Optional[str]): The raw JSON string from the model response.
        response_format (Type[BaseModel]): The Pydantic model class to
            validate against.

    Returns:
        Optional[BaseModel]: The validated Pydantic instance, or ``None``
            if *content* is ``None`` or empty.
    """
    # Empty string and None both count as "no structured payload".
    if content:
        return response_format.model_validate_json(content)
    return None
12 changes: 6 additions & 6 deletions camel/models/azure_openai_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ class AzureOpenAIModel(BaseModelBackend):
creating a new one. Useful for RL frameworks like AReaL or rLLM
that provide Azure OpenAI-compatible clients. The client should
implement the AzureOpenAI client interface with
`.chat.completions.create()` and `.beta.chat.completions.parse()`
`.chat.completions.create()` and `.chat.completions.parse()`
methods. (default: :obj:`None`)
async_client (Optional[Any], optional): A custom asynchronous
AzureOpenAI client instance. If provided, this client will be
Expand Down Expand Up @@ -380,7 +380,7 @@ def _request_parse(
request_config.pop("stream", None)

return self._call_client(
self._client.beta.chat.completions.parse,
self._client.chat.completions.parse,
messages=messages,
model=str(self.model_type),
**request_config,
Expand All @@ -399,7 +399,7 @@ async def _arequest_parse(
request_config.pop("stream", None)

return await self._acall_client(
self._async_client.beta.chat.completions.parse,
self._async_client.chat.completions.parse,
messages=messages,
model=str(self.model_type),
**request_config,
Expand All @@ -413,7 +413,7 @@ def _request_stream_parse(
) -> ChatCompletionStreamManager[BaseModel]:
r"""Request streaming structured output parsing.

Note: This uses OpenAI's beta streaming API for structured outputs.
Note: This uses OpenAI's streaming API for structured outputs.
"""
request_config = self._prepare_request_config(tools)
# Remove stream from config as it's handled by the stream method
Expand All @@ -436,14 +436,14 @@ async def _arequest_stream_parse(
) -> AsyncChatCompletionStreamManager[BaseModel]:
r"""Request async streaming structured output parsing.

Note: This uses OpenAI's beta streaming API for structured outputs.
Note: This uses OpenAI's streaming API for structured outputs.
"""
request_config = self._prepare_request_config(tools)
# Remove stream from config as it's handled by the stream method
request_config.pop("stream", None)

# Use the beta streaming API for structured outputs
return self._call_client(
return await self._acall_client(
self._async_client.beta.chat.completions.stream,
messages=messages,
model=str(self.model_type),
Expand Down
6 changes: 5 additions & 1 deletion camel/models/cohere_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -286,7 +286,11 @@ def _prepare_request(
request_config["tools"] = tools
elif response_format:
try_modify_message_with_format(messages[-1], response_format)
request_config["response_format"] = {"type": "json_object"}
schema = response_format.model_json_schema()
request_config["response_format"] = {
"type": "json_object",
"schema": schema,
}

return request_config

Expand Down
129 changes: 19 additions & 110 deletions camel/models/deepseek_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,40 +13,20 @@
# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========

import os
from typing import Any, Dict, List, Optional, Type, Union

from openai import AsyncStream, Stream
from pydantic import BaseModel
from typing import Any, Dict, List, Optional, Union

from camel.configs import DeepSeekConfig
from camel.logger import get_logger
from camel.messages import OpenAIMessage
from camel.models._utils import try_modify_message_with_format
from camel.models.openai_compatible_model import OpenAICompatibleModel
from camel.types import (
ChatCompletion,
ChatCompletionChunk,
ModelType,
StructuredOutputMode,
)
from camel.utils import (
BaseTokenCounter,
api_keys_required,
)

if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
try:
from langfuse.decorators import observe
except ImportError:
from camel.utils import observe
elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
try:
from traceroot import trace as observe # type: ignore[import]
except ImportError:
from camel.utils import observe
else:
from camel.utils import observe


logger = get_logger(__name__)

REASONSER_UNSUPPORTED_PARAMS = [
Expand Down Expand Up @@ -125,105 +105,34 @@ def __init__(
**kwargs,
)

def _prepare_request(
@property
def structured_output_mode(self) -> StructuredOutputMode:
return StructuredOutputMode.JSON_OBJECT

def _prepare_request_config(
self,
messages: List[OpenAIMessage],
response_format: Optional[Type[BaseModel]] = None,
tools: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, Any]:
request_config = self.model_config_dict.copy()
r"""Prepare request config with DeepSeek-specific adjustments.

if self.model_type in [
ModelType.DEEPSEEK_REASONER,
]:
- For Reasoner models, removes unsupported parameters.
- Removes ``strict`` from tool function definitions (unsupported
by DeepSeek).
"""
request_config = super()._prepare_request_config(tools)

if self.model_type in [ModelType.DEEPSEEK_REASONER]:
logger.warning(
"Warning: You are using an DeepSeek Reasoner model, "
"which has certain limitations, reference: "
"`https://api-docs.deepseek.com/guides/reasoning_model"
"#api-parameters`.",
)
request_config = {
key: value
for key, value in request_config.items()
if key not in REASONSER_UNSUPPORTED_PARAMS
}
import copy
for key in REASONSER_UNSUPPORTED_PARAMS:
request_config.pop(key, None)

request_config = copy.deepcopy(self.model_config_dict)
# Remove strict from each tool's function parameters since DeepSeek
# does not support them
if tools:
for tool in tools:
function_dict = tool.get('function', {})
function_dict.pop("strict", None)
request_config["tools"] = tools
elif response_format:
try_modify_message_with_format(messages[-1], response_format)
request_config["response_format"] = {"type": "json_object"}
for tool in request_config.get("tools", []):
tool.get("function", {}).pop("strict", None)

return request_config

@observe()
def _run(
self,
messages: List[OpenAIMessage],
response_format: Optional[Type[BaseModel]] = None,
tools: Optional[List[Dict[str, Any]]] = None,
) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
r"""Runs inference of DeepSeek chat completion.

Args:
messages (List[OpenAIMessage]): Message list with the chat history
in OpenAI API format.

Returns:
Union[ChatCompletion, Stream[ChatCompletionChunk]]:
`ChatCompletion` in the non-stream mode, or
`Stream[ChatCompletionChunk]` in the stream mode.
"""
self._log_and_trace()

request_config = self._prepare_request(
messages, response_format, tools
)

response = self._call_client(
self._client.chat.completions.create,
messages=messages,
model=self.model_type,
**request_config,
)

return response

@observe()
async def _arun(
self,
messages: List[OpenAIMessage],
response_format: Optional[Type[BaseModel]] = None,
tools: Optional[List[Dict[str, Any]]] = None,
) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
r"""Runs inference of DeepSeek chat completion.

Args:
messages (List[OpenAIMessage]): Message list with the chat history
in OpenAI API format.

Returns:
Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
`ChatCompletion` in the non-stream mode, or
`AsyncStream[ChatCompletionChunk]` in the stream mode.
"""
self._log_and_trace()

request_config = self._prepare_request(
messages, response_format, tools
)
response = await self._acall_client(
self._async_client.chat.completions.create,
messages=messages,
model=self.model_type,
**request_config,
)

return response
Loading
Loading