Skip to content

mistral_common.protocol.instruct.request

ChatCompletionRequest(**data)

Bases: BaseCompletionRequest, Generic[ChatMessageType]

Request for a chat completion.

Attributes:

Name Type Description
model str | None

The model to use for the chat completion.

messages list[ChatMessageType]

The messages to use for the chat completion.

response_format ResponseFormat

The format of the response.

tools list[Tool] | None

The tools to use for the chat completion.

tool_choice ToolChoice

The tool choice to use for the chat completion.

truncate_for_context_length bool

Whether to truncate the messages for the context length.

continue_final_message bool

Whether to continue the final message.

reasoning_effort ReasoningEffort | None

Controls how much reasoning effort the model should apply.

Examples:

>>> from mistral_common.protocol.instruct.messages import UserMessage, AssistantMessage
>>> from mistral_common.protocol.instruct.tool_calls import Tool, ToolTypes, Function
>>> request = ChatCompletionRequest(
...     messages=[
...         UserMessage(content="Hello!"),
...         AssistantMessage(content="Hi! How can I help you?"),
...     ],
...     response_format=ResponseFormat(type=ResponseFormats.text),
...     tools=[Tool(type=ToolTypes.function, function=Function(name="get_weather", parameters={}))],
...     tool_choice=ToolChoiceEnum.auto,
...     truncate_for_context_length=True,
... )
Source code in .venv/lib/python3.14/site-packages/pydantic/main.py
def __init__(self, /, **data: Any) -> None:
    """Create a new model by parsing and validating input data from keyword arguments.

    Raises [`ValidationError`][pydantic_core.ValidationError] if the input data cannot be
    validated to form a valid model.

    `self` is explicitly positional-only to allow `self` as a field name.
    """
    # `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks
    __tracebackhide__ = True
    # `self_instance=self` asks pydantic-core to populate this very instance in
    # place instead of allocating a new model object.
    validated_self = self.__pydantic_validator__.validate_python(data, self_instance=self)
    # A model_validator that returns a different object cannot be honored here:
    # `__init__` has already committed to returning `self`, so warn instead.
    if self is not validated_self:
        warnings.warn(
            'A custom validator is returning a value other than `self`.\n'
            "Returning anything other than `self` from a top level model validator isn't supported when validating via `__init__`.\n"
            'See the `model_validator` docs (https://docs.pydantic.dev/latest/concepts/validators/#model-validators) for more details.',
            stacklevel=2,
        )

from_openai(messages, tools=None, continue_final_message=False, **kwargs) classmethod

Create a chat completion request from the OpenAI format.

Parameters:

Name Type Description Default
messages list[dict[str, str | list[dict[str, str | dict[str, Any]]]]]

The messages in the OpenAI format.

required
tools list[dict[str, Any]] | None

The tools in the OpenAI format.

None
continue_final_message bool

Whether to continue the final message.

False
**kwargs Any

Additional keyword arguments to pass to the constructor. These should be the same as the fields of the request class or the OpenAI API equivalent.

{}

Returns:

Type Description
ChatCompletionRequest

The chat completion request.

Source code in src/mistral_common/protocol/instruct/request.py
@classmethod
def from_openai(
    cls,
    messages: list[dict[str, str | list[dict[str, str | dict[str, Any]]]]],
    tools: list[dict[str, Any]] | None = None,
    continue_final_message: bool = False,
    **kwargs: Any,
) -> "ChatCompletionRequest":
    r"""Create a chat completion request from the OpenAI format.

    Args:
        messages: The messages in the OpenAI format.
        tools: The tools in the OpenAI format.
        continue_final_message: Whether to continue the final message.
        **kwargs: Additional keyword arguments to pass to the constructor. These should be the same as the fields
            of the request class or the OpenAI API equivalent.

    Returns:
        The chat completion request.
    """
    if "seed" in kwargs and "random_seed" in kwargs:
        raise ValueError("Cannot specify both `seed` and `random_seed`.")

    # Accept either the OpenAI name (`seed`) or the Mistral name (`random_seed`).
    # Use explicit `is None` checks instead of `or` so that a valid seed of 0
    # (falsy) is not silently discarded.
    seed = kwargs.pop("seed", None)
    random_seed = seed if seed is not None else kwargs.pop("random_seed", None)

    _check_openai_fields_names(set(cls.model_fields.keys()), set(kwargs.keys()))

    converted_messages: list[ChatMessage] = convert_openai_messages(messages)

    converted_tools = convert_openai_tools(tools) if tools is not None else None

    return cls(
        messages=converted_messages,  # type: ignore[arg-type]
        tools=converted_tools,
        random_seed=random_seed,
        continue_final_message=continue_final_message,
        **kwargs,
    )

to_openai(**kwargs)

Convert the request messages and tools into the OpenAI format.

Parameters:

Name Type Description Default
kwargs Any

Additional parameters to be added to the request.

{}

Returns:

Type Description
dict[str, Any]

The request in the OpenAI format.

Examples:

>>> from mistral_common.protocol.instruct.messages import UserMessage
>>> from mistral_common.protocol.instruct.tool_calls import Tool, Function
>>> request = ChatCompletionRequest(messages=[UserMessage(content="Hello, how are you?")], temperature=0.15)
>>> request.to_openai(stream=True)
{'temperature': 0.15, 'top_p': 1.0, 'response_format': {'type': 'text'}, 'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'tool_choice': 'auto', 'stream': True}
>>> request = ChatCompletionRequest(messages=[UserMessage(content="Hello, how are you?")], tools=[
...     Tool(function=Function(
...         name="get_current_weather",
...         description="Get the current weather in a given location",
...         parameters={
...             "type": "object",
...             "properties": {
...                 "location": {
...                     "type": "string",
...                     "description": "The city and state, e.g. San Francisco, CA",
...                 },
...                 "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
...             },
...             "required": ["location"],
...         },
...     ),
... )])
>>> request.to_openai()
{'temperature': 0.7, 'top_p': 1.0, 'response_format': {'type': 'text'}, 'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'tools': [{'type': 'function', 'function': {'name': 'get_current_weather', 'description': 'Get the current weather in a given location', 'parameters': {'type': 'object', 'properties': {'location': {'type': 'string', 'description': 'The city and state, e.g. San Francisco, CA'}, 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}}, 'required': ['location']}, 'strict': False}}], 'tool_choice': 'auto'}
Source code in src/mistral_common/protocol/instruct/request.py
def to_openai(self, **kwargs: Any) -> dict[str, Any]:
    r"""Convert the request messages and tools into the OpenAI format.

    Args:
        kwargs: Additional parameters to be added to the request.

    Returns:
        The request in the OpenAI format.

    Examples:
        >>> from mistral_common.protocol.instruct.messages import UserMessage
        >>> from mistral_common.protocol.instruct.tool_calls import Tool, Function
        >>> request = ChatCompletionRequest(messages=[UserMessage(content="Hello, how are you?")], temperature=0.15)
        >>> request.to_openai(stream=True)
        {'temperature': 0.15, 'top_p': 1.0, 'response_format': {'type': 'text'}, 'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'tool_choice': 'auto', 'stream': True}
        >>> request = ChatCompletionRequest(messages=[UserMessage(content="Hello, how are you?")], tools=[
        ...     Tool(function=Function(
        ...         name="get_current_weather",
        ...         description="Get the current weather in a given location",
        ...         parameters={
        ...             "type": "object",
        ...             "properties": {
        ...                 "location": {
        ...                     "type": "string",
        ...                     "description": "The city and state, e.g. San Francisco, CA",
        ...                 },
        ...                 "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
        ...             },
        ...             "required": ["location"],
        ...         },
        ...     ),
        ... )])
        >>> request.to_openai()
        {'temperature': 0.7, 'top_p': 1.0, 'response_format': {'type': 'text'}, 'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'tools': [{'type': 'function', 'function': {'name': 'get_current_weather', 'description': 'Get the current weather in a given location', 'parameters': {'type': 'object', 'properties': {'location': {'type': 'string', 'description': 'The city and state, e.g. San Francisco, CA'}, 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}}, 'required': ['location']}, 'strict': False}}], 'tool_choice': 'auto'}
    """  # noqa: E501

    # Handle messages and tools separately.
    openai_request: dict[str, Any] = self.model_dump(
        exclude={"messages", "tools", "truncate_for_context_length", "tool_choice"}, exclude_none=True
    )

    # Rename random_seed to seed.
    seed = openai_request.pop("random_seed", None)
    if seed is not None:
        openai_request["seed"] = seed

    if self.truncate_for_context_length:
        raise NotImplementedError("Truncating for context length is not implemented for OpenAI requests.")

    for kwarg in kwargs:
        # Check for duplicate keyword arguments.
        if kwarg in openai_request:
            raise ValueError(f"Duplicate keyword argument: {kwarg}")
        # Check if kwarg should have been set in the request.
        # This occurs when the field is different between the Mistral and OpenAI API.
        elif kwarg in ChatCompletionRequest.model_fields:
            raise ValueError(f"Keyword argument {kwarg} is already set in the request.")
        # Check if kwarg is a valid OpenAI field name.
        elif not _is_openai_field_name(kwarg):
            raise ValueError(f"Invalid keyword argument: {kwarg}, it should be an OpenAI field name.")

    openai_messages = []
    for message in self.messages:
        openai_messages.append(message.to_openai())

    openai_request["messages"] = openai_messages
    if self.tools is not None:
        openai_request["tools"] = [tool.to_openai() for tool in self.tools]

    openai_tool_choice: str | dict[str, Any]
    match self.tool_choice:
        case ToolChoiceEnum.auto | ToolChoiceEnum.none:
            openai_tool_choice = self.tool_choice
        case ToolChoiceEnum.required | ToolChoiceEnum.any:
            openai_tool_choice = ToolChoiceEnum.required.value
        case _:
            openai_tool_choice = self.tool_choice.model_dump()

    openai_request["tool_choice"] = openai_tool_choice

    openai_request.update(kwargs)

    return openai_request

InstructRequest(**data)

Bases: MistralBase, Generic[ChatMessageType, ToolType]

A valid Instruct request to be tokenized.

Attributes:

Name Type Description
messages list[ChatMessageType]

The history of the conversation.

system_prompt str | None

The system prompt to be used for the conversation.

available_tools list[ToolType] | None

The tools available to the assistant.

truncate_at_max_tokens int | None

The maximum number of tokens to truncate the conversation at.

continue_final_message bool

Whether to continue the final message.

settings ModelSettings

Model configuration settings for the request.

Examples:

>>> from mistral_common.protocol.instruct.messages import UserMessage, SystemMessage
>>> request = InstructRequest(
...     messages=[UserMessage(content="Hello, how are you?")], system_prompt="You are a helpful assistant."
... )
Source code in .venv/lib/python3.14/site-packages/pydantic/main.py
def __init__(self, /, **data: Any) -> None:
    """Create a new model by parsing and validating input data from keyword arguments.

    Raises [`ValidationError`][pydantic_core.ValidationError] if the input data cannot be
    validated to form a valid model.

    `self` is explicitly positional-only to allow `self` as a field name.
    """
    # `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks
    __tracebackhide__ = True
    # `self_instance=self` asks pydantic-core to populate this very instance in
    # place instead of allocating a new model object.
    validated_self = self.__pydantic_validator__.validate_python(data, self_instance=self)
    # A model_validator that returns a different object cannot be honored here:
    # `__init__` has already committed to returning `self`, so warn instead.
    if self is not validated_self:
        warnings.warn(
            'A custom validator is returning a value other than `self`.\n'
            "Returning anything other than `self` from a top level model validator isn't supported when validating via `__init__`.\n"
            'See the `model_validator` docs (https://docs.pydantic.dev/latest/concepts/validators/#model-validators) for more details.',
            stacklevel=2,
        )

from_openai(messages, tools=None, continue_final_message=False, **kwargs) classmethod

Create an instruct request from the OpenAI format.

Parameters:

Name Type Description Default
messages list[dict[str, str | list[dict[str, str | dict[str, Any]]]]]

The messages in the OpenAI format.

required
tools list[dict[str, Any]] | None

The tools in the OpenAI format.

None
continue_final_message bool

Whether to continue the final message.

False
**kwargs Any

Additional keyword arguments to pass to the constructor. These should be the same as the fields of the request class or the OpenAI API equivalent.

{}

Returns:

Type Description
InstructRequest

The instruct request.

Source code in src/mistral_common/protocol/instruct/request.py
@classmethod
def from_openai(
    cls,
    messages: list[dict[str, str | list[dict[str, str | dict[str, Any]]]]],
    tools: list[dict[str, Any]] | None = None,
    continue_final_message: bool = False,
    **kwargs: Any,
) -> "InstructRequest":
    r"""Create an instruct request from the OpenAI format.

    Args:
        messages: The messages in the OpenAI format.
        tools: The tools in the OpenAI format.
        continue_final_message: Whether to continue the final message.
        **kwargs: Additional keyword arguments to pass to the constructor. These should be the same as the fields
            of the request class or the OpenAI API equivalent.

    Returns:
        The instruct request.
    """
    # `available_tools` is accepted as an alias for `tools` to maintain
    # compatibility with the OpenAI API.
    if "available_tools" in kwargs:
        if tools is not None:
            raise ValueError("Cannot specify both `tools` and `available_tools`.")
        tools = kwargs.pop("available_tools")

    settings_fields = ModelSettings.model_fields.keys()
    request_fields = cls.model_fields.keys()
    assert request_fields.isdisjoint(settings_fields), (
        "ModelSettings fields should not overlap with InstructRequest fields."
    )
    # Validate every remaining kwarg against the union of both field sets.
    _check_openai_fields_names(request_fields | settings_fields, set(kwargs.keys()))

    chat_messages: list[ChatMessage] = convert_openai_messages(messages)
    parsed_tools = None if tools is None else convert_openai_tools(tools)

    # Split kwargs in a single pass: model-settings fields vs. request fields.
    settings_kwargs: dict[str, Any] = {}
    request_kwargs: dict[str, Any] = {}
    for key, value in kwargs.items():
        (settings_kwargs if key in settings_fields else request_kwargs)[key] = value

    return cls(
        messages=chat_messages,  # type: ignore[arg-type]
        available_tools=parsed_tools,  # type: ignore[arg-type]
        continue_final_message=continue_final_message,
        settings=ModelSettings.from_openai(**settings_kwargs),
        **request_kwargs,
    )

to_openai(**kwargs)

Convert the request messages and tools into the OpenAI format.

Parameters:

Name Type Description Default
kwargs Any

Additional parameters to be added to the request.

{}

Returns:

Type Description
dict[str, list[dict[str, Any]]]

The request in the OpenAI format.

Examples:

>>> from mistral_common.protocol.instruct.messages import UserMessage
>>> from mistral_common.protocol.instruct.tool_calls import Tool, Function
>>> request = InstructRequest(messages=[UserMessage(content="Hello, how are you?")])
>>> request.to_openai(temperature=0.15, stream=True)
{'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'temperature': 0.15, 'stream': True}
>>> request = InstructRequest(
...     messages=[UserMessage(content="Hello, how are you?")],
...     available_tools=[
...     Tool(function=Function(
...         name="get_current_weather",
...         description="Get the current weather in a given location",
...         parameters={
...             "type": "object",
...             "properties": {
...                 "location": {
...                     "type": "string",
...                     "description": "The city and state, e.g. San Francisco, CA",
...                 },
...                 "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
...             },
...             "required": ["location"],
...         },
...     ),
... )])
>>> request.to_openai()
{'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'tools': [{'type': 'function', 'function': {'name': 'get_current_weather', 'description': 'Get the current weather in a given location', 'parameters': {'type': 'object', 'properties': {'location': {'type': 'string', 'description': 'The city and state, e.g. San Francisco, CA'}, 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}}, 'required': ['location']}, 'strict': False}}]}
Source code in src/mistral_common/protocol/instruct/request.py
def to_openai(self, **kwargs: Any) -> dict[str, Any]:
    r"""Convert the request messages and tools into the OpenAI format.

    Args:
        kwargs: Additional parameters to be added to the request.

    Returns:
        The request in the OpenAI format.

    Raises:
        ValueError: If a keyword argument duplicates a request field or is not a
            valid OpenAI field name.
        NotImplementedError: If `truncate_at_max_tokens` is set.

    Examples:
        >>> from mistral_common.protocol.instruct.messages import UserMessage
        >>> from mistral_common.protocol.instruct.tool_calls import Tool, Function
        >>> request = InstructRequest(messages=[UserMessage(content="Hello, how are you?")])
        >>> request.to_openai(temperature=0.15, stream=True)
        {'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'temperature': 0.15, 'stream': True}
        >>> request = InstructRequest(
        ...     messages=[UserMessage(content="Hello, how are you?")],
        ...     available_tools=[
        ...     Tool(function=Function(
        ...         name="get_current_weather",
        ...         description="Get the current weather in a given location",
        ...         parameters={
        ...             "type": "object",
        ...             "properties": {
        ...                 "location": {
        ...                     "type": "string",
        ...                     "description": "The city and state, e.g. San Francisco, CA",
        ...                 },
        ...                 "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
        ...             },
        ...             "required": ["location"],
        ...         },
        ...     ),
        ... )])
        >>> request.to_openai()
        {'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'tools': [{'type': 'function', 'function': {'name': 'get_current_weather', 'description': 'Get the current weather in a given location', 'parameters': {'type': 'object', 'properties': {'location': {'type': 'string', 'description': 'The city and state, e.g. San Francisco, CA'}, 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}}, 'required': ['location']}, 'strict': False}}]}
    """  # noqa: E501
    # NOTE: the return type is `dict[str, Any]` (not `dict[str, list[...]]`):
    # the payload also carries scalars such as `continue_final_message` and any
    # extra kwargs (e.g. `temperature`).

    # Handle messages, tools, and truncate_at_max_tokens separately.
    openai_request: dict[str, Any] = self.model_dump(
        exclude={"messages", "available_tools", "truncate_at_max_tokens", "settings"}, exclude_none=True
    )

    for kwarg in kwargs:
        # Check for duplicate keyword arguments.
        if kwarg in openai_request:
            raise ValueError(f"Duplicate keyword argument: {kwarg}")
        # Check if kwarg should have been set in the request.
        # This occurs when the field is different between the Mistral and OpenAI API.
        elif kwarg in InstructRequest.model_fields:
            raise ValueError(f"Keyword argument {kwarg} is already set in the request.")
        # Check if the keyword argument is a valid OpenAI field name.
        elif not _is_openai_field_name(kwarg):
            raise ValueError(f"Invalid keyword argument: {kwarg}, it should be an OpenAI field name.")

    openai_messages: list[dict[str, Any]] = []
    # The system prompt is stored separately on the request; in the OpenAI
    # format it becomes the leading system message.
    if self.system_prompt is not None:
        openai_messages.append({"role": "system", "content": self.system_prompt})

    for message in self.messages:
        openai_messages.append(message.to_openai())

    openai_request["messages"] = openai_messages
    if self.available_tools is not None:
        # Rename available_tools to tools
        openai_request["tools"] = [tool.to_openai() for tool in self.available_tools]

    if self.truncate_at_max_tokens is not None:
        raise NotImplementedError("Truncating at max tokens is not implemented for OpenAI requests.")

    openai_request.update(kwargs)
    # Pass the dict positionally: `update(**d)` would break on non-identifier
    # keys, and model settings intentionally take precedence over kwargs.
    # NOTE(review): a settings field passed via kwargs is silently overridden
    # here — confirm that is intended.
    openai_request.update(self.settings.to_openai())

    return openai_request

ModelSettings(**data)

Bases: MistralBase

Model configuration settings for instruct requests.

This class encapsulates various model configuration options that can be passed to the model during inference. Currently supports reasoning effort configuration, but can be extended with additional settings in the future.

Attributes:

Name Type Description
reasoning_effort ReasoningEffort | None

Controls how much reasoning effort the model should apply when generating responses. Supported for tokenizer >= v15 and not supported for earlier versions.

Source code in .venv/lib/python3.14/site-packages/pydantic/main.py
def __init__(self, /, **data: Any) -> None:
    """Create a new model by parsing and validating input data from keyword arguments.

    Raises [`ValidationError`][pydantic_core.ValidationError] if the input data cannot be
    validated to form a valid model.

    `self` is explicitly positional-only to allow `self` as a field name.
    """
    # `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks
    __tracebackhide__ = True
    # `self_instance=self` asks pydantic-core to populate this very instance in
    # place instead of allocating a new model object.
    validated_self = self.__pydantic_validator__.validate_python(data, self_instance=self)
    # A model_validator that returns a different object cannot be honored here:
    # `__init__` has already committed to returning `self`, so warn instead.
    if self is not validated_self:
        warnings.warn(
            'A custom validator is returning a value other than `self`.\n'
            "Returning anything other than `self` from a top level model validator isn't supported when validating via `__init__`.\n"
            'See the `model_validator` docs (https://docs.pydantic.dev/latest/concepts/validators/#model-validators) for more details.',
            stacklevel=2,
        )

from_openai(**kwargs) classmethod

Create ModelSettings from OpenAI keywords API format.

Source code in src/mistral_common/protocol/instruct/request.py
@classmethod
def from_openai(cls, **kwargs: Any) -> "ModelSettings":
    r"""Create ModelSettings from OpenAI keywords API format.

    Args:
        **kwargs: Setting values keyed by their OpenAI field names; validated
            by pydantic.

    Returns:
        A validated ModelSettings instance.
    """
    # Use `cls` (not the hard-coded class name) so subclasses validate into
    # instances of themselves rather than the base class.
    return cls.model_validate(kwargs)

none() staticmethod

Create a ModelSettings instance with default (None) values.

Source code in src/mistral_common/protocol/instruct/request.py
@staticmethod
def none() -> "ModelSettings":
    r"""Create a ModelSettings instance with default (None) values.

    Returns:
        A ModelSettings whose fields are all unset, i.e. "no settings".
    """
    # Every field defaults to None, so a bare constructor call is sufficient.
    return ModelSettings()

to_openai()

Convert ModelSettings to OpenAI API format with non-None values.

Source code in src/mistral_common/protocol/instruct/request.py
def to_openai(self) -> dict[str, Any]:
    r"""Convert ModelSettings to OpenAI API format with non-None values.

    Returns:
        A dict containing only the settings that were explicitly configured.
    """
    # `exclude_none=True` drops unset fields so only configured settings are
    # merged into the serialized OpenAI request payload.
    return self.model_dump(exclude_none=True)

ReasoningEffort

Bases: str, Enum

Controls how much reasoning effort the model should apply.

Attributes:

Name Type Description
none

No additional reasoning effort.

high

High reasoning effort for complex tasks.

ResponseFormat(**data)

Bases: MistralBase

The format of the response.

Attributes:

Name Type Description
type ResponseFormats

The type of the response.

Examples:

>>> response_format = ResponseFormat(type=ResponseFormats.text)
Source code in .venv/lib/python3.14/site-packages/pydantic/main.py
def __init__(self, /, **data: Any) -> None:
    """Create a new model by parsing and validating input data from keyword arguments.

    Raises [`ValidationError`][pydantic_core.ValidationError] if the input data cannot be
    validated to form a valid model.

    `self` is explicitly positional-only to allow `self` as a field name.
    """
    # `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks
    __tracebackhide__ = True
    validated_self = self.__pydantic_validator__.validate_python(data, self_instance=self)
    if self is not validated_self:
        warnings.warn(
            'A custom validator is returning a value other than `self`.\n'
            "Returning anything other than `self` from a top level model validator isn't supported when validating via `__init__`.\n"
            'See the `model_validator` docs (https://docs.pydantic.dev/latest/concepts/validators/#model-validators) for more details.',
            stacklevel=2,
        )

ResponseFormats

Bases: str, Enum

Enum of the different formats of an instruct response.

Attributes:

Name Type Description
text

The response is a plain text.

json

The response is a JSON object.

Examples:

>>> response_format = ResponseFormats.text