mistral_common.protocol.instruct.request

ChatCompletionRequest(**data)

Bases: BaseCompletionRequest, Generic[ChatMessageType]

Request for a chat completion.

Attributes:

    model (Optional[str]): The model to use for the chat completion.
    messages (List[ChatMessageType]): The messages to use for the chat completion.
    response_format (ResponseFormat): The format of the response.
    tools (Optional[List[Tool]]): The tools to use for the chat completion.
    tool_choice (ToolChoice): The tool choice to use for the chat completion.
    truncate_for_context_length (bool): Whether to truncate the messages to fit the context length.
    continue_final_message (bool): Whether to continue the final message.

Examples:

>>> from mistral_common.protocol.instruct.messages import UserMessage, AssistantMessage
>>> from mistral_common.protocol.instruct.tool_calls import Tool, ToolTypes, Function
>>> request = ChatCompletionRequest(
...     messages=[
...         UserMessage(content="Hello!"),
...         AssistantMessage(content="Hi! How can I help you?"),
...     ],
...     response_format=ResponseFormat(type=ResponseFormats.text),
...     tools=[Tool(type=ToolTypes.function, function=Function(name="get_weather", parameters={}))],
...     tool_choice=ToolChoice.auto,
...     truncate_for_context_length=True,
... )
Source code in .venv/lib/python3.13/site-packages/pydantic/main.py
def __init__(self, /, **data: Any) -> None:
    """Create a new model by parsing and validating input data from keyword arguments.

    Raises [`ValidationError`][pydantic_core.ValidationError] if the input data cannot be
    validated to form a valid model.

    `self` is explicitly positional-only to allow `self` as a field name.
    """
    # `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks
    __tracebackhide__ = True
    validated_self = self.__pydantic_validator__.validate_python(data, self_instance=self)
    if self is not validated_self:
        warnings.warn(
            'A custom validator is returning a value other than `self`.\n'
            "Returning anything other than `self` from a top level model validator isn't supported when validating via `__init__`.\n"
            'See the `model_validator` docs (https://docs.pydantic.dev/latest/concepts/validators/#model-validators) for more details.',
            stacklevel=2,
        )

from_openai(messages, tools=None, continue_final_message=False, **kwargs) classmethod

Create a chat completion request from the OpenAI format.

Parameters:

    messages (List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, Any]]]]]]]): The messages in the OpenAI format. Required.
    tools (Optional[List[Dict[str, Any]]]): The tools in the OpenAI format. Default: None.
    continue_final_message (bool): Whether to continue the final message. Default: False.
    **kwargs (Any): Additional keyword arguments to pass to the constructor. These should be the same as the fields of the request class or the OpenAI API equivalent. Default: {}.

Returns:

    ChatCompletionRequest: The chat completion request.
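
A minimal sketch of the conversion, assuming a plain string-content message and the standard OpenAI tool dict shape (the exact parsed message classes are determined by `convert_openai_messages`):

>>> request = ChatCompletionRequest.from_openai(
...     messages=[{"role": "user", "content": "Hello!"}],
...     tools=[{"type": "function", "function": {"name": "get_weather", "parameters": {}}}],
... )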

Source code in src/mistral_common/protocol/instruct/request.py
@classmethod
def from_openai(
    cls,
    messages: List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, Any]]]]]]],
    tools: Optional[List[Dict[str, Any]]] = None,
    continue_final_message: bool = False,
    **kwargs: Any,
) -> "ChatCompletionRequest":
    r"""Create a chat completion request from the OpenAI format.

    Args:
        messages: The messages in the OpenAI format.
        tools: The tools in the OpenAI format.
        continue_final_message: Whether to continue the final message.
        **kwargs: Additional keyword arguments to pass to the constructor. These should be the same as the fields
            of the request class or the OpenAI API equivalent.


    Returns:
        The chat completion request.
    """
    if "seed" in kwargs and "random_seed" in kwargs:
        raise ValueError("Cannot specify both `seed` and `random_seed`.")

    random_seed = kwargs.pop("seed", None) or kwargs.pop("random_seed", None)

    _check_openai_fields_names(set(cls.model_fields.keys()), set(kwargs.keys()))

    converted_messages: list[ChatMessage] = convert_openai_messages(messages)

    converted_tools = convert_openai_tools(tools) if tools is not None else None

    return cls(
        messages=converted_messages,  # type: ignore[arg-type]
        tools=converted_tools,
        random_seed=random_seed,
        continue_final_message=continue_final_message,
        **kwargs,
    )
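
As the source above shows, `seed` is accepted as an alias for the `random_seed` field, and passing both raises a `ValueError` (error message taken from the source):

>>> request = ChatCompletionRequest.from_openai(
...     messages=[{"role": "user", "content": "Hello!"}], seed=42
... )
>>> request.random_seed
42
>>> ChatCompletionRequest.from_openai(
...     messages=[{"role": "user", "content": "Hello!"}], seed=42, random_seed=42
... )
Traceback (most recent call last):
...
ValueError: Cannot specify both `seed` and `random_seed`.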

to_openai(**kwargs)

Convert the request messages and tools into the OpenAI format.

Parameters:

    **kwargs (Any): Additional parameters to be added to the request. Default: {}.

Returns:

    Dict[str, List[Dict[str, Any]]]: The request in the OpenAI format.

Examples:

>>> from mistral_common.protocol.instruct.messages import UserMessage
>>> from mistral_common.protocol.instruct.tool_calls import Tool, Function
>>> request = ChatCompletionRequest(messages=[UserMessage(content="Hello, how are you?")], temperature=0.15)
>>> request.to_openai(stream=True)
{'temperature': 0.15, 'top_p': 1.0, 'response_format': {'type': 'text'}, 'tool_choice': 'auto', 'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'stream': True}
>>> request = ChatCompletionRequest(messages=[UserMessage(content="Hello, how are you?")], tools=[
...     Tool(function=Function(
...         name="get_current_weather",
...         description="Get the current weather in a given location",
...         parameters={
...             "type": "object",
...             "properties": {
...                 "location": {
...                     "type": "string",
...                     "description": "The city and state, e.g. San Francisco, CA",
...                 },
...                 "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
...             },
...             "required": ["location"],
...         },
...     ),
... )])
>>> request.to_openai()
{'temperature': 0.7, 'top_p': 1.0, 'response_format': {'type': 'text'}, 'tool_choice': 'auto', 'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'tools': [{'type': 'function', 'function': {'name': 'get_current_weather', 'description': 'Get the current weather in a given location', 'parameters': {'type': 'object', 'properties': {'location': {'type': 'string', 'description': 'The city and state, e.g. San Francisco, CA'}, 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}}, 'required': ['location']}}}]}
Source code in src/mistral_common/protocol/instruct/request.py
def to_openai(self, **kwargs: Any) -> Dict[str, List[Dict[str, Any]]]:
    r"""Convert the request messages and tools into the OpenAI format.

    Args:
        kwargs: Additional parameters to be added to the request.

    Returns:
        The request in the OpenAI format.

    Examples:
        >>> from mistral_common.protocol.instruct.messages import UserMessage
        >>> from mistral_common.protocol.instruct.tool_calls import Tool, Function
        >>> request = ChatCompletionRequest(messages=[UserMessage(content="Hello, how are you?")], temperature=0.15)
        >>> request.to_openai(stream=True)
        {'temperature': 0.15, 'top_p': 1.0, 'response_format': {'type': 'text'}, 'tool_choice': 'auto', 'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'stream': True}
        >>> request = ChatCompletionRequest(messages=[UserMessage(content="Hello, how are you?")], tools=[
        ...     Tool(function=Function(
        ...         name="get_current_weather",
        ...         description="Get the current weather in a given location",
        ...         parameters={
        ...             "type": "object",
        ...             "properties": {
        ...                 "location": {
        ...                     "type": "string",
        ...                     "description": "The city and state, e.g. San Francisco, CA",
        ...                 },
        ...                 "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
        ...             },
        ...             "required": ["location"],
        ...         },
        ...     ),
        ... )])
        >>> request.to_openai()
        {'temperature': 0.7, 'top_p': 1.0, 'response_format': {'type': 'text'}, 'tool_choice': 'auto', 'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'tools': [{'type': 'function', 'function': {'name': 'get_current_weather', 'description': 'Get the current weather in a given location', 'parameters': {'type': 'object', 'properties': {'location': {'type': 'string', 'description': 'The city and state, e.g. San Francisco, CA'}, 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}}, 'required': ['location']}}}]}
    """  # noqa: E501

    # Handle messages and tools separately.
    openai_request: Dict[str, Any] = self.model_dump(
        exclude={"messages", "tools", "truncate_for_context_length"}, exclude_none=True
    )

    # Rename random_seed to seed.
    seed = openai_request.pop("random_seed", None)
    if seed is not None:
        openai_request["seed"] = seed

    if self.truncate_for_context_length:
        raise NotImplementedError("Truncating for context length is not implemented for OpenAI requests.")

    for kwarg in kwargs:
        # Check for duplicate keyword arguments.
        if kwarg in openai_request:
            raise ValueError(f"Duplicate keyword argument: {kwarg}")
        # Check if kwarg should have been set in the request.
        # This occurs when the field is different between the Mistral and OpenAI API.
        elif kwarg in ChatCompletionRequest.model_fields:
            raise ValueError(f"Keyword argument {kwarg} is already set in the request.")
        # Check if kwarg is a valid OpenAI field name.
        elif not _is_openai_field_name(kwarg):
            raise ValueError(f"Invalid keyword argument: {kwarg}, it should be an OpenAI field name.")

    openai_messages = []
    for message in self.messages:
        openai_messages.append(message.to_openai())

    openai_request["messages"] = openai_messages
    if self.tools is not None:
        openai_request["tools"] = [tool.to_openai() for tool in self.tools]

    openai_request.update(kwargs)

    return openai_request
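
The validation branches above are easy to trip; a short sketch of the failure modes (error messages taken from the source, and `temperature` counts as a duplicate because it is part of the model dump):

>>> from mistral_common.protocol.instruct.messages import UserMessage
>>> request = ChatCompletionRequest(
...     messages=[UserMessage(content="Hi")], truncate_for_context_length=True
... )
>>> request.to_openai()
Traceback (most recent call last):
...
NotImplementedError: Truncating for context length is not implemented for OpenAI requests.
>>> ChatCompletionRequest(messages=[UserMessage(content="Hi")]).to_openai(temperature=0.5)
Traceback (most recent call last):
...
ValueError: Duplicate keyword argument: temperature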

InstructRequest(**data)

Bases: MistralBase, Generic[ChatMessageType, ToolType]

A valid Instruct request to be tokenized.

Attributes:

    messages (List[ChatMessageType]): The history of the conversation.
    system_prompt (Optional[str]): The system prompt to be used for the conversation.
    available_tools (Optional[List[ToolType]]): The tools available to the assistant.
    truncate_at_max_tokens (Optional[int]): The maximum number of tokens at which to truncate the conversation.
    continue_final_message (bool): Whether to continue the final message.

Examples:

>>> from mistral_common.protocol.instruct.messages import UserMessage, SystemMessage
>>> request = InstructRequest(
...     messages=[UserMessage(content="Hello, how are you?")], system_prompt="You are a helpful assistant."
... )
Source code in .venv/lib/python3.13/site-packages/pydantic/main.py
def __init__(self, /, **data: Any) -> None:
    """Create a new model by parsing and validating input data from keyword arguments.

    Raises [`ValidationError`][pydantic_core.ValidationError] if the input data cannot be
    validated to form a valid model.

    `self` is explicitly positional-only to allow `self` as a field name.
    """
    # `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks
    __tracebackhide__ = True
    validated_self = self.__pydantic_validator__.validate_python(data, self_instance=self)
    if self is not validated_self:
        warnings.warn(
            'A custom validator is returning a value other than `self`.\n'
            "Returning anything other than `self` from a top level model validator isn't supported when validating via `__init__`.\n"
            'See the `model_validator` docs (https://docs.pydantic.dev/latest/concepts/validators/#model-validators) for more details.',
            stacklevel=2,
        )

from_openai(messages, tools=None, continue_final_message=False, **kwargs) classmethod

Create an instruct request from the OpenAI format.

Parameters:

    messages (List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, Any]]]]]]]): The messages in the OpenAI format. Required.
    tools (Optional[List[Dict[str, Any]]]): The tools in the OpenAI format. Default: None.
    continue_final_message (bool): Whether to continue the final message. Default: False.
    **kwargs (Any): Additional keyword arguments to pass to the constructor. These should be the same as the fields of the request class or the OpenAI API equivalent. Default: {}.

Returns:

    InstructRequest: The instruct request.
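
A minimal sketch of the conversion, assuming a plain string-content message in the OpenAI chat format:

>>> request = InstructRequest.from_openai(
...     messages=[{"role": "user", "content": "Hello!"}],
... )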

Source code in src/mistral_common/protocol/instruct/request.py
@classmethod
def from_openai(
    cls,
    messages: List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, Any]]]]]]],
    tools: Optional[List[Dict[str, Any]]] = None,
    continue_final_message: bool = False,
    **kwargs: Any,
) -> "InstructRequest":
    r"""Create an instruct request from the OpenAI format.

    Args:
        messages: The messages in the OpenAI format.
        tools: The tools in the OpenAI format.
        continue_final_message: Whether to continue the final message.
        **kwargs: Additional keyword arguments to pass to the constructor. These should be the same as the fields
            of the request class or the OpenAI API equivalent.

    Returns:
        The instruct request.
    """
    # Handle the case where the tools are passed as `available_tools`.
    # This is to maintain compatibility with the OpenAI API.
    if "available_tools" in kwargs:
        if tools is None:
            tools = kwargs.pop("available_tools")
        else:
            raise ValueError("Cannot specify both `tools` and `available_tools`.")

    _check_openai_fields_names(set(cls.model_fields.keys()), set(kwargs.keys()))

    converted_messages: list[ChatMessage] = convert_openai_messages(messages)

    converted_tools = convert_openai_tools(tools) if tools is not None else None

    return cls(
        messages=converted_messages,  # type: ignore[arg-type]
        available_tools=converted_tools,  # type: ignore[arg-type]
        continue_final_message=continue_final_message,
        **kwargs,
    )
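
Per the compatibility branch above, `available_tools` is accepted as an alias for `tools`, but not together with it (error message taken from the source):

>>> tool = {"type": "function", "function": {"name": "get_weather", "parameters": {}}}
>>> request = InstructRequest.from_openai(
...     messages=[{"role": "user", "content": "Hello!"}], available_tools=[tool]
... )
>>> InstructRequest.from_openai(
...     messages=[{"role": "user", "content": "Hello!"}], tools=[tool], available_tools=[tool]
... )
Traceback (most recent call last):
...
ValueError: Cannot specify both `tools` and `available_tools`.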

to_openai(**kwargs)

Convert the request messages and tools into the OpenAI format.

Parameters:

    **kwargs (Any): Additional parameters to be added to the request. Default: {}.

Returns:

    Dict[str, List[Dict[str, Any]]]: The request in the OpenAI format.

Examples:

>>> from mistral_common.protocol.instruct.messages import UserMessage
>>> from mistral_common.protocol.instruct.tool_calls import Tool, Function
>>> request = InstructRequest(messages=[UserMessage(content="Hello, how are you?")])
>>> request.to_openai(temperature=0.15, stream=True)
{'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'temperature': 0.15, 'stream': True}
>>> request = InstructRequest(
...     messages=[UserMessage(content="Hello, how are you?")],
...     available_tools=[
...     Tool(function=Function(
...         name="get_current_weather",
...         description="Get the current weather in a given location",
...         parameters={
...             "type": "object",
...             "properties": {
...                 "location": {
...                     "type": "string",
...                     "description": "The city and state, e.g. San Francisco, CA",
...                 },
...                 "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
...             },
...             "required": ["location"],
...         },
...     ),
... )])
>>> request.to_openai()
{'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'tools': [{'type': 'function', 'function': {'name': 'get_current_weather', 'description': 'Get the current weather in a given location', 'parameters': {'type': 'object', 'properties': {'location': {'type': 'string', 'description': 'The city and state, e.g. San Francisco, CA'}, 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}}, 'required': ['location']}}}]}
Source code in src/mistral_common/protocol/instruct/request.py
def to_openai(self, **kwargs: Any) -> Dict[str, List[Dict[str, Any]]]:
    r"""Convert the request messages and tools into the OpenAI format.

    Args:
        kwargs: Additional parameters to be added to the request.

    Returns:
        The request in the OpenAI format.

    Examples:
        >>> from mistral_common.protocol.instruct.messages import UserMessage
        >>> from mistral_common.protocol.instruct.tool_calls import Tool, Function
        >>> request = InstructRequest(messages=[UserMessage(content="Hello, how are you?")])
        >>> request.to_openai(temperature=0.15, stream=True)
        {'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'temperature': 0.15, 'stream': True}
        >>> request = InstructRequest(
        ...     messages=[UserMessage(content="Hello, how are you?")],
        ...     available_tools=[
        ...     Tool(function=Function(
        ...         name="get_current_weather",
        ...         description="Get the current weather in a given location",
        ...         parameters={
        ...             "type": "object",
        ...             "properties": {
        ...                 "location": {
        ...                     "type": "string",
        ...                     "description": "The city and state, e.g. San Francisco, CA",
        ...                 },
        ...                 "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
        ...             },
        ...             "required": ["location"],
        ...         },
        ...     ),
        ... )])
        >>> request.to_openai()
        {'continue_final_message': False, 'messages': [{'role': 'user', 'content': 'Hello, how are you?'}], 'tools': [{'type': 'function', 'function': {'name': 'get_current_weather', 'description': 'Get the current weather in a given location', 'parameters': {'type': 'object', 'properties': {'location': {'type': 'string', 'description': 'The city and state, e.g. San Francisco, CA'}, 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}}, 'required': ['location']}}}]}
    """  # noqa: E501

    # Handle messages, tools, and truncate_at_max_tokens separately.
    openai_request: Dict[str, Any] = self.model_dump(
        exclude={"messages", "available_tools", "truncate_at_max_tokens"}, exclude_none=True
    )

    for kwarg in kwargs:
        # Check for duplicate keyword arguments.
        if kwarg in openai_request:
            raise ValueError(f"Duplicate keyword argument: {kwarg}")
        # Check if kwarg should have been set in the request.
        # This occurs when the field is different between the Mistral and OpenAI API.
        elif kwarg in InstructRequest.model_fields:
            raise ValueError(f"Keyword argument {kwarg} is already set in the request.")
        # Check if the keyword argument is a valid OpenAI field name.
        elif not _is_openai_field_name(kwarg):
            raise ValueError(f"Invalid keyword argument: {kwarg}, it should be an OpenAI field name.")

    openai_messages: list[dict[str, Any]] = []
    if self.system_prompt is not None:
        openai_messages.append({"role": "system", "content": self.system_prompt})

    for message in self.messages:
        openai_messages.append(message.to_openai())

    openai_request["messages"] = openai_messages
    if self.available_tools is not None:
        # Rename available_tools to tools
        openai_request["tools"] = [tool.to_openai() for tool in self.available_tools]

    if self.truncate_at_max_tokens is not None:
        raise NotImplementedError("Truncating at max tokens is not implemented for OpenAI requests.")

    openai_request.update(kwargs)

    return openai_request
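
As the loop above shows, a `system_prompt` is emitted as a leading system message in the OpenAI output; a quick sketch:

>>> from mistral_common.protocol.instruct.messages import UserMessage
>>> request = InstructRequest(
...     messages=[UserMessage(content="Hi")], system_prompt="You are a helpful assistant."
... )
>>> request.to_openai()["messages"][0]
{'role': 'system', 'content': 'You are a helpful assistant.'}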

ResponseFormat(**data)

Bases: MistralBase

The format of the response.

Attributes:

    type (ResponseFormats): The type of the response.

Examples:

>>> response_format = ResponseFormat(type=ResponseFormats.text)
Source code in .venv/lib/python3.13/site-packages/pydantic/main.py
def __init__(self, /, **data: Any) -> None:
    """Create a new model by parsing and validating input data from keyword arguments.

    Raises [`ValidationError`][pydantic_core.ValidationError] if the input data cannot be
    validated to form a valid model.

    `self` is explicitly positional-only to allow `self` as a field name.
    """
    # `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks
    __tracebackhide__ = True
    validated_self = self.__pydantic_validator__.validate_python(data, self_instance=self)
    if self is not validated_self:
        warnings.warn(
            'A custom validator is returning a value other than `self`.\n'
            "Returning anything other than `self` from a top level model validator isn't supported when validating via `__init__`.\n"
            'See the `model_validator` docs (https://docs.pydantic.dev/latest/concepts/validators/#model-validators) for more details.',
            stacklevel=2,
        )

ResponseFormats

Bases: str, Enum

Enum of the different formats of an instruct response.

Attributes:

    text: The response is plain text.
    json: The response is a JSON object.

Examples:

>>> response_format = ResponseFormats.text
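
Because `ResponseFormats` subclasses `str`, members compare equal to their string values; shown here for `text`, whose serialized value also appears in the `to_openai` examples above:

>>> ResponseFormats.text == "text"
True
>>> ResponseFormats.text.value
'text'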