50 changes: 29 additions & 21 deletions pydantic_ai_slim/pydantic_ai/models/google.py
@@ -549,15 +549,20 @@ async def _map_messages(
                     elif isinstance(part, UserPromptPart):
                         message_parts.extend(await self._map_user_prompt(part))
                     elif isinstance(part, ToolReturnPart):
-                        message_parts.append(
-                            {
-                                'function_response': {
-                                    'name': part.tool_name,
-                                    'response': part.model_response_object(),
-                                    'id': part.tool_call_id,
+                        if self.profile.supports_tools:
+                            message_parts.append(
+                                {
+                                    'function_response': {
+                                        'name': part.tool_name,
+                                        'response': part.model_response_object(),
+                                        'id': part.tool_call_id,
+                                    }
                                 }
-                            }
-                        )
+                            )
+                        else:
+                            message_parts.append(
+                                {'text': f'Tool {part.tool_name} responded with {part.model_response_object()}'}
+                            )
                     elif isinstance(part, RetryPromptPart):
                         if part.tool_name is None:
                             message_parts.append({'text': part.model_response()})
@@ -577,7 +582,7 @@ async def _map_messages(
                 if message_parts:
                     contents.append({'role': 'user', 'parts': message_parts})
             elif isinstance(m, ModelResponse):
-                maybe_content = _content_model_response(m, self.system)
+                maybe_content = _content_model_response(m, self.system, self.profile.supports_tools)
                 if maybe_content:
                     contents.append(maybe_content)
             else:
@@ -786,7 +791,7 @@ def timestamp(self) -> datetime:
         return self._timestamp


-def _content_model_response(m: ModelResponse, provider_name: str) -> ContentDict | None:  # noqa: C901
+def _content_model_response(m: ModelResponse, provider_name: str, supports_tools: bool) -> ContentDict | None:  # noqa: C901
     parts: list[PartDict] = []
     thinking_part_signature: str | None = None
     function_call_requires_signature: bool = True
@@ -803,17 +808,20 @@ def _content_model_response(m: ModelResponse, provider_name: str) -> ContentDict
             thinking_part_signature = None

         if isinstance(item, ToolCallPart):
-            function_call = FunctionCallDict(name=item.tool_name, args=item.args_as_dict(), id=item.tool_call_id)
-            part['function_call'] = function_call
-            if function_call_requires_signature and not part.get('thought_signature'):
-                # Per https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#migrating_from_other_models:
-                # > If you are transferring a conversation trace from another model (e.g., Gemini 2.5) or injecting
-                # > a custom function call that was not generated by Gemini 3, you will not have a valid signature.
-                # > To bypass strict validation in these specific scenarios, populate the field with this specific
-                # > dummy string: "thoughtSignature": "context_engineering_is_the_way_to_go"
-                part['thought_signature'] = b'context_engineering_is_the_way_to_go'
-                # Only the first function call requires a signature
-                function_call_requires_signature = False
+            if supports_tools:
+                function_call = FunctionCallDict(name=item.tool_name, args=item.args_as_dict(), id=item.tool_call_id)
+                part['function_call'] = function_call
+                if function_call_requires_signature and not part.get('thought_signature'):
+                    # Per https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#migrating_from_other_models:
+                    # > If you are transferring a conversation trace from another model (e.g., Gemini 2.5) or injecting
+                    # > a custom function call that was not generated by Gemini 3, you will not have a valid signature.
+                    # > To bypass strict validation in these specific scenarios, populate the field with this specific
+                    # > dummy string: "thoughtSignature": "context_engineering_is_the_way_to_go"
+                    part['thought_signature'] = b'context_engineering_is_the_way_to_go'
+                    # Only the first function call requires a signature
+                    function_call_requires_signature = False
+            else:
+                part['text'] = f'Tool {item.tool_name} called with args {item.args_as_json_str()}'
Collaborator
Because the arguments and return value can be very long, I want those on a separate line.

Also because there could be parallel tool calls with the same name, we should include the tool call ID.

I'd also want to format this in a more special way so the model understands this is not actually a user/assistant "message".

So I suggest doing something similar to how we send text files to OpenAI:

def _inline_text_file_part(text: str, *, media_type: str, identifier: str) -> ChatCompletionContentPartTextParam:
    text = '\n'.join(
        [
            f'-----BEGIN FILE id="{identifier}" type="{media_type}"-----',
            text,
            f'-----END FILE id="{identifier}"-----',
        ]
    )
    return ChatCompletionContentPartTextParam(text=text, type='text')

And we're doing that for Google as well: #3269.
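For illustration, a rough sketch of how that same delimiter framing could be applied to the tool-call fallback in this PR, assuming it sits next to _content_model_response in google.py where PartDict is already imported. The helper name and the exact delimiter wording are hypothetical, not taken from this PR or #3269:

def _tool_call_fallback_text_part(tool_name: str, tool_call_id: str, args_json: str) -> PartDict:
    # Hypothetical sketch: frame the unsupported tool call as clearly delimited text,
    # carrying the tool call ID, so the model can tell it apart from an ordinary message.
    text = '\n'.join(
        [
            f'-----BEGIN TOOL CALL name="{tool_name}" id="{tool_call_id}"-----',
            args_json,
            f'-----END TOOL CALL id="{tool_call_id}"-----',
        ]
    )
    return {'text': text}

A ToolReturnPart fallback could reuse the same shape, with the serialized response in place of the arguments.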

         elif isinstance(item, TextPart):
             part['text'] = item.content
         elif isinstance(item, ThinkingPart):
26 changes: 26 additions & 0 deletions tests/models/test_google.py
@@ -4307,6 +4307,7 @@ def test_google_thought_signature_on_thinking_part():
             provider_name='google-gla',
         ),
         'google-gla',
+        True,
     )
     new_google_response = _content_model_response(
         ModelResponse(
@@ -4318,6 +4319,7 @@
             provider_name='google-gla',
         ),
         'google-gla',
+        True,
     )
     assert old_google_response == snapshot(
         {
@@ -4342,6 +4344,7 @@
             provider_name='google-gla',
         ),
         'google-gla',
+        True,
     )
     new_google_response = _content_model_response(
         ModelResponse(
@@ -4352,6 +4355,7 @@
             provider_name='google-gla',
         ),
         'google-gla',
+        True,
     )
     assert old_google_response == snapshot(
         {
@@ -4376,6 +4380,7 @@
             provider_name='google-gla',
         ),
         'google-gla',
+        True,
     )
     new_google_response = _content_model_response(
         ModelResponse(
@@ -4386,6 +4391,7 @@
             provider_name='google-gla',
         ),
         'google-gla',
+        True,
     )
     assert old_google_response == snapshot(
         {
@@ -4412,6 +4418,7 @@ def test_google_missing_tool_call_thought_signature():
             provider_name='openai',
         ),
         'google-gla',
+        True,
     )
     assert google_response == snapshot(
         {
@@ -4425,3 +4432,22 @@
             ],
         }
     )
+
+
+def test_google_mapping_when_does_not_support_tools():
Collaborator
We should also test the mapping of the tool result

Author
@DouweM Is there a way I can generate a cassette file (I was initially attempting to write my own, but after thinking about it that feels... wrong)? I wanted to test for this behavior via Agent.run.

Collaborator
@sidg1215 If you run the test with uv run pytest <path>::<test> --record-mode=rewrite, vcrpy will automatically record the cassette for you :)

+    google_response = _content_model_response(
+        ModelResponse(
+            parts=[
+                ToolCallPart(tool_name='tool', args={}, tool_call_id='tool_call_id'),
+            ],
+            provider_name='openai',
+        ),
+        'google-gla',
+        False,
+    )
+    assert google_response == snapshot(
+        {
+            'role': 'model',
+            'parts': [{'text': 'Tool tool called with args {}'}],
+        }
+    )