Skip to content

Commit cb1a310

Browse files
committed
feat: Implement and document parallel function calling for Google Gemini.
1 parent 983af6b commit cb1a310

File tree

5 files changed

+431
-129
lines changed

5 files changed

+431
-129
lines changed

docs/EXAMPLES.md

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -380,6 +380,54 @@ console.log(result.answer);
380380
// "The current weather in Tokyo is 72°F and sunny. Recent news about Tokyo includes..."
381381
```
382382

### 4.5. Parallel Function Calling

Execute multiple tools in parallel for complex queries.

```typescript
import { ax, ai, type AxFunction } from '@ax-llm/ax';

const functions: AxFunction[] = [
  {
    name: 'getCurrentWeather',
    description: 'get the current weather for a location',
    func: async ({ location }) => ({ temperature: '22C', condition: 'Sunny' }),
    parameters: {
      type: 'object',
      properties: {
        location: { type: 'string' }
      },
      required: ['location']
    }
  },
  {
    name: 'getCurrentTime',
    description: 'get the current time for a location',
    func: async ({ location }) => ({ time: '14:30' }),
    parameters: {
      type: 'object',
      properties: {
        location: { type: 'string' }
      },
      required: ['location']
    }
  }
];

const agent = ax(
  'query:string -> report:string "Comprehensive report"',
  { functions }
);

const result = await agent.forward(
  ai({ name: 'google-gemini', config: { model: 'gemini-1.5-pro-latest' } }),
  { query: "Compare the weather and time in Tokyo, New York, and London." }
);

// The AI will call weather and time functions for all 3 cities in parallel
console.log(result.report);
```

### 5. Streaming Responses
384432

385433
Stream responses for real-time user feedback.

src/ax/ai/google-gemini/api.test.ts

Lines changed: 102 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -253,4 +253,106 @@ describe('AxAIGoogleGemini model key preset merging', () => {
253253
expect(assistantMsg.parts[0].functionCall.name).toBe('foo');
254254
expect(assistantMsg.parts[0].thought_signature).toBe('sig123');
255255
});
it('groups parallel function responses into a single user turn', async () => {
  const ai = new AxAIGoogleGemini({
    apiKey: 'key',
    config: { model: AxAIGoogleGeminiModel.Gemini3ProPreview },
    models: [],
  });

  const capture: { lastBody?: any } = {};
  const fetch = createMockFetch(
    {
      candidates: [
        { content: { parts: [{ text: 'ok' }] }, finishReason: 'STOP' },
      ],
    },
    capture
  );
  ai.setOptions({ fetch });

  const history: any[] = [
    { role: 'user', content: 'call parallel' },
    {
      role: 'assistant',
      functionCalls: [
        {
          function: { name: 'f1', params: '{}' },
          id: 'id1',
          type: 'function',
        },
        {
          function: { name: 'f2', params: '{}' },
          id: 'id2',
          type: 'function',
        },
      ],
    },
    { role: 'function', functionId: 'f1', result: 'r1' },
    { role: 'function', functionId: 'f2', result: 'r2' },
  ];

  await ai.chat({ chatPrompt: history }, { stream: false });

  const reqBody = capture.lastBody;
  // Expected: User, Model, User (with 2 parts)
  expect(reqBody.contents).toHaveLength(3);
  const lastUserMsg = reqBody.contents[2];
  expect(lastUserMsg.role).toBe('user');
  expect(lastUserMsg.parts).toHaveLength(2);
  expect(lastUserMsg.parts[0].functionResponse.name).toBe('f1');
  expect(lastUserMsg.parts[1].functionResponse.name).toBe('f2');
});

it('does not set thought: true on text part when function calls are present', async () => {
  const ai = new AxAIGoogleGemini({
    apiKey: 'key',
    config: { model: AxAIGoogleGeminiModel.Gemini3ProPreview },
    models: [],
  });

  const capture: { lastBody?: any } = {};
  const fetch = createMockFetch(
    {
      candidates: [
        { content: { parts: [{ text: 'ok' }] }, finishReason: 'STOP' },
      ],
    },
    capture
  );
  ai.setOptions({ fetch });

  const history: any[] = [
    { role: 'user', content: 'call with thought' },
    {
      role: 'assistant',
      thoughtBlock: { data: 'Thinking...', signature: 'sig1' },
      functionCalls: [
        {
          function: { name: 'f1', params: '{}' },
          id: 'id1',
          type: 'function',
        },
      ],
    },
    { role: 'function', functionId: 'f1', result: 'r1' },
  ];

  await ai.chat({ chatPrompt: history }, { stream: false });

  const reqBody = capture.lastBody;
  const assistantMsg = reqBody.contents[1];
  expect(assistantMsg.role).toBe('model');
  expect(assistantMsg.parts).toHaveLength(2);

  // Part 0: Text (Thought) - Should NOT have thought: true
  expect(assistantMsg.parts[0].text).toBe('Thinking...');
  expect(assistantMsg.parts[0].thought).toBeUndefined();
  expect(assistantMsg.parts[0].thought_signature).toBeUndefined();

  // Part 1: Function Call - Should have signature
  expect(assistantMsg.parts[1].functionCall.name).toBe('f1');
  expect(assistantMsg.parts[1].thought_signature).toBe('sig1');
});
});

0 commit comments

Comments
 (0)