Skip to content

Commit 24daf9b

Browse files
SDK regeneration (elevenlabs#564)
Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
1 parent edc6178 commit 24daf9b

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

46 files changed

+2243
-91
lines changed

.mock/asyncapi.yml

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -147,7 +147,7 @@ channels:
147147
contexts, flush them, or close them independently. A `close_socket` message can be used to terminate
148148
the entire connection gracefully.
149149
150-
For more information on how to use this API for conversational agents see the [conversational agents guide](/docs/best-practices/conversational-agents).
150+
For more information on best practices for how to use this API, please see the [multi context websocket guide](/docs/cookbooks/multi-context-web-socket).
151151
152152
bindings:
153153
ws:
@@ -624,18 +624,18 @@ components:
624624
items:
625625
$ref: '#/components/schemas/PronunciationDictionaryLocator'
626626
description: Optional list of pronunciation dictionary locators. Can only be provided in the first message for a given context_id.
627-
context_id:
627+
contextId:
628628
type: string
629629
nullable: true
630630
description: An identifier for the text-to-speech context. Allows managing multiple independent audio generation streams over a single WebSocket connection. If omitted, a default context is used.
631631
close_context:
632632
type: boolean
633633
default: false
634-
description: If true, closes the specified `context_id`. No further audio will be generated for this context. The `text` field is ignored.
634+
description: If true, closes the specified `contextId`. No further audio will be generated for this context. The `text` field is ignored.
635635
close_socket:
636636
type: boolean
637637
default: false
638-
description: If true, flushes all contexts and closes the entire WebSocket connection. The `text` and `context_id` fields are ignored.
638+
description: If true, flushes all contexts and closes the entire WebSocket connection. The `text` and `contextId` fields are ignored.
639639

640640
WebsocketTTSServerMessageMulti:
641641
type: object
@@ -762,9 +762,9 @@ components:
762762
alignment:
763763
$ref: '#/components/schemas/Alignment'
764764
nullable: true
765-
context_id:
765+
contextId:
766766
type: string
767-
description: The context_id for which this audio is.
767+
description: The contextId for which this audio is.
768768

769769
FinalOutputMulti:
770770
type: object

.mock/definition/__package__.yml

Lines changed: 110 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -212,26 +212,6 @@ types:
212212
source:
213213
openapi: openapi.json
214214
AgentConfig:
215-
properties:
216-
first_message:
217-
type: optional<string>
218-
docs: >-
219-
If non-empty, the first message the agent will say. If empty, the
220-
agent waits for the user to start the discussion.
221-
default: ''
222-
language:
223-
type: optional<string>
224-
docs: Language of the agent - used for ASR and TTS
225-
default: en
226-
dynamic_variables:
227-
type: optional<DynamicVariablesConfig>
228-
docs: Configuration for dynamic variables
229-
prompt:
230-
type: optional<PromptAgent>
231-
docs: The prompt for the agent
232-
source:
233-
openapi: openapi.json
234-
AgentConfigDbModel:
235215
properties:
236216
first_message:
237217
type: optional<string>
@@ -1253,8 +1233,19 @@ types:
12531233
properties: {}
12541234
source:
12551235
openapi: openapi.json
1236+
ChapterContentBlockInputModelSubType:
1237+
enum:
1238+
- p
1239+
- h1
1240+
- h2
1241+
- h3
1242+
inline: true
1243+
source:
1244+
openapi: openapi.json
12561245
ChapterContentBlockInputModel:
12571246
properties:
1247+
sub_type:
1248+
type: optional<ChapterContentBlockInputModelSubType>
12581249
block_id:
12591250
type: optional<string>
12601251
nodes:
@@ -2135,7 +2126,7 @@ types:
21352126
agent and an AI user.
21362127
properties:
21372128
simulated_user_config:
2138-
type: AgentConfigDbModel
2129+
type: AgentConfig
21392130
tool_mock_config:
21402131
type: optional<map<string, ToolMockConfig>>
21412132
partial_conversation_history:
@@ -2182,6 +2173,9 @@ types:
21822173
expiration_time_unix_secs:
21832174
type: optional<integer>
21842175
docs: The expiration time of the token in unix seconds
2176+
conversation_id:
2177+
type: optional<string>
2178+
docs: The ID of the conversation
21852179
purpose:
21862180
type: optional<ConversationTokenPurpose>
21872181
docs: The purpose of the token
@@ -3017,6 +3011,11 @@ types:
30173011
type: WidgetConfigResponseModel
30183012
source:
30193013
openapi: openapi.json
3014+
GetAgentKnowledgebaseSizeResponseModel:
3015+
properties:
3016+
number_of_pages: double
3017+
source:
3018+
openapi: openapi.json
30203019
GetAgentLinkResponseModel:
30213020
properties:
30223021
agent_id:
@@ -3651,6 +3650,19 @@ types:
36513650
type: optional<map<string, LlmInputOutputTokensUsage>>
36523651
source:
36533652
openapi: openapi.json
3653+
LlmUsageCalculatorLlmResponseModel:
3654+
properties:
3655+
llm:
3656+
type: Llm
3657+
price_per_minute: double
3658+
source:
3659+
openapi: openapi.json
3660+
LlmUsageCalculatorResponseModel:
3661+
properties:
3662+
llm_prices:
3663+
type: list<LlmUsageCalculatorLlmResponseModel>
3664+
source:
3665+
openapi: openapi.json
36543666
LanguageAddedResponse:
36553667
properties:
36563668
version: integer
@@ -4067,6 +4079,66 @@ types:
40674079
docs: Whether the user is on the watchlist.
40684080
source:
40694081
openapi: openapi.json
4082+
NativeMcpToolConfigInput:
4083+
docs: A Native MCP tool is a tool that is used to call a Native MCP server
4084+
properties:
4085+
id:
4086+
type: optional<string>
4087+
default: ''
4088+
name:
4089+
type: string
4090+
validation:
4091+
pattern: ^[a-zA-Z0-9_-]{1,64}$
4092+
minLength: 0
4093+
description:
4094+
type: string
4095+
validation:
4096+
minLength: 0
4097+
response_timeout_secs:
4098+
type: optional<integer>
4099+
docs: The maximum time in seconds to wait for the tool call to complete.
4100+
default: 20
4101+
parameters:
4102+
type: optional<ObjectJsonSchemaPropertyInput>
4103+
docs: Schema for any parameters the LLM needs to provide to the MCP tool.
4104+
mcp_tool_name:
4105+
type: string
4106+
docs: The name of the MCP tool to call
4107+
mcp_server_id:
4108+
type: string
4109+
docs: The id of the MCP server to call
4110+
source:
4111+
openapi: openapi.json
4112+
NativeMcpToolConfigOutput:
4113+
docs: A Native MCP tool is a tool that is used to call a Native MCP server
4114+
properties:
4115+
id:
4116+
type: optional<string>
4117+
default: ''
4118+
name:
4119+
type: string
4120+
validation:
4121+
pattern: ^[a-zA-Z0-9_-]{1,64}$
4122+
minLength: 0
4123+
description:
4124+
type: string
4125+
validation:
4126+
minLength: 0
4127+
response_timeout_secs:
4128+
type: optional<integer>
4129+
docs: The maximum time in seconds to wait for the tool call to complete.
4130+
default: 20
4131+
parameters:
4132+
type: optional<ObjectJsonSchemaPropertyOutput>
4133+
docs: Schema for any parameters the LLM needs to provide to the MCP tool.
4134+
mcp_tool_name:
4135+
type: string
4136+
docs: The name of the MCP tool to call
4137+
mcp_server_id:
4138+
type: string
4139+
docs: The id of the MCP server to call
4140+
source:
4141+
openapi: openapi.json
40704142
ObjectJsonSchemaPropertyInputPropertiesValue:
40714143
discriminated: false
40724144
union:
@@ -4673,6 +4745,8 @@ types:
46734745
type: ClientToolConfigInput
46744746
mcp:
46754747
type: McpToolConfigInput
4748+
native_mcp:
4749+
type: NativeMcpToolConfigInput
46764750
system:
46774751
type: SystemToolConfigInput
46784752
webhook:
@@ -4707,6 +4781,9 @@ types:
47074781
mcp_server_ids:
47084782
type: optional<list<string>>
47094783
docs: A list of MCP server ids to be used by the agent
4784+
native_mcp_server_ids:
4785+
type: optional<list<string>>
4786+
docs: A list of Native MCP server ids to be used by the agent
47104787
knowledge_base:
47114788
type: optional<list<KnowledgeBaseLocator>>
47124789
docs: A list of knowledge bases to be used by the agent
@@ -4730,6 +4807,8 @@ types:
47304807
type: ClientToolConfigOutput
47314808
mcp:
47324809
type: McpToolConfigOutput
4810+
native_mcp:
4811+
type: NativeMcpToolConfigOutput
47334812
system:
47344813
type: SystemToolConfigOutput
47354814
webhook:
@@ -4745,6 +4824,8 @@ types:
47454824
type: ClientToolConfigInput
47464825
mcp:
47474826
type: McpToolConfigInput
4827+
native_mcp:
4828+
type: NativeMcpToolConfigInput
47484829
system:
47494830
type: SystemToolConfigInput
47504831
webhook:
@@ -4779,6 +4860,9 @@ types:
47794860
mcp_server_ids:
47804861
type: optional<list<string>>
47814862
docs: A list of MCP server ids to be used by the agent
4863+
native_mcp_server_ids:
4864+
type: optional<list<string>>
4865+
docs: A list of Native MCP server ids to be used by the agent
47824866
knowledge_base:
47834867
type: optional<list<KnowledgeBaseLocator>>
47844868
docs: A list of knowledge bases to be used by the agent
@@ -7506,7 +7590,7 @@ types:
75067590
docs: >-
75077591
Optional list of pronunciation dictionary locators. Can only be
75087592
provided in the first message for a given context_id.
7509-
context_id:
7593+
contextId:
75107594
type: optional<string>
75117595
docs: >-
75127596
An identifier for the text-to-speech context. Allows managing multiple
@@ -7515,14 +7599,14 @@ types:
75157599
close_context:
75167600
type: optional<boolean>
75177601
docs: >-
7518-
If true, closes the specified `context_id`. No further audio will be
7602+
If true, closes the specified `contextId`. No further audio will be
75197603
generated for this context. The `text` field is ignored.
75207604
default: false
75217605
close_socket:
75227606
type: optional<boolean>
75237607
docs: >-
75247608
If true, flushes all contexts and closes the entire WebSocket
7525-
connection. The `text` and `context_id` fields are ignored.
7609+
connection. The `text` and `contextId` fields are ignored.
75267610
default: false
75277611
source:
75287612
openapi: asyncapi.yml
@@ -7651,9 +7735,9 @@ types:
76517735
docs: Base64 encoded audio chunk.
76527736
normalizedAlignment: optional<NormalizedAlignment>
76537737
alignment: optional<Alignment>
7654-
context_id:
7738+
contextId:
76557739
type: optional<string>
7656-
docs: The context_id for which this audio is.
7740+
docs: The contextId for which this audio is.
76577741
source:
76587742
openapi: asyncapi.yml
76597743
FinalOutputMulti:
Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
imports:
2+
root: ../../__package__.yml
3+
service:
4+
auth: false
5+
base-path: ''
6+
endpoints:
7+
size:
8+
path: /v1/convai/agent/{agent_id}/knowledge-base/size
9+
method: GET
10+
auth: false
11+
docs: Returns the number of pages in the agent's knowledge base.
12+
source:
13+
openapi: openapi.json
14+
path-parameters:
15+
agent_id: string
16+
display-name: Returns The Size Of The Agent's Knowledge Base
17+
response:
18+
docs: Successful Response
19+
type: root.GetAgentKnowledgebaseSizeResponseModel
20+
status-code: 200
21+
errors:
22+
- root.UnprocessableEntityError
23+
examples:
24+
- path-parameters:
25+
agent_id: agent_id
26+
response:
27+
body:
28+
number_of_pages: 1.1
29+
audiences:
30+
- convai
31+
source:
32+
openapi: openapi.json

.mock/definition/conversationalAi/agents/link.yml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,9 +29,10 @@ service:
2929
body:
3030
agent_id: J3Pbu5gP6NNKBscdCdwB
3131
token:
32-
agent_id: J3Pbu5gP6NNKBscdCdwB
32+
agent_id: agent_J3Pbu5gP6NNKBscdCdwB
3333
conversation_token: '1234567890'
3434
expiration_time_unix_secs: 1716153600
35+
conversation_id: conv_J3Pbu5gP6NNKBscdCdwB
3536
purpose: signed_url
3637
audiences:
3738
- convai
Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
imports:
2+
root: ../../__package__.yml
3+
service:
4+
auth: false
5+
base-path: ''
6+
endpoints:
7+
calculate:
8+
path: /v1/convai/agent/{agent_id}/llm-usage/calculate
9+
method: POST
10+
auth: false
11+
docs: Calculates expected number of LLM tokens needed for the specified agent.
12+
source:
13+
openapi: openapi.json
14+
path-parameters:
15+
agent_id: string
16+
display-name: Calculate Expected Llm Usage For An Agent
17+
request:
18+
name: LlmUsageCalculatorRequestModel
19+
body:
20+
properties:
21+
prompt_length:
22+
type: optional<integer>
23+
docs: Length of the prompt in characters.
24+
number_of_pages:
25+
type: optional<integer>
26+
docs: >-
27+
Pages of content in pdf documents OR urls in agent's Knowledge
28+
Base.
29+
rag_enabled:
30+
type: optional<boolean>
31+
docs: Whether RAG is enabled.
32+
content-type: application/json
33+
response:
34+
docs: Successful Response
35+
type: root.LlmUsageCalculatorResponseModel
36+
status-code: 200
37+
errors:
38+
- root.UnprocessableEntityError
39+
examples:
40+
- path-parameters:
41+
agent_id: agent_id
42+
request: {}
43+
response:
44+
body:
45+
llm_prices:
46+
- llm: gpt-4o-mini
47+
price_per_minute: 1.1
48+
audiences:
49+
- convai
50+
source:
51+
openapi: openapi.json

0 commit comments

Comments
 (0)