Skip to content

Commit 65e9a17

Browse files
authored
[open ai] Rename deploymentOrModelName to deploymentName (Azure#26696)
1 parent: 4352bd0 · commit: 65e9a17

File tree

2 files changed

+25
-25
lines changed

2 files changed: +25 −25 lines changed

sdk/openai/openai/review/openai.api.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -197,12 +197,12 @@ export class OpenAIClient {
197197
constructor(openAiApiKey: KeyCredential, options?: OpenAIClientOptions);
198198
beginAzureBatchImageGeneration(prompt: string, options?: ImageGenerationOptions): Promise<ImageGenerationResponse>;
199199
getAzureBatchImageGenerationOperationStatus(operationId: string, options?: GetAzureBatchImageGenerationOperationStatusOptions): Promise<ImageGenerationResponse>;
200-
getChatCompletions(deploymentOrModelName: string, messages: ChatMessage[], options?: GetChatCompletionsOptions): Promise<ChatCompletions>;
201-
getCompletions(deploymentOrModelName: string, prompt: string[], options?: GetCompletionsOptions): Promise<Completions>;
202-
getEmbeddings(deploymentOrModelName: string, input: string[], options?: GetEmbeddingsOptions): Promise<Embeddings>;
200+
getChatCompletions(deploymentName: string, messages: ChatMessage[], options?: GetChatCompletionsOptions): Promise<ChatCompletions>;
201+
getCompletions(deploymentName: string, prompt: string[], options?: GetCompletionsOptions): Promise<Completions>;
202+
getEmbeddings(deploymentName: string, input: string[], options?: GetEmbeddingsOptions): Promise<Embeddings>;
203203
getImages(prompt: string, options?: ImageGenerationOptions): Promise<ImageGenerationResponse>;
204-
listChatCompletions(deploymentOrModelName: string, messages: ChatMessage[], options?: GetChatCompletionsOptions): Promise<AsyncIterable<Omit<ChatCompletions, "usage">>>;
205-
listCompletions(deploymentOrModelName: string, prompt: string[], options?: GetCompletionsOptions): Promise<AsyncIterable<Omit<Completions, "usage">>>;
204+
listChatCompletions(deploymentName: string, messages: ChatMessage[], options?: GetChatCompletionsOptions): Promise<AsyncIterable<Omit<ChatCompletions, "usage">>>;
205+
listCompletions(deploymentName: string, prompt: string[], options?: GetCompletionsOptions): Promise<AsyncIterable<Omit<Completions, "usage">>>;
206206
}
207207

208208
// @public (undocumented)

sdk/openai/openai/src/OpenAIClient.ts

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -157,34 +157,34 @@ export class OpenAIClient {
157157

158158
/**
159159
* Returns textual completions as configured for a given prompt.
160-
* @param deploymentOrModelName - Specifies either the model deployment name (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
160+
* @param deploymentName - Specifies either the model deployment name (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
161161
* @param prompt - The prompt to use for this request.
162162
* @param options - The options for this completions request.
163163
* @returns The completions for the given prompt.
164164
*/
165165
getCompletions(
166-
deploymentOrModelName: string,
166+
deploymentName: string,
167167
prompt: string[],
168168
options: GetCompletionsOptions = { requestOptions: {} }
169169
): Promise<Completions> {
170-
this.setModel(deploymentOrModelName, options);
171-
return getCompletions(this._client, prompt, deploymentOrModelName, options);
170+
this.setModel(deploymentName, options);
171+
return getCompletions(this._client, prompt, deploymentName, options);
172172
}
173173

174174
/**
175175
* Lists the completions tokens as they become available for a given prompt.
176-
* @param deploymentOrModelName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
176+
* @param deploymentName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
177177
* @param prompt - The prompt to use for this request.
178178
* @param options - The completions options for this completions request.
179179
* @returns An asynchronous iterable of completions tokens.
180180
*/
181181
listCompletions(
182-
deploymentOrModelName: string,
182+
deploymentName: string,
183183
prompt: string[],
184184
options: GetCompletionsOptions = {}
185185
): Promise<AsyncIterable<Omit<Completions, "usage">>> {
186-
this.setModel(deploymentOrModelName, options);
187-
const response = _getCompletionsSend(this._client, prompt, deploymentOrModelName, {
186+
this.setModel(deploymentName, options);
187+
const response = _getCompletionsSend(this._client, prompt, deploymentName, {
188188
...options,
189189
stream: true,
190190
});
@@ -193,50 +193,50 @@ export class OpenAIClient {
193193

194194
/**
195195
* Return the computed embeddings for a given prompt.
196-
* @param deploymentOrModelName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
196+
* @param deploymentName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
197197
* @param input - The prompt to use for this request.
198198
* @param options - The embeddings options for this embeddings request.
199199
* @returns The embeddings for the given prompt.
200200
*/
201201
getEmbeddings(
202-
deploymentOrModelName: string,
202+
deploymentName: string,
203203
input: string[],
204204
options: GetEmbeddingsOptions = { requestOptions: {} }
205205
): Promise<Embeddings> {
206-
this.setModel(deploymentOrModelName, options);
207-
return getEmbeddings(this._client, input, deploymentOrModelName, options);
206+
this.setModel(deploymentName, options);
207+
return getEmbeddings(this._client, input, deploymentName, options);
208208
}
209209

210210
/**
211211
* Get chat completions for provided chat context messages.
212-
* @param deploymentOrModelName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
212+
* @param deploymentName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
213213
* @param messages - The chat context messages to use for this request.
214214
* @param options - The chat completions options for this completions request.
215215
* @returns The chat completions for the given chat context messages.
216216
*/
217217
getChatCompletions(
218-
deploymentOrModelName: string,
218+
deploymentName: string,
219219
messages: ChatMessage[],
220220
options: GetChatCompletionsOptions = { requestOptions: {} }
221221
): Promise<ChatCompletions> {
222-
this.setModel(deploymentOrModelName, options);
223-
return getChatCompletions(this._client, messages, deploymentOrModelName, options);
222+
this.setModel(deploymentName, options);
223+
return getChatCompletions(this._client, messages, deploymentName, options);
224224
}
225225

226226
/**
227227
* Lists the chat completions tokens as they become available for a chat context.
228-
* @param deploymentOrModelName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
228+
* @param deploymentName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
229229
* @param messages - The chat context messages to use for this request.
230230
* @param options - The chat completions options for this chat completions request.
231231
* @returns An asynchronous iterable of chat completions tokens.
232232
*/
233233
listChatCompletions(
234-
deploymentOrModelName: string,
234+
deploymentName: string,
235235
messages: ChatMessage[],
236236
options: GetChatCompletionsOptions = { requestOptions: {} }
237237
): Promise<AsyncIterable<Omit<ChatCompletions, "usage">>> {
238-
this.setModel(deploymentOrModelName, options);
239-
const response = _getChatCompletionsSend(this._client, messages, deploymentOrModelName, {
238+
this.setModel(deploymentName, options);
239+
const response = _getChatCompletionsSend(this._client, messages, deploymentName, {
240240
...options,
241241
stream: true,
242242
});

0 commit comments

Comments (0)