1 file changed: 3 additions, 2 deletions
@@ -248,7 +248,8 @@ func (r *ChatCompletionResponseFormatJSONSchema) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
-// ChatCompletionRequestExtensions contains third-party OpenAI API extensions (e.g., vendor-specific implementations like vLLM).
+// ChatCompletionRequestExtensions contains third-party OpenAI API extensions
+// (e.g., vendor-specific implementations like vLLM).
 type ChatCompletionRequestExtensions struct {
 	// GuidedChoice is a vLLM-specific extension that restricts the model's output
 	// to one of the predefined string choices provided in this field. This feature
@@ -264,7 +265,7 @@ type ChatCompletionRequest struct {
 	Messages []ChatCompletionMessage `json:"messages"`
 	// MaxTokens The maximum number of tokens that can be generated in the chat completion.
 	// This value can be used to control costs for text generated via API.
-	// This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models.
+	// Deprecated: use MaxCompletionTokens. Not compatible with o1-series models.
 	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens
 	MaxTokens int `json:"max_tokens,omitempty"`
 	// MaxCompletionTokens An upper bound for the number of tokens that can be generated for a completion,
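A minimal usage sketch of the fields touched by this diff, assuming it targets the sashabaranov/go-openai package and that ChatCompletionRequestExtensions is embedded directly in ChatCompletionRequest. Only GuidedChoice, MaxCompletionTokens, MaxTokens, and Messages appear in the diff itself; the import path, base URL, model name, and the embedding shown below are assumptions.

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai" // assumed import path
)

func main() {
	// GuidedChoice is a vLLM-specific extension, so point the client at a vLLM server.
	cfg := openai.DefaultConfig("unused-for-local-vllm") // assumed local setup
	cfg.BaseURL = "http://localhost:8000/v1"             // assumed vLLM endpoint
	client := openai.NewClientWithConfig(cfg)

	req := openai.ChatCompletionRequest{
		Model: "meta-llama/Llama-3.1-8B-Instruct", // assumed model name
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Is the sky blue? Answer yes or no."},
		},
		// Prefer MaxCompletionTokens; MaxTokens is deprecated per the comment above.
		MaxCompletionTokens: 5,
		// Restrict the output to one of the predefined choices (vLLM extension).
		// Assumes the extensions struct is embedded directly in the request.
		ChatCompletionRequestExtensions: openai.ChatCompletionRequestExtensions{
			GuidedChoice: []string{"yes", "no"},
		},
	}

	resp, err := client.CreateChatCompletion(context.Background(), req)
	if err != nil {
		fmt.Println("chat completion failed:", err)
		return
	}
	fmt.Println(resp.Choices[0].Message.Content)
}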