5 changes: 4 additions & 1 deletion src/api/providers/base-openai-compatible-provider.ts
@@ -84,7 +84,10 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
format: "openai",
}) ?? undefined

- const temperature = this.options.modelTemperature ?? info.defaultTemperature ?? this.defaultTemperature
+ const temperature =
+ 	info.supportsTemperature === false
+ 		? undefined
+ 		: (this.options.modelTemperature ?? info.defaultTemperature ?? this.defaultTemperature)

const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
model,
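As a reading aid, here is a minimal sketch of the resolution order this hunk introduces, using a simplified stand-in for the real ModelInfo interface (the names below are illustrative, not the actual types):

// Simplified stand-in for the real ModelInfo; only the fields that
// matter for temperature resolution are modeled here.
interface ModelInfoLike {
	supportsTemperature?: boolean
	defaultTemperature?: number
}

// Resolution order implemented by the hunk above: an explicit `false`
// disables the parameter entirely; otherwise user setting, then model
// default, then provider default.
function resolveTemperature(
	info: ModelInfoLike,
	userTemperature: number | undefined,
	providerDefault: number,
): number | undefined {
	if (info.supportsTemperature === false) return undefined
	return userTemperature ?? info.defaultTemperature ?? providerDefault
}

// A missing field still counts as supported, so existing model
// configurations keep their old behavior:
resolveTemperature({}, undefined, 0.7) // 0.7
resolveTemperature({ supportsTemperature: false }, 0.5, 0.7) // undefined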
6 changes: 4 additions & 2 deletions src/api/providers/openai-compatible.ts
@@ -166,11 +166,12 @@ export abstract class OpenAICompatibleHandler extends BaseProvider implements Si
const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

// Build the request options
+ const supportsTemp = this.config.modelInfo.supportsTemperature !== false
const requestOptions: Parameters<typeof streamText>[0] = {
model: languageModel,
system: systemPrompt,
messages: aiSdkMessages,
- temperature: model.temperature ?? this.config.temperature ?? 0,
+ temperature: supportsTemp ? (model.temperature ?? this.config.temperature ?? 0) : undefined,
maxOutputTokens: this.getMaxOutputTokens(),
tools: aiSdkTools,
toolChoice: this.mapToolChoice(metadata?.tool_choice),
@@ -200,11 +201,12 @@ export abstract class OpenAICompatibleHandler extends BaseProvider implements Si
async completePrompt(prompt: string): Promise<string> {
const languageModel = this.getLanguageModel()

+ const supportsTemp = this.config.modelInfo.supportsTemperature !== false
const { text } = await generateText({
model: languageModel,
prompt,
maxOutputTokens: this.getMaxOutputTokens(),
- temperature: this.config.temperature ?? 0,
+ temperature: supportsTemp ? (this.config.temperature ?? 0) : undefined,
})

return text
5 changes: 4 additions & 1 deletion src/api/providers/openai.ts
@@ -154,7 +154,10 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl

const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
model: modelId,
- temperature: this.options.modelTemperature ?? (deepseekReasoner ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
+ temperature:
+ 	modelInfo.supportsTemperature === false
+ 		? undefined
+ 		: (this.options.modelTemperature ?? (deepseekReasoner ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0)),
messages: convertedMessages,
stream: true as const,
...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
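Note the precedence in this hunk: the supportsTemperature check wins even over the DeepSeek reasoner default. A sketch, with 0.6 standing in for DEEP_SEEK_DEFAULT_TEMPERATURE (the constant's actual value lives elsewhere in the codebase):

// Hypothetical helper mirroring the ternary above, for illustration only.
function pickTemperature(
	supportsTemperature: boolean | undefined,
	modelTemperature: number | undefined,
	deepseekReasoner: boolean,
): number | undefined {
	if (supportsTemperature === false) return undefined
	return modelTemperature ?? (deepseekReasoner ? 0.6 : 0)
}

pickTemperature(false, 0.5, true) // undefined: unsupported wins over everything
pickTemperature(undefined, undefined, true) // 0.6: reasoner default still applies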
78 changes: 78 additions & 0 deletions src/api/transform/__tests__/model-params.spec.ts
@@ -994,4 +994,82 @@ describe("getModelParams", () => {
expect(result.reasoningBudget).toBe(8192) // Default thinking tokens
})
})
describe("supportsTemperature", () => {
it("should set temperature to undefined for openai format when supportsTemperature is false", () => {
const model: ModelInfo = {
...baseModel,
supportsTemperature: false,
}

const result = getModelParams({
...openaiParams,
settings: { modelTemperature: 0.5 },
model,
})

expect(result.temperature).toBeUndefined()
})

it("should keep temperature for openai format when supportsTemperature is true", () => {
const model: ModelInfo = {
...baseModel,
supportsTemperature: true,
}

const result = getModelParams({
...openaiParams,
settings: { modelTemperature: 0.5 },
model,
})

expect(result.temperature).toBe(0.5)
})

it("should keep temperature for openai format when supportsTemperature is undefined", () => {
const result = getModelParams({
...openaiParams,
settings: { modelTemperature: 0.5 },
model: baseModel,
})

expect(result.temperature).toBe(0.5)
})

it("should set temperature to undefined for openrouter format when supportsTemperature is false", () => {
const model: ModelInfo = {
...baseModel,
supportsTemperature: false,
}

const result = getModelParams({
...openrouterParams,
settings: { modelTemperature: 0.5 },
model,
})

expect(result.temperature).toBeUndefined()
})

it("should still remove temperature for o1 model IDs in openai format", () => {
const result = getModelParams({
...openaiParams,
modelId: "o1-preview",
settings: {},
model: baseModel,
})

expect(result.temperature).toBeUndefined()
})

it("should still remove temperature for o3-mini model IDs in openai format", () => {
const result = getModelParams({
...openaiParams,
modelId: "o3-mini",
settings: {},
model: baseModel,
})

expect(result.temperature).toBeUndefined()
})
})
})
14 changes: 5 additions & 9 deletions src/api/transform/model-params.ts
@@ -153,9 +153,9 @@ export function getModelParams({
reasoning: getAnthropicReasoning({ model, reasoningBudget, reasoningEffort, settings }),
}
} else if (format === "openai") {
- // Special case for o1 and o3-mini, which don't support temperature.
- // TODO: Add a `supportsTemperature` field to the model info.
- if (modelId.startsWith("o1") || modelId.startsWith("o3-mini")) {
+ // Omit temperature for models that don't support it (e.g. o1, o3-mini,
+ // claude-opus-4-7 proxied via OpenAI-compatible gateways).
+ if (model.supportsTemperature === false || modelId.startsWith("o1") || modelId.startsWith("o3-mini")) {
params.temperature = undefined
}

@@ -172,12 +172,8 @@ export function getModelParams({
reasoning: getGeminiReasoning({ model, reasoningBudget, reasoningEffort, settings }),
}
} else {
- // Special case for o1-pro, which doesn't support temperature.
- // Note that OpenRouter's `supported_parameters` field includes
- // `temperature`, which is probably a bug.
- // TODO: Add a `supportsTemperature` field to the model info and populate
- // it appropriately in the OpenRouter fetcher.
- if (modelId === "openai/o1-pro") {
+ // Omit temperature for models that don't support it.
+ if (model.supportsTemperature === false || modelId === "openai/o1-pro") {
params.temperature = undefined
}

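The spec additions above pin this behavior down; roughly, a caller now sees the following (a sketch that reuses the spec file's baseModel fixture and its openai-format params, with a hypothetical model ID that matches neither o1 nor o3-mini):

const result = getModelParams({
	format: "openai",
	modelId: "my-proxied-model", // hypothetical ID
	settings: { modelTemperature: 0.5 },
	model: { ...baseModel, supportsTemperature: false },
})

result.temperature // undefined: the user's 0.5 is dropped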
@@ -412,6 +412,32 @@ export const OpenAICompatible = ({
</div>
</div>

<div>
<div className="flex items-center gap-1">
<Checkbox
checked={apiConfiguration?.openAiCustomModelInfo?.supportsTemperature ?? true}
onChange={handleInputChange("openAiCustomModelInfo", (checked) => {
return {
...(apiConfiguration?.openAiCustomModelInfo || openAiModelInfoSaneDefaults),
supportsTemperature: checked,
}
})}>
<span className="font-medium">
{t("settings:providers.customModel.temperatureSupport.label")}
</span>
</Checkbox>
<StandardTooltip content={t("settings:providers.customModel.temperatureSupport.description")}>
<i
className="codicon codicon-info text-vscode-descriptionForeground"
style={{ fontSize: "12px" }}
/>
</StandardTooltip>
</div>
<div className="text-sm text-vscode-descriptionForeground pt-1">
{t("settings:providers.customModel.temperatureSupport.description")}
</div>
</div>

<div>
<VSCodeTextField
value={
4 changes: 4 additions & 0 deletions webview-ui/src/i18n/locales/en/settings.json
@@ -567,6 +567,10 @@
"label": "Prompt Caching",
"description": "Is this model capable of caching prompts?"
},
"temperatureSupport": {
"label": "Supports Temperature",
"description": "Does this model accept a temperature parameter? Uncheck for models like Claude Opus 4.7 that have deprecated temperature."
},
"pricing": {
"input": {
"label": "Input Price",
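Taken together, unchecking the new box should persist something like the following custom model info (a sketch; openAiModelInfoSaneDefaults is the fallback object the component above already spreads):

// Hypothetical resulting value of openAiCustomModelInfo once the
// checkbox is unchecked; every field except the new flag comes from
// openAiModelInfoSaneDefaults, exactly as in the onChange handler above.
const openAiCustomModelInfo = {
	...openAiModelInfoSaneDefaults,
	supportsTemperature: false, // providers will now omit `temperature`
}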