
Commit 82aa8aa

🤖 feat: update models to GPT-5.2 (#1103)
Updates the model definitions for the newly released GPT-5.2.

## Changes

- **GPT base model**: Updated from `gpt-5.1` to `gpt-5.2`
- **GPT-5.2 Pro**: Added new model with `medium`, `high`, `xhigh` reasoning levels
- **Codex models**: Remain on 5.1 (`gpt-5.1-codex`, `gpt-5.1-codex-mini`, `gpt-5.1-codex-max`)
- **Model data**: Added pricing/context info for `gpt-5.2` and `gpt-5.2-pro` in models-extra.ts
- **Thinking policy**: Updated for gpt-5.2-pro's 3-level reasoning support
- **Aliases**: `gpt` → `openai:gpt-5.2`, `gpt-pro` → `openai:gpt-5.2-pro`

_Generated with `mux`_
1 parent 4740e25 commit 82aa8aa

File tree

11 files changed: +2587 -47 lines changed


docs/models.mdx

Lines changed: 2 additions & 2 deletions

```diff
@@ -20,8 +20,8 @@ mux ships with a curated set of first-class models that we keep up to date with
 | Opus 4.5 | anthropic:claude-opus-4-5 | `opus` | |
 | Sonnet 4.5 | anthropic:claude-sonnet-4-5 | `sonnet` | |
 | Haiku 4.5 | anthropic:claude-haiku-4-5 | `haiku` | |
-| GPT-5.1 | openai:gpt-5.1 | `gpt-5.1` | |
-| GPT-5 Pro | openai:gpt-5-pro | `gpt-5-pro` | |
+| GPT-5.2 | openai:gpt-5.2 | `gpt` | |
+| GPT-5.2 Pro | openai:gpt-5.2-pro | `gpt-pro` | |
 | GPT-5.1 Codex | openai:gpt-5.1-codex | `codex` | |
 | GPT-5.1 Codex Mini | openai:gpt-5.1-codex-mini | `codex-mini` | |
 | GPT-5.1 Codex Max | openai:gpt-5.1-codex-max | `codex-max` | |
```

src/browser/utils/thinking/policy.test.ts

Lines changed: 32 additions & 3 deletions

```diff
@@ -52,15 +52,44 @@ describe("getThinkingPolicyForModel", () => {
     ]);
   });
 
-  test("returns single HIGH for gpt-5-pro base model", () => {
+  test("returns medium/high/xhigh for gpt-5.2-pro", () => {
+    expect(getThinkingPolicyForModel("openai:gpt-5.2-pro")).toEqual(["medium", "high", "xhigh"]);
+  });
+
+  test("returns medium/high/xhigh for gpt-5.2-pro behind mux-gateway", () => {
+    expect(getThinkingPolicyForModel("mux-gateway:openai/gpt-5.2-pro")).toEqual([
+      "medium",
+      "high",
+      "xhigh",
+    ]);
+  });
+
+  test("returns 5 levels including xhigh for gpt-5.1-codex-max behind mux-gateway", () => {
+    expect(getThinkingPolicyForModel("mux-gateway:openai/gpt-5.1-codex-max")).toEqual([
+      "off",
+      "low",
+      "medium",
+      "high",
+      "xhigh",
+    ]);
+  });
+  test("returns medium/high/xhigh for gpt-5.2-pro with version suffix", () => {
+    expect(getThinkingPolicyForModel("openai:gpt-5.2-pro-2025-12-11")).toEqual([
+      "medium",
+      "high",
+      "xhigh",
+    ]);
+  });
+
+  test("returns single HIGH for gpt-5-pro base model (legacy)", () => {
     expect(getThinkingPolicyForModel("openai:gpt-5-pro")).toEqual(["high"]);
   });
 
-  test("returns single HIGH for gpt-5-pro with version suffix", () => {
+  test("returns single HIGH for gpt-5-pro with version suffix (legacy)", () => {
     expect(getThinkingPolicyForModel("openai:gpt-5-pro-2025-10-06")).toEqual(["high"]);
   });
 
-  test("returns single HIGH for gpt-5-pro with whitespace after colon", () => {
+  test("returns single HIGH for gpt-5-pro with whitespace after colon (legacy)", () => {
    expect(getThinkingPolicyForModel("openai: gpt-5-pro")).toEqual(["high"]);
   });
 
```

src/browser/utils/thinking/policy.ts

Lines changed: 19 additions & 6 deletions

```diff
@@ -25,30 +25,43 @@ export type ThinkingPolicy = readonly ThinkingLevel[];
  *
  * Rules:
  * - openai:gpt-5.1-codex-max → ["off", "low", "medium", "high", "xhigh"] (5 levels including xhigh)
- * - openai:gpt-5-pro → ["high"] (only supported level)
+ * - openai:gpt-5.2-pro → ["medium", "high", "xhigh"] (3 levels)
+ * - openai:gpt-5-pro → ["high"] (only supported level, legacy)
  * - gemini-3 → ["low", "high"] (thinking level only)
  * - default → ["off", "low", "medium", "high"] (standard 4 levels)
  *
  * Tolerates version suffixes (e.g., gpt-5-pro-2025-10-06).
  * Does NOT match gpt-5-pro-mini (uses negative lookahead).
  */
 export function getThinkingPolicyForModel(modelString: string): ThinkingPolicy {
-  // Normalize to be robust to provider prefixes, whitespace, and version suffixes
+  // Normalize to be robust to provider prefixes, whitespace, gateway wrappers, and version suffixes
   const normalized = modelString.trim().toLowerCase();
   const withoutPrefix = normalized.replace(/^[a-z0-9_-]+:\s*/, "");
 
+  // Many providers/proxies encode the upstream provider as a path segment:
+  //   mux-gateway:openai/gpt-5.2-pro -> openai/gpt-5.2-pro -> gpt-5.2-pro
+  const withoutProviderNamespace = withoutPrefix.replace(/^[a-z0-9_-]+\//, "");
+
   // GPT-5.1-Codex-Max supports 5 reasoning levels including xhigh (Extra High)
-  if (withoutPrefix.startsWith("gpt-5.1-codex-max") || withoutPrefix.startsWith("codex-max")) {
+  if (
+    withoutProviderNamespace.startsWith("gpt-5.1-codex-max") ||
+    withoutProviderNamespace.startsWith("codex-max")
+  ) {
     return ["off", "low", "medium", "high", "xhigh"];
   }
 
-  // gpt-5-pro (not mini) with optional version suffix
-  if (/^gpt-5-pro(?!-[a-z])/.test(withoutPrefix)) {
+  // gpt-5.2-pro supports medium, high, xhigh reasoning levels
+  if (/^gpt-5\.2-pro(?!-[a-z])/.test(withoutProviderNamespace)) {
+    return ["medium", "high", "xhigh"];
+  }
+
+  // gpt-5-pro (legacy) only supports high
+  if (/^gpt-5-pro(?!-[a-z])/.test(withoutProviderNamespace)) {
     return ["high"];
   }
 
   // Gemini 3 Pro only supports "low" and "high" reasoning levels
-  if (withoutPrefix.includes("gemini-3")) {
+  if (withoutProviderNamespace.includes("gemini-3")) {
     return ["low", "high"];
   }
```
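For readers skimming the diff, here is the normalization pipeline pulled out as a standalone sketch. The two regexes are copied from the function above; the wrapper function and harness around them are illustrative only.

```ts
// Sketch of the two-step normalization in getThinkingPolicyForModel.
// The regexes match the diff above; the wrapper function is illustrative.
function stripModelWrappers(modelString: string): string {
  const normalized = modelString.trim().toLowerCase();
  // Step 1: drop the provider prefix up to the first colon,
  // e.g. "mux-gateway:openai/gpt-5.2-pro" -> "openai/gpt-5.2-pro"
  const withoutPrefix = normalized.replace(/^[a-z0-9_-]+:\s*/, "");
  // Step 2: drop a provider namespace path segment,
  // e.g. "openai/gpt-5.2-pro" -> "gpt-5.2-pro"
  return withoutPrefix.replace(/^[a-z0-9_-]+\//, "");
}

// All three spellings resolve to the bare id the policy rules match against:
stripModelWrappers("openai:gpt-5.2-pro");             // "gpt-5.2-pro"
stripModelWrappers("mux-gateway:openai/gpt-5.2-pro"); // "gpt-5.2-pro"
stripModelWrappers("openai: gpt-5.2-pro");            // "gpt-5.2-pro"
```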

src/common/constants/knownModels.ts

Lines changed: 4 additions & 4 deletions

```diff
@@ -48,15 +48,15 @@ const MODEL_DEFINITIONS = {
   },
   GPT: {
     provider: "openai",
-    providerModelId: "gpt-5.1",
-    aliases: ["gpt-5.1"],
+    providerModelId: "gpt-5.2",
+    aliases: ["gpt"],
     warm: true,
     tokenizerOverride: "openai/gpt-5",
   },
   GPT_PRO: {
     provider: "openai",
-    providerModelId: "gpt-5-pro",
-    aliases: ["gpt-5-pro"],
+    providerModelId: "gpt-5.2-pro",
+    aliases: ["gpt-pro"],
   },
   GPT_CODEX: {
     provider: "openai",
```

src/common/utils/ai/cacheStrategy.test.ts

Lines changed: 1 addition & 1 deletion

```diff
@@ -26,7 +26,7 @@ describe("cacheStrategy", () => {
     expect(supportsAnthropicCache("openai:gpt-4")).toBe(false);
     expect(supportsAnthropicCache("google:gemini-2.0")).toBe(false);
     expect(supportsAnthropicCache("openrouter:meta-llama/llama-3.1")).toBe(false);
-    expect(supportsAnthropicCache("mux-gateway:openai/gpt-5.1")).toBe(false);
+    expect(supportsAnthropicCache("mux-gateway:openai/gpt-5.2")).toBe(false);
   });
 });
```
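The updated assertion pins down the gateway case. Below is a plausible sketch of the check these tests exercise, assuming the gateway prefix is unwrapped before the provider comparison; the real `supportsAnthropicCache` may be implemented differently.

```ts
// Plausible shape of the provider check; illustrative, not mux's implementation.
function supportsAnthropicCacheSketch(model: string): boolean {
  // Unwrap gateway models first: "mux-gateway:openai/gpt-5.2" -> "openai:gpt-5.2"
  const normalized = model.replace(/^mux-gateway:([a-z0-9_-]+)\//, "$1:");
  return normalized.startsWith("anthropic:");
}

supportsAnthropicCacheSketch("anthropic:claude-opus-4-5");  // true
supportsAnthropicCacheSketch("mux-gateway:openai/gpt-5.2"); // false
```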

src/common/utils/tokens/displayUsage.test.ts

Lines changed: 5 additions & 5 deletions

```diff
@@ -159,7 +159,7 @@ describe("createDisplayUsage", () => {
   };
 
   test("subtracts cached tokens for direct OpenAI model", () => {
-    const result = createDisplayUsage(openAIUsage, "openai:gpt-5.1");
+    const result = createDisplayUsage(openAIUsage, "openai:gpt-5.2");
 
     expect(result).toBeDefined();
     expect(result!.cached.tokens).toBe(71600);
@@ -169,7 +169,7 @@
 
   test("subtracts cached tokens for gateway OpenAI model", () => {
     // Gateway format: mux-gateway:openai/model-name
-    const result = createDisplayUsage(openAIUsage, "mux-gateway:openai/gpt-5.1");
+    const result = createDisplayUsage(openAIUsage, "mux-gateway:openai/gpt-5.2");
 
     expect(result).toBeDefined();
     expect(result!.cached.tokens).toBe(71600);
@@ -245,7 +245,7 @@
   });
 
   test("returns undefined for undefined usage", () => {
-    expect(createDisplayUsage(undefined, "openai:gpt-5.1")).toBeUndefined();
+    expect(createDisplayUsage(undefined, "openai:gpt-5.2")).toBeUndefined();
   });
 
   test("handles zero cached tokens", () => {
@@ -256,7 +256,7 @@
       cachedInputTokens: 0,
     };
 
-    const result = createDisplayUsage(usage, "openai:gpt-5.1");
+    const result = createDisplayUsage(usage, "openai:gpt-5.2");
 
     expect(result).toBeDefined();
     expect(result!.input.tokens).toBe(1000);
@@ -270,7 +270,7 @@
       totalTokens: 1500,
     };
 
-    const result = createDisplayUsage(usage, "openai:gpt-5.1");
+    const result = createDisplayUsage(usage, "openai:gpt-5.2");
 
     expect(result).toBeDefined();
     expect(result!.input.tokens).toBe(1000);
```

src/common/utils/tokens/displayUsage.ts

Lines changed: 1 addition & 1 deletion

```diff
@@ -30,7 +30,7 @@ export function createDisplayUsage(
   const cachedTokens = usage.cachedInputTokens ?? 0;
   const rawInputTokens = usage.inputTokens ?? 0;
 
-  // Normalize gateway models (e.g., "mux-gateway:openai/gpt-5.1" → "openai:gpt-5.1")
+  // Normalize gateway models (e.g., "mux-gateway:openai/gpt-5.2" → "openai:gpt-5.2")
   // before detecting provider, so gateway-routed requests get correct handling
   const normalizedModel = normalizeGatewayModel(model);
```
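The comment fixes the transformation exactly; here is a minimal sketch of a normalizer with that behavior. The real `normalizeGatewayModel` may handle more cases than this.

```ts
// Sketch of the gateway normalization described above; illustrative only.
function normalizeGatewayModelSketch(model: string): string {
  // "mux-gateway:openai/gpt-5.2" -> "openai:gpt-5.2"
  const match = /^mux-gateway:([a-z0-9_-]+)\/(.+)$/.exec(model);
  return match ? `${match[1]}:${match[2]}` : model;
}

normalizeGatewayModelSketch("mux-gateway:openai/gpt-5.2"); // "openai:gpt-5.2"
normalizeGatewayModelSketch("openai:gpt-5.2");             // unchanged
```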

src/common/utils/tokens/modelStats.test.ts

Lines changed: 1 addition & 1 deletion

```diff
@@ -110,7 +110,7 @@ describe("getModelStats", () => {
 
   describe("model without provider prefix", () => {
     test("should handle model string without provider", () => {
-      const stats = getModelStats("gpt-5.1");
+      const stats = getModelStats("gpt-5.2");
       expect(stats).not.toBeNull();
       expect(stats?.max_input_tokens).toBeGreaterThan(0);
     });
```

src/common/utils/tokens/models-extra.ts

Lines changed: 27 additions & 8 deletions

```diff
@@ -40,21 +40,40 @@ export const modelsExtra: Record<string, ModelData> = {
     supports_response_schema: true,
   },
 
-  // GPT-5 Pro - Released October 6, 2025 at DevDay
-  // $15/M input, $120/M output
-  // Only available via OpenAI's Responses API
-  "gpt-5-pro": {
+  // GPT-5.2 - Released December 11, 2025
+  // $1.75/M input, $14/M output
+  // Cached input: $0.175/M
+  "gpt-5.2": {
     max_input_tokens: 400000,
-    max_output_tokens: 272000,
-    input_cost_per_token: 0.000015, // $15 per million input tokens
-    output_cost_per_token: 0.00012, // $120 per million output tokens
+    max_output_tokens: 128000,
+    input_cost_per_token: 0.00000175, // $1.75 per million input tokens
+    output_cost_per_token: 0.000014, // $14 per million output tokens
+    // OpenAI model page lists "cached input" pricing, which corresponds to prompt cache reads.
+    cache_read_input_token_cost: 0.000000175, // $0.175 per million cached input tokens
+    litellm_provider: "openai",
+    mode: "chat",
+    supports_function_calling: true,
+    supports_vision: true,
+    supports_reasoning: true,
+    supports_response_schema: true,
+    knowledge_cutoff: "2025-08-31",
+  },
+
+  // GPT-5.2 Pro - Released December 11, 2025
+  // $21/M input, $168/M output
+  // Supports medium, high, xhigh reasoning levels
+  "gpt-5.2-pro": {
+    max_input_tokens: 400000,
+    max_output_tokens: 128000,
+    input_cost_per_token: 0.000021, // $21 per million input tokens
+    output_cost_per_token: 0.000168, // $168 per million output tokens
+    knowledge_cutoff: "2025-08-31",
     litellm_provider: "openai",
     mode: "chat",
     supports_function_calling: true,
     supports_vision: true,
     supports_reasoning: true,
     supports_response_schema: true,
-    knowledge_cutoff: "2024-09-30",
     supported_endpoints: ["/v1/responses"],
   },
```
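To make the per-token rates concrete, here is a worked example of what a gpt-5.2 request might cost, assuming cached input tokens bill at the cache-read rate and the remainder at the full input rate; the token counts are invented.

```ts
// Illustrative cost arithmetic using the gpt-5.2 rates above. The request
// sizes are hypothetical; the cached/uncached billing split follows the
// usual prompt-caching convention, which this diff does not itself specify.
const inputCostPerToken = 0.00000175;      // $1.75 per million input tokens
const cacheReadCostPerToken = 0.000000175; // $0.175 per million cached input tokens
const outputCostPerToken = 0.000014;       // $14 per million output tokens

const inputTokens = 100_000; // total prompt tokens (hypothetical)
const cachedTokens = 80_000; // portion served from the prompt cache
const outputTokens = 5_000;

const cost =
  (inputTokens - cachedTokens) * inputCostPerToken + // 20,000 × $1.75/M  = $0.035
  cachedTokens * cacheReadCostPerToken +             // 80,000 × $0.175/M = $0.014
  outputTokens * outputCostPerToken;                 // 5,000  × $14/M    = $0.070

console.log(cost.toFixed(3)); // "0.119" dollars
```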
