Skip to content

Commit 0b49129

Browse files
committed
fix: handle system messages for non-Anthropic providers
- Add systemMessage capability schema ('single' | 'multiple') - Add getSystemMessageMode helper with provider-aware defaults - Anthropic keeps multiple system messages; all others merge into one - Add reasoning_content support for DeepSeek/oMLX models - Merge system messages in convert layer for OpenAI-compatible APIs - Handle lone system message edge case (convert to user message) Fixes 'Chat template error' for Qwen, Llama, Ollama, etc. Closes #5034
1 parent 0317206 commit 0b49129

File tree

4 files changed

+60
-16
lines changed

4 files changed

+60
-16
lines changed

packages/opencode/src/provider/provider.ts

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -845,6 +845,7 @@ const ProviderCapabilities = Schema.Struct({
845845
input: ProviderModalities,
846846
output: ProviderModalities,
847847
interleaved: ProviderInterleaved,
848+
systemMessage: Schema.Union([Schema.Literal("single"), Schema.Literal("multiple")]).optional(),
848849
})
849850

850851
const ProviderCacheCost = Schema.Struct({
@@ -1007,6 +1008,7 @@ function fromModelsDevModel(provider: ModelsDev.Provider, model: ModelsDev.Model
10071008
pdf: model.modalities?.output?.includes("pdf") ?? false,
10081009
},
10091010
interleaved: model.interleaved ?? false,
1011+
systemMessage: undefined,
10101012
},
10111013
release_date: model.release_date ?? "",
10121014
variants: {},
@@ -1171,7 +1173,7 @@ const layer: Layer.Layer<
11711173
pdf: model.modalities?.input?.includes("pdf") ?? existingModel?.capabilities.input.pdf ?? false,
11721174
},
11731175
output: {
1174-
text: model.modalities?.output?.includes("text") ?? existingModel?.capabilities.output.text ?? true,
1176+
text: model.modalities?.output?.includes("text") ?? true,
11751177
audio:
11761178
model.modalities?.output?.includes("audio") ?? existingModel?.capabilities.output.audio ?? false,
11771179
image:
@@ -1181,6 +1183,7 @@ const layer: Layer.Layer<
11811183
pdf: model.modalities?.output?.includes("pdf") ?? existingModel?.capabilities.output.pdf ?? false,
11821184
},
11831185
interleaved: model.interleaved ?? false,
1186+
systemMessage: undefined,
11841187
},
11851188
cost: {
11861189
input: model?.cost?.input ?? existingModel?.cost?.input ?? 0,

packages/opencode/src/provider/sdk/copilot/chat/convert-to-openai-compatible-chat-messages.ts

Lines changed: 25 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -12,15 +12,35 @@ function getOpenAIMetadata(message: { providerOptions?: SharedV3ProviderOptions
1212

1313
export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV3Prompt): OpenAICompatibleChatPrompt {
1414
const messages: OpenAICompatibleChatPrompt = []
15+
16+
const systemPrompt: string[] = []
17+
for (const { role, content } of prompt) {
18+
if (role === "system") {
19+
systemPrompt.push(content)
20+
}
21+
}
22+
23+
const hasSystem = systemPrompt.length > 0
24+
const hasOthers = prompt.some((m) => m.role !== "system")
25+
26+
if (hasSystem) {
27+
if (hasOthers) {
28+
messages.push({
29+
role: "system",
30+
content: systemPrompt.join("\n\n"),
31+
})
32+
} else {
33+
messages.push({
34+
role: "user",
35+
content: systemPrompt.join("\n\n"),
36+
})
37+
}
38+
}
39+
1540
for (const { role, content, ...message } of prompt) {
1641
const metadata = getOpenAIMetadata({ ...message })
1742
switch (role) {
1843
case "system": {
19-
messages.push({
20-
role: "system",
21-
content: content,
22-
...metadata,
23-
})
2444
break
2545
}
2646

packages/opencode/src/provider/sdk/copilot/chat/openai-compatible-chat-language-model.ts

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -226,8 +226,8 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
226226
})
227227
}
228228

229-
// reasoning content (Copilot uses reasoning_text):
230-
const reasoning = choice.message.reasoning_text
229+
// reasoning content (Copilot uses reasoning_text, DeepSeek/oMLX uses reasoning_content):
230+
const reasoning = choice.message.reasoning_text ?? choice.message.reasoning_content
231231
if (reasoning != null && reasoning.length > 0) {
232232
content.push({
233233
type: "reasoning",
@@ -477,8 +477,8 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
477477
reasoningOpaque = delta.reasoning_opaque
478478
}
479479

480-
// enqueue reasoning before text deltas (Copilot uses reasoning_text):
481-
const reasoningContent = delta.reasoning_text
480+
// enqueue reasoning before text deltas (Copilot uses reasoning_text, DeepSeek/oMLX uses reasoning_content):
481+
const reasoningContent = delta.reasoning_text ?? delta.reasoning_content
482482
if (reasoningContent) {
483483
if (!isActiveReasoning) {
484484
controller.enqueue({
@@ -757,6 +757,7 @@ const OpenAICompatibleChatResponseSchema = z.object({
757757
// Copilot-specific reasoning fields
758758
reasoning_text: z.string().nullish(),
759759
reasoning_opaque: z.string().nullish(),
760+
reasoning_content: z.string().nullish(),
760761
tool_calls: z
761762
.array(
762763
z.object({
@@ -792,6 +793,7 @@ const createOpenAICompatibleChatChunkSchema = <ERROR_SCHEMA extends z.core.$ZodT
792793
// Copilot-specific reasoning fields
793794
reasoning_text: z.string().nullish(),
794795
reasoning_opaque: z.string().nullish(),
796+
reasoning_content: z.string().nullish(),
795797
tool_calls: z
796798
.array(
797799
z.object({

packages/opencode/src/session/llm.ts

Lines changed: 25 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,23 @@ const log = Log.create({ service: "llm" })
2929
export const OUTPUT_TOKEN_MAX = ProviderTransform.OUTPUT_TOKEN_MAX
3030
type Result = Awaited<ReturnType<typeof streamText>>
3131

32+
function getSystemMessageMode(model: Provider.Model): "single" | "multiple" {
33+
if (model.capabilities.systemMessage) {
34+
return model.capabilities.systemMessage
35+
}
36+
const providerDefaults: Record<string, "single" | "multiple"> = {
37+
anthropic: "multiple",
38+
}
39+
const mode = providerDefaults[model.providerID] ?? "single"
40+
if (!providerDefaults[model.providerID]) {
41+
log.info("Using default 'single' systemMessage mode for provider", {
42+
providerID: model.providerID,
43+
modelID: model.id,
44+
})
45+
}
46+
return mode
47+
}
48+
3249
export type StreamInput = {
3350
user: MessageV2.User
3451
sessionID: string
@@ -150,12 +167,14 @@ const live: Layer.Layer<
150167
: isWorkflow
151168
? input.messages
152169
: [
153-
...system.map(
154-
(x): ModelMessage => ({
155-
role: "system",
156-
content: x,
157-
}),
158-
),
170+
...(getSystemMessageMode(input.model) === "multiple"
171+
? system.map(
172+
(x): ModelMessage => ({
173+
role: "system",
174+
content: x,
175+
}),
176+
)
177+
: ([{ role: "system", content: system.join("\n") }] as ModelMessage[])),
159178
...input.messages,
160179
]
161180

0 commit comments

Comments (0)