Skip to content

Commit ef0e0a7

Browse files
committed
Evolve providers to support any provider type with shared model defaults
The providers config section was previously limited to OpenAI-compatible endpoints. This evolves it into a universal provider grouping mechanism that works with any provider type (openai, anthropic, google, amazon-bedrock, etc.) and supports shared model-level defaults.

Code changes:
- Add 'provider' field to ProviderConfig (defaults to 'openai' for backward compatibility)
- Add model-level defaults: temperature, max_tokens, thinking_budget, top_p, frequency/presence_penalty, parallel_tool_calls, track_usage, provider_opts
- Update applyProviderDefaults to propagate all new fields with model-level overrides taking precedence
- Make base_url required only for OpenAI-compatible providers
- Update validation, env var gathering, schema, and examples

Documentation:
- Rewrite providers/custom doc as 'Provider Definitions' covering all provider types with inheritance examples
- Add 'Inheriting from Provider Definitions' section to model config docs
- Update configuration overview, nav, and introduction

Assisted-By: docker-agent
1 parent 185f276 commit ef0e0a7

14 files changed

Lines changed: 552 additions & 103 deletions

File tree

agent-schema.json

Lines changed: 73 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -77,11 +77,21 @@
7777
"definitions": {
7878
"ProviderConfig": {
7979
"type": "object",
80-
"description": "Configuration for a custom model provider. Can be used for custom gateways",
80+
"description": "Configuration for a model provider. Defines reusable defaults that models can inherit by referencing the provider name. Supports any provider type (openai, anthropic, google, amazon-bedrock, etc.).",
8181
"properties": {
82+
"provider": {
83+
"type": "string",
84+
"description": "The underlying provider type. Defaults to \"openai\" when not set. Supported values: openai, anthropic, google, amazon-bedrock, dmr, and any built-in alias (requesty, azure, xai, ollama, mistral, etc.).",
85+
"examples": [
86+
"openai",
87+
"anthropic",
88+
"google",
89+
"amazon-bedrock"
90+
]
91+
},
8292
"api_type": {
8393
"type": "string",
84-
"description": "The API schema type to use. Determines which API schema to use.",
94+
"description": "The API schema type to use. Only applicable for OpenAI-compatible providers.",
8595
"enum": [
8696
"openai_chatcompletions",
8797
"openai_responses"
@@ -94,23 +104,75 @@
94104
},
95105
"base_url": {
96106
"type": "string",
97-
"description": "Base URL for the provider's API endpoint (required)",
107+
"description": "Base URL for the provider's API endpoint. Required for OpenAI-compatible providers, optional for native providers.",
98108
"format": "uri",
99109
"examples": [
100110
"https://router.example.com/v1"
101111
]
102112
},
103113
"token_key": {
104114
"type": "string",
105-
"description": "Environment variable name containing the API token. If not set, requests will be sent without authentication.",
115+
"description": "Environment variable name containing the API token. If not set, requests will use the default token for the provider type.",
106116
"examples": [
107-
"CUSTOM_PROVIDER_API_KEY"
117+
"CUSTOM_PROVIDER_API_KEY",
118+
"ANTHROPIC_API_KEY"
119+
]
120+
},
121+
"temperature": {
122+
"type": "number",
123+
"description": "Default sampling temperature for models using this provider.",
124+
"minimum": 0,
125+
"maximum": 2
126+
},
127+
"max_tokens": {
128+
"type": "integer",
129+
"description": "Default maximum number of tokens for models using this provider."
130+
},
131+
"top_p": {
132+
"type": "number",
133+
"description": "Default top-p (nucleus) sampling parameter.",
134+
"minimum": 0,
135+
"maximum": 1
136+
},
137+
"frequency_penalty": {
138+
"type": "number",
139+
"description": "Default frequency penalty.",
140+
"minimum": -2,
141+
"maximum": 2
142+
},
143+
"presence_penalty": {
144+
"type": "number",
145+
"description": "Default presence penalty.",
146+
"minimum": -2,
147+
"maximum": 2
148+
},
149+
"parallel_tool_calls": {
150+
"type": "boolean",
151+
"description": "Whether to enable parallel tool calls by default."
152+
},
153+
"provider_opts": {
154+
"type": "object",
155+
"description": "Provider-specific options passed through to the underlying client.",
156+
"additionalProperties": true
157+
},
158+
"track_usage": {
159+
"type": "boolean",
160+
"description": "Whether to track token usage by default."
161+
},
162+
"thinking_budget": {
163+
"description": "Default reasoning effort/budget for models using this provider. Can be an integer token count or a string effort level.",
164+
"oneOf": [
165+
{
166+
"type": "integer",
167+
"description": "Token budget for reasoning"
168+
},
169+
{
170+
"type": "string",
171+
"description": "Effort level (e.g., \"low\", \"medium\", \"high\", \"none\", \"adaptive\")"
172+
}
108173
]
109174
}
110175
},
111-
"required": [
112-
"base_url"
113-
],
114176
"additionalProperties": false
115177
},
116178
"AgentConfig": {
@@ -359,7 +421,7 @@
359421
"cooldown": {
360422
"type": "string",
361423
"description": "Duration to stick with a successful fallback model before retrying the primary. Only applies after a non-retryable error (e.g., 429 rate limit). Use Go duration format (e.g., '1m', '30s', '2m30s'). Default is '1m'.",
362-
"pattern": "^([0-9]+(ns|us|µs|ms|s|m|h))+$",
424+
"pattern": "^([0-9]+(ns|us|\u00b5s|ms|s|m|h))+$",
363425
"default": "1m",
364426
"examples": [
365427
"1m",
@@ -758,7 +820,7 @@
758820
},
759821
"instruction": {
760822
"type": "string",
761-
"description": "Custom instruction for this MCP server's tools. By default, setting this field replaces the toolset's built-in instructions entirely. To enrich (rather than replace) the original instructions, include the placeholder {ORIGINAL_INSTRUCTIONS} in your text it will be substituted with the toolset's built-in instructions at runtime. For example: '{ORIGINAL_INSTRUCTIONS}\nAlways prefer JSON output.' will prepend the original instructions and append your extra guidance."
823+
"description": "Custom instruction for this MCP server's tools. By default, setting this field replaces the toolset's built-in instructions entirely. To enrich (rather than replace) the original instructions, include the placeholder {ORIGINAL_INSTRUCTIONS} in your text \u2014 it will be substituted with the toolset's built-in instructions at runtime. For example: '{ORIGINAL_INSTRUCTIONS}\nAlways prefer JSON output.' will prepend the original instructions and append your extra guidance."
762824
},
763825
"name": {
764826
"type": "string",
@@ -874,7 +936,7 @@
874936
},
875937
"instruction": {
876938
"type": "string",
877-
"description": "Custom instruction for this toolset. By default, setting this field replaces the toolset's built-in instructions entirely. To enrich (rather than replace) the original instructions, include the placeholder {ORIGINAL_INSTRUCTIONS} in your text it will be substituted with the toolset's built-in instructions at runtime. For example: '{ORIGINAL_INSTRUCTIONS}\nAlways prefer JSON output.' will prepend the original instructions and append your extra guidance."
939+
"description": "Custom instruction for this toolset. By default, setting this field replaces the toolset's built-in instructions entirely. To enrich (rather than replace) the original instructions, include the placeholder {ORIGINAL_INSTRUCTIONS} in your text \u2014 it will be substituted with the toolset's built-in instructions at runtime. For example: '{ORIGINAL_INSTRUCTIONS}\nAlways prefer JSON output.' will prepend the original instructions and append your extra guidance."
878940
},
879941
"toon": {
880942
"type": "string",

docs/_data/nav.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,7 @@
119119
url: /providers/minimax/
120120
- title: Local Models
121121
url: /providers/local/
122-
- title: Custom Providers
122+
- title: Provider Definitions
123123
url: /providers/custom/
124124

125125
- section: Guides

docs/configuration/models/index.md

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -189,3 +189,30 @@ models:
189189
```
190190

191191
See [Local Models]({{ '/providers/local/' | relative_url }}) for more examples of custom endpoints.
192+
193+
## Inheriting from Provider Definitions
194+
195+
Models can reference a named provider to inherit shared defaults. Model-level settings always take precedence:
196+
197+
```yaml
198+
providers:
199+
my_anthropic:
200+
provider: anthropic
201+
token_key: MY_ANTHROPIC_KEY
202+
max_tokens: 16384
203+
thinking_budget: high
204+
temperature: 0.5
205+
206+
models:
207+
claude:
208+
provider: my_anthropic
209+
model: claude-sonnet-4-5
210+
# Inherits max_tokens, thinking_budget, temperature from provider
211+
212+
claude_fast:
213+
provider: my_anthropic
214+
model: claude-haiku-4-5
215+
thinking_budget: low # Overrides provider default
216+
```
217+
218+
See [Provider Definitions]({{ '/providers/custom/' | relative_url }}) for the full list of inheritable properties.

docs/configuration/overview/index.md

Lines changed: 35 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -46,12 +46,12 @@ rag:
4646
- type: chunked-embeddings
4747
model: openai/text-embedding-3-small
4848

49-
# 6. Providers — optional custom provider definitions
49+
# 6. Providers — optional reusable provider definitions
5050
providers:
5151
my_provider:
52-
api_type: openai_chatcompletions
53-
base_url: https://api.example.com/v1
52+
provider: anthropic # or openai (default), google, amazon-bedrock, etc.
5453
token_key: MY_API_KEY
54+
max_tokens: 16384
5555

5656
# 7. Permissions — agent-level tool permission rules (optional)
5757
# For user-wide global permissions, see ~/.config/cagent/config.yaml
@@ -220,34 +220,52 @@ See [Agent Distribution]({{ '/concepts/distribution/' | relative_url }}) for pub
220220

221221
## Custom Providers Section
222222

223-
Define reusable provider configurations for custom or self-hosted endpoints:
223+
Define reusable provider configurations with shared defaults. Providers can wrap any provider type — not just OpenAI-compatible endpoints:
224224

225225
```yaml
226226
providers:
227+
# OpenAI-compatible custom endpoint
227228
azure:
228229
api_type: openai_chatcompletions
229230
base_url: https://my-resource.openai.azure.com/openai/deployments/gpt-4o
230231
token_key: AZURE_OPENAI_API_KEY
231232
232-
internal_llm:
233-
api_type: openai_chatcompletions
234-
base_url: https://llm.internal.company.com/v1
235-
token_key: INTERNAL_API_KEY
233+
# Anthropic with shared model defaults
234+
team_anthropic:
235+
provider: anthropic
236+
token_key: TEAM_ANTHROPIC_KEY
237+
max_tokens: 32768
238+
thinking_budget: high
236239
237240
models:
238241
azure_gpt:
239-
provider: azure # References the custom provider
242+
provider: azure
240243
model: gpt-4o
241244
245+
claude:
246+
provider: team_anthropic
247+
model: claude-sonnet-4-5
248+
# Inherits max_tokens, thinking_budget from provider
249+
242250
agents:
243251
root:
244-
model: azure_gpt
252+
model: claude
245253
```
246254

247-
| Field | Description |
248-
| ----------- | -------------------------------------------------------------------- |
249-
| `api_type` | API schema: `openai_chatcompletions` (default) or `openai_responses` |
250-
| `base_url` | Base URL for the API endpoint |
251-
| `token_key` | Environment variable name for the API token |
252-
253-
See [Custom Providers]({{ '/providers/custom/' | relative_url }}) for more details.
255+
| Field | Description |
256+
| --------------------- | ---------------------------------------------------------------------------------------- |
257+
| `provider` | Underlying provider type: `openai` (default), `anthropic`, `google`, `amazon-bedrock`, etc. |
258+
| `api_type` | API schema: `openai_chatcompletions` (default) or `openai_responses`. OpenAI-only. |
259+
| `base_url` | Base URL for the API endpoint. Required for OpenAI-compatible providers. |
260+
| `token_key` | Environment variable name for the API token. |
261+
| `temperature` | Default sampling temperature. |
262+
| `max_tokens` | Default maximum response tokens. |
263+
| `thinking_budget` | Default reasoning effort/budget. |
264+
| `top_p` | Default top-p sampling parameter. |
265+
| `frequency_penalty` | Default frequency penalty. |
266+
| `presence_penalty` | Default presence penalty. |
267+
| `parallel_tool_calls` | Enable parallel tool calls by default. |
268+
| `track_usage` | Track token usage by default. |
269+
| `provider_opts` | Provider-specific options. |
270+
271+
See [Provider Definitions]({{ '/providers/custom/' | relative_url }}) for more details.

docs/getting-started/introduction/index.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ their model, personality, tools, and how they collaborate — and docker-agent h
3030
<div class="feature">
3131
<div class="feature-icon">🧠</div>
3232
<h3>Multi-Model Support</h3>
33-
<p>OpenAI, Anthropic, Google Gemini, AWS Bedrock, Docker Model Runner, and custom OpenAI-compatible providers.</p>
33+
<p>OpenAI, Anthropic, Google Gemini, AWS Bedrock, Docker Model Runner, and reusable provider definitions with shared defaults.</p>
3434

3535
</div>
3636
<div class="feature">

0 commit comments

Comments (0)