
Commit 68dd097

Merge pull request #622 from docker/add-skills
Add skills command for AI coding assistants
2 parents 6aca887 + 751db70 commit 68dd097

8 files changed

Lines changed: 639 additions & 0 deletions


cmd/cli/commands/root.go

Lines changed: 1 addition & 0 deletions
@@ -94,6 +94,7 @@ func NewRootCmd(cli *command.DockerCli) *cobra.Command {
 		newRestartRunner(),
 		newReinstallRunner(),
 		newSearchCmd(),
+		newSkillsCmd(),
 	)

 	// Commands that require a running model runner. These are wrapped to ensure the standalone runner is available.

cmd/cli/commands/skills.go

Lines changed: 145 additions & 0 deletions
@@ -0,0 +1,145 @@
package commands

import (
	"embed"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"

	"github.com/docker/model-runner/cmd/cli/commands/completion"
	"github.com/spf13/cobra"
)

//go:embed skills/*
var skillsFS embed.FS

type skillsOptions struct {
	codex    bool
	claude   bool
	opencode bool
	dest     string
	force    bool
}

func newSkillsCmd() *cobra.Command {
	opts := &skillsOptions{}

	c := &cobra.Command{
		Use:   "skills",
		Short: "Install Docker Model Runner skills for AI coding assistants",
		Long: `Install Docker Model Runner skills for AI coding assistants.

Skills are configuration files that help AI coding assistants understand
how to use Docker Model Runner effectively for local model inference.

Supported targets:
  --codex     Install to ~/.codex/skills (OpenAI Codex CLI)
  --claude    Install to ~/.claude/skills (Claude Code)
  --opencode  Install to ~/.config/opencode/skills (OpenCode)
  --dest      Install to a custom directory

Example:
  docker model skills --claude
  docker model skills --codex --claude
  docker model skills --dest /path/to/skills`,
		RunE: func(cmd *cobra.Command, args []string) error {
			return runSkills(cmd, opts)
		},
		ValidArgsFunction: completion.NoComplete,
	}

	c.Flags().BoolVar(&opts.codex, "codex", false, "Install skills for OpenAI Codex CLI (~/.codex/skills)")
	c.Flags().BoolVar(&opts.claude, "claude", false, "Install skills for Claude Code (~/.claude/skills)")
	c.Flags().BoolVar(&opts.opencode, "opencode", false, "Install skills for OpenCode (~/.config/opencode/skills)")
	c.Flags().StringVar(&opts.dest, "dest", "", "Install skills to a custom directory")
	c.Flags().BoolVarP(&opts.force, "force", "f", false, "Overwrite existing skills without prompting")

	return c
}

func runSkills(cmd *cobra.Command, opts *skillsOptions) error {
	// Collect target directories
	var targets []string
	homeDir, err := os.UserHomeDir()
	if err != nil {
		return fmt.Errorf("failed to get home directory: %w", err)
	}

	if opts.codex {
		targets = append(targets, filepath.Join(homeDir, ".codex", "skills"))
	}
	if opts.claude {
		targets = append(targets, filepath.Join(homeDir, ".claude", "skills"))
	}
	if opts.opencode {
		targets = append(targets, filepath.Join(homeDir, ".config", "opencode", "skills"))
	}
	if opts.dest != "" {
		targets = append(targets, opts.dest)
	}

	if len(targets) == 0 {
		return fmt.Errorf("no target specified. Use --codex, --claude, --opencode, or --dest")
	}

	// Install skills to each target
	for _, target := range targets {
		if err := installSkills(cmd, target, opts.force); err != nil {
			return fmt.Errorf("failed to install skills to %s: %w", target, err)
		}
		cmd.Printf("Installed Docker Model Runner skills to %s\n", target)
	}

	return nil
}

func installSkills(cmd *cobra.Command, targetDir string, force bool) error {
	// Walk through embedded skills directory
	return fs.WalkDir(skillsFS, "skills", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}

		// Skip the root "skills" directory itself
		if path == "skills" {
			return nil
		}

		// Calculate the relative path from "skills/"
		relPath, err := filepath.Rel("skills", path)
		if err != nil {
			return err
		}

		destPath := filepath.Join(targetDir, relPath)

		if d.IsDir() {
			// Create directory
			return os.MkdirAll(destPath, 0755)
		}

		// Check if file exists and handle force flag
		if _, err := os.Stat(destPath); err == nil && !force {
			return fmt.Errorf("file already exists: %s (use --force to overwrite)", destPath)
		}

		// Read the embedded file
		content, err := skillsFS.ReadFile(path)
		if err != nil {
			return fmt.Errorf("failed to read embedded file %s: %w", path, err)
		}

		// Ensure parent directory exists
		if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
			return fmt.Errorf("failed to create directory for %s: %w", destPath, err)
		}

		// Write the file
		if err := os.WriteFile(destPath, content, 0644); err != nil {
			return fmt.Errorf("failed to write file %s: %w", destPath, err)
		}

		return nil
	})
}
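
The walk-and-copy logic above is easy to exercise in isolation. Below is a minimal, self-contained sketch of the same pattern run over an in-memory fs.FS (testing/fstest) rather than the embedded filesystem; the skill path used in it is hypothetical, standing in for the files embedded from cmd/cli/commands/skills/.

```go
package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"testing/fstest"
)

// copyTree mirrors the walk-and-copy pattern used by installSkills, but accepts
// any fs.FS so it can be tried without go:embed.
func copyTree(src fs.FS, root, targetDir string) error {
	return fs.WalkDir(src, root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if path == root {
			return nil // skip the root directory itself
		}
		rel, err := filepath.Rel(root, path)
		if err != nil {
			return err
		}
		dest := filepath.Join(targetDir, rel)
		if d.IsDir() {
			return os.MkdirAll(dest, 0o755)
		}
		content, err := fs.ReadFile(src, path)
		if err != nil {
			return err
		}
		if err := os.MkdirAll(filepath.Dir(dest), 0o755); err != nil {
			return err
		}
		return os.WriteFile(dest, content, 0o644)
	})
}

func main() {
	// Hypothetical skill layout, for illustration only.
	src := fstest.MapFS{
		"skills/docker-model-runner/SKILL.md": {Data: []byte("# Docker Model Runner\n")},
	}

	dir, err := os.MkdirTemp("", "skills-demo")
	if err != nil {
		panic(err)
	}
	if err := copyTree(src, "skills", dir); err != nil {
		panic(err)
	}
	fmt.Println("copied skills into", dir)
}
```
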
Lines changed: 87 additions & 0 deletions
@@ -0,0 +1,87 @@
---
name: docker-model-runner
description: Skills for using Docker Model Runner to run local LLM inference
---

# Docker Model Runner

Docker Model Runner (DMR) makes it easy to run AI models locally using Docker. This skill helps you effectively use Docker Model Runner for local LLM inference in your development workflow.

## Workflow

When helping users with local LLM inference using Docker Model Runner:

1. **Check if Docker Model Runner is available** by running `docker model version`

2. **List available models** with `docker model list` to see what's already pulled

3. **Search for models** on Docker Hub or HuggingFace:
   - `docker model search <query>` to find models
   - Popular models include: `ai/gemma3`, `ai/llama3.2`, `ai/smollm2`, `ai/qwen3`

4. **Pull models** before running: `docker model pull <model>`

5. **Run models** for inference:
   - One-time prompt: `docker model run ai/smollm2 "Your prompt here"`
   - Interactive chat: `docker model run ai/smollm2`
   - Pre-load model: `docker model run --detach ai/smollm2`

6. **Use the OpenAI-compatible API** for programmatic access:
   - Endpoint: `http://localhost:12434/engines/llama.cpp/v1/chat/completions`
   - This is compatible with OpenAI client libraries

## API Usage

Docker Model Runner exposes an OpenAI-compatible REST API:

```bash
# Chat completions
curl http://localhost:12434/engines/llama.cpp/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "ai/smollm2",
    "messages": [
      {"role": "system", "content": "You are a helpful assistant."},
      {"role": "user", "content": "Hello!"}
    ]
  }'
```

For Python with the OpenAI library:

```python
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:12434/engines/llama.cpp/v1",
    api_key="not-needed"  # API key not required for local inference
)

response = client.chat.completions.create(
    model="ai/smollm2",
    messages=[{"role": "user", "content": "Hello!"}]
)
```
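
A minimal Go sketch against the same endpoint, using only the standard library (the request shape mirrors the curl example above; error handling is trimmed):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Same endpoint and model as the examples above.
	payload, _ := json.Marshal(map[string]any{
		"model": "ai/smollm2",
		"messages": []map[string]string{
			{"role": "user", "content": "Hello!"},
		},
	})

	resp, err := http.Post(
		"http://localhost:12434/engines/llama.cpp/v1/chat/completions",
		"application/json",
		bytes.NewReader(payload),
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The response body is an OpenAI-style chat completion JSON object.
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}
```
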
## Key Commands

| Command | Description |
|---------|-------------|
| `docker model run <model> [prompt]` | Run a model with an optional prompt |
| `docker model pull <model>` | Pull a model from a registry |
| `docker model list` | List downloaded models |
| `docker model search <query>` | Search for models |
| `docker model ps` | Show running models |
| `docker model rm <model>` | Remove a model |
| `docker model inspect <model>` | Show model details |

## Best Practices

- Use smaller models (like `ai/smollm2`) for faster responses during development
- Pre-load models with `--detach` for better performance in scripts
- Models stay loaded until another model is requested or an idle timeout (5 minutes) is reached
- Use the OpenAI-compatible API for integration with existing tools

## References

See [references/docker-model-guide.md](references/docker-model-guide.md) for detailed documentation.
