Skip to content

Commit afbf863

Browse files
committed
chore: update tinker dependency version to 0.8.1 in pyproject.toml and uv.lock
- Upgraded the tinker dependency from version 0.7.0 to 0.8.1 in both pyproject.toml and uv.lock to ensure compatibility with the latest features and fixes.
1 parent aa21da9 commit afbf863

10 files changed

Lines changed: 316 additions & 302 deletions

File tree

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ dependencies = [
99
"typer>=0.15.2",
1010
"litellm>=1.71.1",
1111
"weave>=0.52.23",
12-
"tinker>=0.7.0",
12+
"tinker>=0.8.1",
1313
"tinker-cookbook>=0.1.0",
1414
"polars>=1.26.0",
1515
"tblib>=3.0.0",

src/art/local/backend.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -267,11 +267,12 @@ async def _prepare_backend_for_training(
267267
config: dev.OpenAIServerConfig | None = None,
268268
) -> tuple[str, str]:
269269
service = await self._get_service(model)
270-
await service.start_openai_server(config=config)
271-
server_args = (config or {}).get("server_args", {})
270+
host, port = await service.start_openai_server(config=config)
272271

273-
base_url = f"http://{server_args.get('host', '0.0.0.0')}:{server_args.get('port', 8000)}/v1"
274-
api_key = server_args.get("api_key", None) or "default"
272+
base_url = f"http://{host}:{port}/v1"
273+
api_key = (config or {}).get("server_args", {}).get(
274+
"api_key", None
275+
) or "default"
275276

276277
def done_callback(_: asyncio.Task[None]) -> None:
277278
close_proxy(self._services.pop(model.name))

src/art/local/service.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ def __init__(
1717

1818
async def start_openai_server(
1919
self, config: dev.OpenAIServerConfig | None
20-
) -> None: ...
20+
) -> tuple[str, int]: ...
2121

2222
async def vllm_engine_is_sleeping(self) -> bool: ...
2323

src/art/tinker/__init__.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
11
from .backend import TinkerBackend
2+
from .renderers import get_renderer_name
3+
from .server import OpenAICompatibleTinkerServer
24

3-
__all__ = ["TinkerBackend"]
5+
__all__ = ["TinkerBackend", "get_renderer_name", "OpenAICompatibleTinkerServer"]

src/art/tinker/backend.py

Lines changed: 2 additions & 83 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,13 @@
11
import os
2-
from typing import Any, cast
2+
from typing import cast
33

44
from mp_actors import move_to_child_process
55

6-
from .. import dev
76
from ..local.backend import LocalBackend
87
from ..local.service import ModelService
98
from ..model import TrainableModel
109
from ..utils.output_dirs import get_model_dir
10+
from .renderers import get_renderer_name
1111

1212

1313
class TinkerBackend(LocalBackend):
@@ -26,38 +26,6 @@ def __init__(
2626
os.environ["TINKER_API_KEY"] = tinker_api_key
2727
super().__init__(in_process=in_process, path=path)
2828

29-
async def _prepare_backend_for_training(
30-
self,
31-
model: TrainableModel,
32-
config: dev.OpenAIServerConfig | None = None,
33-
) -> tuple[str, str]:
34-
"""Start the local OpenAI server and return its base URL + API key."""
35-
service = await self._get_service(model)
36-
raw_config: dict[str, Any] = cast(dict[str, Any], config) if config else {}
37-
38-
server_args = cast(dict[str, Any], raw_config.get("server_args", {}))
39-
host = server_args.get("host", raw_config.get("host", "0.0.0.0"))
40-
port = server_args.get("port", raw_config.get("port"))
41-
if port is None:
42-
from .service import get_free_port
43-
44-
port = get_free_port()
45-
api_key = server_args.get("api_key", raw_config.get("api_key")) or "default"
46-
47-
# Ensure the Tinker server binds to the same host/port we return.
48-
tinker_config = cast(
49-
dev.OpenAIServerConfig,
50-
{
51-
**raw_config,
52-
"host": host,
53-
"port": port,
54-
},
55-
)
56-
await service.start_openai_server(config=tinker_config)
57-
58-
base_url = f"http://{host}:{port}/v1"
59-
return base_url, api_key
60-
6129
async def _get_service(self, model: TrainableModel) -> ModelService:
6230
from ..dev.get_model_config import get_model_config
6331
from ..dev.model import TinkerArgs, TinkerTrainingClientArgs
@@ -88,52 +56,3 @@ async def _get_service(self, model: TrainableModel) -> ModelService:
8856
process_name="tinker-service",
8957
)
9058
return self._services[model.name]
91-
92-
93-
renderer_name_message = """
94-
To manually specify a renderer (and silence this message), you can set the "renderer_name" field like so:
95-
96-
model = art.TrainableModel(
97-
name="my-model",
98-
project="my-project",
99-
base_model="Qwen/Qwen3-8B",
100-
_internal_config=art.dev.InternalModelConfig(
101-
tinker_args=art.dev.TinkerArgs(renderer_name="qwen3_disable_thinking"),
102-
),
103-
)
104-
105-
Valid renderer names are:
106-
107-
- llama3
108-
- qwen3
109-
- qwen3_disable_thinking
110-
- qwen3_instruct
111-
- deepseekv3
112-
- deepseekv3_disable_thinking
113-
- gpt_oss_no_sysprompt
114-
- gpt_oss_low_reasoning
115-
- gpt_oss_medium_reasoning
116-
- gpt_oss_high_reasoning
117-
""".strip()
118-
119-
120-
def get_renderer_name(base_model: str) -> str:
121-
if base_model.startswith("meta-llama/"):
122-
return "llama3"
123-
elif base_model.startswith("Qwen/Qwen3-"):
124-
if "Instruct" in base_model:
125-
return "qwen3_instruct"
126-
else:
127-
print("Defaulting to Qwen3 renderer without thinking for", base_model)
128-
print(renderer_name_message)
129-
return "qwen3_disable_thinking"
130-
elif base_model.startswith("deepseek-ai/DeepSeek-V3"):
131-
print("Defaulting to DeepSeekV3 renderer without thinking for", base_model)
132-
print(renderer_name_message)
133-
return "deepseekv3_disable_thinking"
134-
elif base_model.startswith("openai/gpt-oss"):
135-
print("Defaulting to GPT-OSS renderer without system prompt for", base_model)
136-
print(renderer_name_message)
137-
return "gpt_oss_no_sysprompt"
138-
else:
139-
raise ValueError(f"Unknown base model: {base_model}")

src/art/tinker/renderers.py

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
def get_renderer_name(base_model: str) -> str:
2+
if base_model.startswith("meta-llama/"):
3+
return "llama3"
4+
elif base_model.startswith("Qwen/Qwen3-"):
5+
if "Instruct" in base_model:
6+
return "qwen3_instruct"
7+
else:
8+
print("Defaulting to Qwen3 renderer without thinking for", base_model)
9+
print(renderer_name_message)
10+
return "qwen3_disable_thinking"
11+
elif base_model.startswith("deepseek-ai/DeepSeek-V3"):
12+
print("Defaulting to DeepSeekV3 renderer without thinking for", base_model)
13+
print(renderer_name_message)
14+
return "deepseekv3_disable_thinking"
15+
elif base_model.startswith("openai/gpt-oss"):
16+
print("Defaulting to GPT-OSS renderer without system prompt for", base_model)
17+
print(renderer_name_message)
18+
return "gpt_oss_no_sysprompt"
19+
else:
20+
raise ValueError(f"Unknown base model: {base_model}")
21+
22+
23+
renderer_name_message = """
24+
To manually specify a renderer (and silence this message), you can set the "renderer_name" field like so:
25+
26+
model = art.TrainableModel(
27+
name="my-model",
28+
project="my-project",
29+
base_model="Qwen/Qwen3-8B",
30+
_internal_config=art.dev.InternalModelConfig(
31+
tinker_args=art.dev.TinkerArgs(renderer_name="qwen3_disable_thinking"),
32+
),
33+
)
34+
35+
Valid renderer names are:
36+
37+
- llama3
38+
- qwen3
39+
- qwen3_disable_thinking
40+
- qwen3_instruct
41+
- deepseekv3
42+
- deepseekv3_disable_thinking
43+
- gpt_oss_no_sysprompt
44+
- gpt_oss_low_reasoning
45+
- gpt_oss_medium_reasoning
46+
- gpt_oss_high_reasoning
47+
""".strip()

0 commit comments

Comments (0)