|
| 1 | +import os |
| 2 | + |
| 3 | +from mp_actors import move_to_child_process |
| 4 | + |
| 5 | +from ..local.backend import LocalBackend |
| 6 | +from ..local.service import ModelService |
| 7 | +from ..model import TrainableModel |
| 8 | +from ..utils.output_dirs import get_model_dir |
| 9 | + |
| 10 | + |
class TinkerBackend(LocalBackend):
    """LocalBackend variant that serves trainable models via the Tinker API.

    Ensures a Tinker API key is available in the process environment and
    lazily constructs one TinkerService per model name.
    """

    def __init__(
        self,
        *,
        tinker_api_key: str | None = None,
        in_process: bool = False,
        path: str | None = None,
    ) -> None:
        """Initialize the backend and make TINKER_API_KEY available.

        Args:
            tinker_api_key: API key for Tinker. When provided it is written
                to ``os.environ["TINKER_API_KEY"]``, overriding any existing
                value. When omitted, ``TINKER_API_KEY`` must already be set
                in the environment.
            in_process: Forwarded to LocalBackend; when True, services run in
                this process instead of a child process.
            path: Forwarded to LocalBackend as the art data path.

        Raises:
            ValueError: If no key is provided and TINKER_API_KEY is unset.
        """
        if tinker_api_key is not None:
            # An explicitly passed key always wins over the environment.
            # Do NOT echo the key itself -- it is a secret and must not
            # end up in logs or captured stdout.
            print("Setting TINKER_API_KEY in environment")
            os.environ["TINKER_API_KEY"] = tinker_api_key
        elif "TINKER_API_KEY" not in os.environ:
            # Raise instead of `assert`: assertions are stripped under
            # `python -O`, which would silently skip this validation.
            raise ValueError(
                "TINKER_API_KEY is not set and no tinker_api_key was provided"
            )
        super().__init__(in_process=in_process, path=path)

    async def _get_service(self, model: TrainableModel) -> ModelService:
        """Return the (cached) TinkerService for ``model``, creating it on first use."""
        # Imported lazily so the module can be imported without pulling in
        # the tinker-specific dependency chain.
        from ..dev.get_model_config import get_model_config
        from ..dev.model import TinkerArgs
        from .service import TinkerService

        if model.name not in self._services:
            # Compute once; previously this was evaluated twice with
            # identical arguments.
            output_dir = get_model_dir(model=model, art_path=self._path)
            config = get_model_config(
                base_model=model.base_model,
                output_dir=output_dir,
                config=model._internal_config,
            )
            # Fall back to an inferred renderer when none was configured.
            config["tinker_args"] = config.get("tinker_args") or TinkerArgs(
                renderer_name=get_renderer_name(model.base_model)
            )
            service = TinkerService(
                model_name=model.name,
                base_model=model.base_model,
                config=config,
                output_dir=output_dir,
            )
            if not self._in_process:
                # Isolate the service in a child process unless the caller
                # explicitly asked for in-process execution.
                service = move_to_child_process(
                    service,
                    process_name="tinker-service",
                )
            self._services[model.name] = service
        return self._services[model.name]
| 53 | + |
| 54 | + |
# User-facing guidance printed by get_renderer_name (below) whenever a
# renderer is inferred from the base model instead of being set explicitly
# via InternalModelConfig.tinker_args. Runtime string -- do not edit lightly.
renderer_name_message = """
To manually specify a renderer (and silence this message), you can set the "renderer_name" field like so:

model = art.TrainableModel(
    name="my-model",
    project="my-project",
    base_model="Qwen/Qwen3-8B",
    _internal_config=art.dev.InternalModelConfig(
        tinker_args=art.dev.TinkerArgs(renderer_name="qwen3_disable_thinking"),
    ),
)

Valid renderer names are:

- llama3
- qwen3
- qwen3_disable_thinking
- qwen3_instruct
- deepseekv3
- deepseekv3_disable_thinking
- gpt_oss_no_sysprompt
- gpt_oss_low_reasoning
- gpt_oss_medium_reasoning
- gpt_oss_high_reasoning
""".strip()
| 80 | + |
| 81 | + |
def get_renderer_name(base_model: str) -> str:
    """Infer the tinker renderer name from a base model identifier.

    For model families where a default must be assumed (non-Instruct Qwen3,
    DeepSeek-V3, GPT-OSS), prints a notice plus ``renderer_name_message``
    explaining how to configure the renderer explicitly.

    Raises:
        ValueError: If the base model does not belong to a known family.
    """
    # Guard-clause style: each known family returns immediately.
    if base_model.startswith("meta-llama/"):
        return "llama3"

    if base_model.startswith("Qwen/Qwen3-"):
        if "Instruct" not in base_model:
            print("Defaulting to Qwen3 renderer without thinking for", base_model)
            print(renderer_name_message)
            return "qwen3_disable_thinking"
        return "qwen3_instruct"

    if base_model.startswith("deepseek-ai/DeepSeek-V3"):
        print("Defaulting to DeepSeekV3 renderer without thinking for", base_model)
        print(renderer_name_message)
        return "deepseekv3_disable_thinking"

    if base_model.startswith("openai/gpt-oss"):
        print("Defaulting to GPT-OSS renderer without system prompt for", base_model)
        print(renderer_name_message)
        return "gpt_oss_no_sysprompt"

    raise ValueError(f"Unknown base model: {base_model}")
0 commit comments