train.py
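"""Train a deep-research agent with the ART library.

Splits the deep_research_bench prompts into train / SFT / validation sets,
gathers grouped rollouts against a Qwen2.5-14B-Instruct policy, benchmarks on
the validation set every 10 steps, and backs checkpoints up to S3.
"""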
from dotenv import load_dotenv

load_dotenv()

import json

from langchain_core.messages import AIMessage

# Monkeypatch AIMessage.__init__: some providers return tool-call args as a
# JSON string instead of a dict, so decode them before the message is built.
_original_init = AIMessage.__init__

def _patched_init(self, *args, **kwargs):
    tool_calls = kwargs.get("tool_calls", [])
    for call in tool_calls:
        if isinstance(call.get("args"), str):
            try:
                call["args"] = json.loads(call["args"])
            except json.JSONDecodeError:
                call["args"] = {}
    _original_init(self, *args, **kwargs)

AIMessage.__init__ = _patched_init
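# For illustration, a tool call the patch repairs (the name is hypothetical;
# the point is that "args" arrives as a string):
#   {"name": "search", "args": '{"query": "..."}', "id": "call_0"}
# becomes {"name": "search", "args": {"query": "..."}, "id": "call_0"}.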
import asyncio
import os
import random
from dataclasses import dataclass
from typing import List

import art
import polars as pl
from art.langgraph import wrap_rollout
from art.local import LocalBackend
from art.utils import iterate_dataset
from tqdm.asyncio import tqdm

from rollout import rollout
@dataclass
class Scenario:
    prompt: str
    prompt_id: int  # ids in query.jsonl are integers (used in the modulo split below)
async def benchmark_model(model: art.Model, val_scenarios: List[Scenario]):
    # Run every validation prompt twice to reduce variance in the averages.
    val_scenarios = val_scenarios + val_scenarios
    val_trajectories = await tqdm.gather(
        *(
            wrap_rollout(model, rollout)(scenario.prompt, scenario.prompt_id)
            for scenario in val_scenarios
        ),
        desc=f"validation {model.name}",
    )
    # Drop rollouts that errored out; only log completed trajectories.
    valid_trajectories = [t for t in val_trajectories if isinstance(t, art.Trajectory)]
    if model._backend is not None:
        await model.log(valid_trajectories)
    metrics = pl.DataFrame(
        [{**t.metrics, "reward": t.reward} for t in valid_trajectories]
    )
    avg_metrics = metrics.select(
        [pl.mean(c).alias(c) for c in metrics.columns]
    ).with_columns(pl.lit(len(valid_trajectories)).alias("n_trajectories"))
    print(avg_metrics)
    return avg_metrics
async def train():
    with LocalBackend() as backend:
        model = art.TrainableModel(
            name="sft-deep-re",
            project="deep-re",
            base_model="unsloth/Qwen2.5-14B-Instruct",
        )
        # Resume from the most recent checkpoint backed up to S3, if any.
        await backend._experimental_pull_from_s3(model, s3_bucket=os.environ["BACKUP_BUCKET"])
        await model.register(backend)

        # Partition prompts by id within each block of 50:
        # ids 1-20 -> RL training, 21-45 -> SFT, 46-50 -> validation.
        train_scenarios: List[Scenario] = []
        val_scenarios: List[Scenario] = []
        sft_scenarios: List[Scenario] = []
        PROMPT_FILE = "deep_research_bench/data/prompt_data/query.jsonl"
        with open(PROMPT_FILE) as f:
            for line in f:
                obj = json.loads(line)
                scenario = Scenario(prompt=obj["prompt"], prompt_id=obj["id"])
                if (obj["id"] - 1) % 50 >= 45:
                    val_scenarios.append(scenario)
                elif (obj["id"] - 1) % 50 >= 20:
                    sft_scenarios.append(scenario)
                else:
                    train_scenarios.append(scenario)

        # Fixed seed so the shuffle (and thus the step -> prompt mapping) is reproducible.
        random.seed(23)
        random.shuffle(train_scenarios)

        train_iterator = iterate_dataset(
            train_scenarios,
            groups_per_step=1,
            num_epochs=3,
            initial_step=await model.get_step(),
        )
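        # Note: iterate_dataset yields `groups_per_step` scenarios per batch, so
        # each step here trains on rollouts from a single prompt, repeated for
        # num_epochs=3 passes; initial_step lets a resumed run skip ahead.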
        for batch in train_iterator:
            # Every 10 steps: benchmark on the validation set, back the model
            # up to S3, and delete local checkpoints to reclaim disk space.
            if batch.step % 10 == 0:
                print(f"\n--- Evaluating at Iteration {batch.step} ---")
                await benchmark_model(model, val_scenarios)
                await backend._experimental_push_to_s3(
                    model,
                    s3_bucket=os.environ["BACKUP_BUCKET"],
                )
                await model.delete_checkpoints()

            # Gather a group of 16 rollouts per scenario; trajectories within a
            # group are compared against each other during training.
            groups = await art.gather_trajectory_groups(
                art.TrajectoryGroup(
                    wrap_rollout(model, rollout)(scenario.prompt, scenario.prompt_id)
                    for _ in range(16)
                )
                for scenario in batch.items
            )
            await model.train(
                groups,
                _config=art.dev.TrainConfig(precalculate_logprobs=False),
            )

    print("Training finished.")
if __name__ == "__main__":
    asyncio.run(train())
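# To run (assumes a .env providing BACKUP_BUCKET and model-provider credentials,
# and the deep_research_bench prompt data available locally):
#   python train.py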