"""End-to-end vLLM contract tests for ART LocalBackend."""

import os
import tempfile
import uuid

import openai
import pytest

torch = pytest.importorskip("torch")
pytest.importorskip("vllm")

import art
from art.local import LocalBackend
from art.types import LocalTrainResult

DEFAULT_BASE_MODEL = "Qwen/Qwen3-0.6B"
DEFAULT_GPU_MEMORY_UTILIZATION = 0.2
DEFAULT_MAX_MODEL_LEN = 2048
DEFAULT_MAX_SEQ_LENGTH = 2048
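# Environment overrides understood by this module (defaults above):
#   BASE_MODEL                        -- base model registered with the backend
#   ART_TEST_GPU_MEMORY_UTILIZATION   -- requested vLLM gpu_memory_utilization
#   ART_TEST_MIN_FREE_GPU_GIB         -- skip the test below this much free VRAM
#   ART_TEST_MAX_MODEL_LEN            -- vLLM max_model_len
#   ART_TEST_MAX_SEQ_LENGTH           -- max_seq_length passed via init_args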


def get_base_model() -> str:
    return os.environ.get("BASE_MODEL", DEFAULT_BASE_MODEL)


def get_safe_gpu_memory_utilization() -> float:
    """Clamp the requested GPU memory utilization to what is actually free.

    Skips the test when less than ART_TEST_MIN_FREE_GPU_GIB of VRAM is free,
    since vLLM is unlikely to start reliably in that case.
    """
    requested = float(
        os.environ.get(
            "ART_TEST_GPU_MEMORY_UTILIZATION",
            str(DEFAULT_GPU_MEMORY_UTILIZATION),
        )
    )
    min_free_gib = float(os.environ.get("ART_TEST_MIN_FREE_GPU_GIB", "8"))
    free_bytes, total_bytes = torch.cuda.mem_get_info()
    free_gib = free_bytes / (1024**3)
    if free_gib < min_free_gib:
        pytest.skip(
            f"Insufficient free GPU memory for vLLM contract test: {free_gib:.1f} GiB free < {min_free_gib:.1f} GiB required."
        )
    # Keep the requested utilization below the currently free fraction of memory,
    # with headroom.
    return max(0.02, min(requested, (free_bytes / total_bytes) * 0.8))


def get_vllm_test_config() -> art.dev.InternalModelConfig:
    """Build a deliberately small vLLM/trainer config for the contract test."""
    return {
        "engine_args": {
            "gpu_memory_utilization": get_safe_gpu_memory_utilization(),
            "max_model_len": int(
                os.environ.get("ART_TEST_MAX_MODEL_LEN", str(DEFAULT_MAX_MODEL_LEN))
            ),
            "max_num_seqs": 8,
            "enforce_eager": True,
        },
        "init_args": {
            "max_seq_length": int(
                os.environ.get("ART_TEST_MAX_SEQ_LENGTH", str(DEFAULT_MAX_SEQ_LENGTH))
            ),
        },
    }


async def simple_rollout(
    client: openai.AsyncOpenAI, model_name: str, prompt: str
) -> art.Trajectory:
    """Run one tiny chat completion and score it with a keyword-based reward."""
    messages: art.Messages = [{"role": "user", "content": prompt}]
    completion = await client.chat.completions.create(
        messages=messages,
        model=model_name,
        max_tokens=10,
        timeout=60,
        temperature=1,
        logprobs=True,
        top_logprobs=0,
    )
    choice = completion.choices[0]
    content = (choice.message.content or "").lower()
    if "yes" in content:
        reward = 1.0
    elif "no" in content:
        reward = 0.5
    elif "maybe" in content:
        reward = 0.25
    else:
        reward = 0.0
    return art.Trajectory(messages_and_choices=[*messages, choice], reward=reward)


async def assert_chat_logprobs(
    client: openai.AsyncOpenAI,
    model_name: str,
) -> None:
    completion = await client.chat.completions.create(
        messages=[{"role": "user", "content": "Say hello."}],
        model=model_name,
        max_tokens=8,
        timeout=60,
        logprobs=True,
        top_logprobs=0,
    )
    assert completion.choices[0].logprobs is not None


@pytest.mark.skipif(
    not torch.cuda.is_available(),
    reason="No CUDA available in this environment",
)
async def test_local_backend_vllm_contract() -> None:
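    """Exercise the LocalBackend + vLLM serving contract end to end.

    The test registers a small trainable model, confirms step-0 inference
    returns logprobs and is listed by the OpenAI-compatible models endpoint,
    runs one training step, and then confirms that both the step-0 and the
    newly trained checkpoints are still servable and listed.
    """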
    model_name = f"test-vllm-contract-{uuid.uuid4().hex[:8]}"
    with tempfile.TemporaryDirectory() as tmpdir:
        backend = LocalBackend(path=tmpdir)
        model = art.TrainableModel(
            name=model_name,
            project="integration-tests",
            base_model=get_base_model(),
        )
        # object.__setattr__ bypasses any custom __setattr__ handling on the
        # model, letting the test set the private internal config directly.
        object.__setattr__(model, "_internal_config", get_vllm_test_config())
        try:
            await model.register(backend)
            client = model.openai_client()

            step0_name = model.get_inference_name(step=0)
            await assert_chat_logprobs(client, step0_name)

            model_ids = [m.id async for m in client.models.list()]
            assert f"{model.name}@0" in model_ids

            train_groups = await art.gather_trajectory_groups(
                [
                    art.TrajectoryGroup(
                        [simple_rollout(client, step0_name, prompt) for _ in range(2)]
                    )
                    for prompt in ("Say yes", "Say no")
                ]  # ty:ignore[invalid-argument-type]
            )
            result = await backend.train(model, train_groups, learning_rate=1e-5)
            assert isinstance(result, LocalTrainResult)
            assert result.step > 0

            latest_name = model.get_inference_name(step=result.step)
            await assert_chat_logprobs(client, latest_name)
            await assert_chat_logprobs(client, step0_name)

            model_ids_after = [m.id async for m in client.models.list()]
            assert f"{model.name}@0" in model_ids_after
            assert f"{model.name}@{result.step}" in model_ids_after
        finally:
            await backend.close()
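

# Example invocation (hypothetical file path; assumes pytest is configured to
# run async tests, e.g. pytest-asyncio in auto mode, and that a CUDA GPU is visible):
#   ART_TEST_GPU_MEMORY_UTILIZATION=0.2 ART_TEST_MAX_MODEL_LEN=2048 \
#       pytest tests/integration/test_vllm_contract.py -x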