Commit 6311bc5
Parent(s): c2b4647

Update model version and parameters in generate-responses.py

generate-responses.py CHANGED (+19 -12)
@@ -5,7 +5,7 @@
 # "huggingface-hub[hf_transfer]",
 # "torch",
 # "transformers",
-# "vllm",
+# "vllm>=0.8.5",
 # ]
 #
 # ///
@@ -150,7 +150,7 @@ uv run https://huggingface.co/datasets/uv-scripts/vllm/raw/main/generate-responses.py
 def main(
     src_dataset_hub_id: str,
     output_dataset_hub_id: str,
-    model_id: str = "Qwen/Qwen3-30B-A3B-Instruct-2507
+    model_id: str = "Qwen/Qwen3-30B-A3B-Instruct-2507",
     messages_column: str = "messages",
     output_column: str = "response",
     temperature: float = 0.7,
@@ -160,6 +160,7 @@ def main(
     max_tokens: int = 16384,
     repetition_penalty: float = 1.0,
     gpu_memory_utilization: float = 0.90,
+    max_model_len: Optional[int] = None,
     tensor_parallel_size: Optional[int] = None,
     hf_token: Optional[str] = None,
 ):
@@ -179,6 +180,7 @@ def main(
         max_tokens: Maximum tokens to generate
         repetition_penalty: Repetition penalty parameter
         gpu_memory_utilization: GPU memory utilization factor
+        max_model_len: Maximum model context length (None uses model default)
         tensor_parallel_size: Number of GPUs to use (auto-detect if None)
         hf_token: Hugging Face authentication token
     """
@@ -213,11 +215,16 @@ def main(
 
     # Initialize vLLM
     logger.info(f"Loading model: {model_id}")
-    llm = LLM(
-        model=model_id,
-        tensor_parallel_size=tensor_parallel_size,
-        gpu_memory_utilization=gpu_memory_utilization,
-    )
+    vllm_kwargs = {
+        "model": model_id,
+        "tensor_parallel_size": tensor_parallel_size,
+        "gpu_memory_utilization": gpu_memory_utilization,
+    }
+    if max_model_len is not None:
+        vllm_kwargs["max_model_len"] = max_model_len
+        logger.info(f"Using max_model_len={max_model_len}")
+
+    llm = LLM(**vllm_kwargs)
 
     # Load tokenizer for chat template
     logger.info("Loading tokenizer...")
@@ -336,8 +343,8 @@ Examples:
     parser.add_argument(
         "--model-id",
         type=str,
-        default="Qwen/Qwen3-30B-A3B-Instruct-2507
-        help="Model to use for generation (default: Qwen3-30B-A3B-Instruct-2507
+        default="Qwen/Qwen3-30B-A3B-Instruct-2507",
+        help="Model to use for generation (default: Qwen3-30B-A3B-Instruct-2507)",
     )
     parser.add_argument(
         "--messages-column",
@@ -390,8 +397,8 @@ Examples:
     parser.add_argument(
         "--gpu-memory-utilization",
         type=float,
-        default=0.90,
-        help="GPU memory utilization factor (default: 0.90)",
+        default=0.95,
+        help="GPU memory utilization factor (default: 0.95)",
    )
     parser.add_argument(
         "--tensor-parallel-size",
@@ -439,7 +446,7 @@ Example HF Jobs command with multi-GPU:
     username/input-dataset \\
     username/output-dataset \\
     --messages-column messages \\
-    --model-id Qwen/Qwen3-30B-A3B-Instruct-2507
+    --model-id Qwen/Qwen3-30B-A3B-Instruct-2507 \\
     --temperature 0.7 \\
     --max-tokens 16384
 """)
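
Note that the hunks above add max_model_len to main() but not to the argument parser, so in this commit the new knob is reachable only by calling the function directly, not via a CLI flag. A hedged sketch of such a call, assuming the script has been saved locally as generate_responses.py (it is normally run straight from the Hub URL with uv run; the dataset ids are placeholders):

    import generate_responses  # hypothetical local module name for the script

    generate_responses.main(
        src_dataset_hub_id="username/input-dataset",
        output_dataset_hub_id="username/output-dataset",
        max_model_len=16384,  # the new parameter: cap the context length
    )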