import argparse
import time

import datasets
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

MODEL_ID = "Qwen/Qwen3-4B-Instruct-2507"

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--samples", type=int, default=100, help="Number of prompts to run")
    parser.add_argument("--batch-size", "-bs", type=int, default=32, help="Static batch size")
    parser.add_argument("--max-new-tokens", type=int, default=512, help="Max new tokens per request")
    parser.add_argument("--warmup", type=int, default=1, help="Warmup batches (excluded from timing)")
    args = parser.parse_args()

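    # Load the model in bfloat16 with PyTorch's SDPA attention backend.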
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        attn_implementation="sdpa",
        torch_dtype=torch.bfloat16,
    ).cuda().eval()

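    # Left padding keeps prompts right-aligned so generation continues from the
    # last real token; fall back to EOS as the pad token if none is defined.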
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, padding_side="left")
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token = tokenizer.eos_token

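    # The GSM8K test split provides the benchmark prompts.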
    dataset = datasets.load_dataset("openai/gsm8k", "socratic", split="test")
    dataset = dataset.select(range(args.samples))

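    # Tokenize each prompt individually; padding is deferred until batches are built.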
    encoded = tokenizer(list(dataset["question"]), padding=False, truncation=False)
    inputs = [{"input_ids": ids, "attention_mask": attn}
              for ids, attn in zip(encoded["input_ids"], encoded["attention_mask"])]

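    # Greedy decoding keeps the benchmark deterministic across runs.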
    gen_cfg = GenerationConfig(
        do_sample=False,
        max_new_tokens=args.max_new_tokens,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id,
        use_cuda_graph=False,
    )

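    # Pad a slice of prompts to a uniform length and move the tensors to the GPU.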
    def make_batch(items):
        batch = tokenizer.pad(items, padding=True, return_tensors="pt")
        return {k: v.cuda(non_blocking=True) for k, v in batch.items()}

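    # Warmup passes trigger CUDA kernel compilation and caching so the timed
    # run is not skewed by one-time costs.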
    if args.warmup > 0:
        with torch.no_grad():
            for i in range(0, min(len(inputs), args.batch_size * args.warmup), args.batch_size):
                warm = make_batch(inputs[i : i + args.batch_size])
                _ = model.generate(**warm, generation_config=gen_cfg)

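    # Timed run: process every prompt in fixed-size batches.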
    token_count = 0
    bs = args.batch_size
    torch.cuda.synchronize()  # make sure warmup work has finished before the clock starts
    start = time.time()
    with torch.no_grad():
        for i in range(0, len(inputs), bs):
            batch_items = inputs[i : i + bs]
            batch = make_batch(batch_items)

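            # Greedy generation for the whole batch; each output row has length
            # input_width + generated tokens.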
            outputs = model.generate(**batch, generation_config=gen_cfg)

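            # Count only newly generated, non-pad tokens. With left padding every
            # prompt is right-aligned to the full input width, so generation
            # starts at index input_ids.shape[1] in each output row.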
            pad_id = tokenizer.pad_token_id
            prompt_width = batch["input_ids"].shape[1]
            for row in outputs:
                gen_part = row[prompt_width:].tolist()
                token_count += sum(1 for t in gen_part if t != pad_id)

    end = time.time()
    elapsed = end - start
    tps = token_count / elapsed if elapsed > 0 else 0.0

| | print("-" * 20) |
| | print("--- Finished Static Batching Benchmark ---\n") |
| | print(f"Model: {MODEL_ID}") |
| | print(f"Attention: sdpa | Batch size: {args.batch_size} | Samples: {args.samples} | Max new tokens: {args.max_new_tokens}") |
| | print(f"Generation time (no warmup): {elapsed:.2f} s for {token_count} generated tokens -> {tps:.2f} tok/s") |
| |
|
| |
|
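# Example invocation (the script name here is an assumption):
#   python static_batching_benchmark.py --samples 100 --batch-size 32 --max-new-tokens 512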
if __name__ == "__main__":
    main()