#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

| 8 | +"""Benchmark: Pack GQA SDPA vs repeat_interleave + MHA SDPA. |
| 9 | +
|
| 10 | +Compares two approaches for GQA attention on consumer GPUs: |
| 11 | + 1. repeat_interleave: expand K/V to H_q heads, then call SDPA with H_q==H_kv |
| 12 | + 2. pack_gqa: call SDPA with enable_gqa=True (kernel handles head mapping) |
| 13 | +
|
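
For example, with the Qwen3.5-style shapes benchmarked below (H_q=16, H_kv=2),
approach 1 materializes K/V as (B, 16, L_kv, D) tensors, while approach 2 keeps
them at (B, 2, L_kv, D); both are equivalent to mapping query head h to KV head
h // 8.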

Usage:
    LD_LIBRARY_PATH=/home/mnachin/local/miniconda3/envs/executorch/lib:$LD_LIBRARY_PATH \
    python3 backends/cuda/tests/bench_sdpa_gqa.py
"""

import os
import sys
import time

import torch
import torch.nn.functional as F

# Import Triton SDPA
kernels_dir = os.path.join(os.path.dirname(__file__), "..", "triton", "kernels")
sys.path.insert(0, os.path.abspath(kernels_dir))
from sdpa import sdpa
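# NOTE: `sdpa` is assumed to follow the interface of
# torch.nn.functional.scaled_dot_product_attention, i.e.
# sdpa(q, k, v, attn_mask=None, enable_gqa=False), matching how it is
# called throughout this benchmark.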


def _benchmark_fn(fn, warmup=10, repeats=100):
    """Benchmark a function, return median time in microseconds."""
    # Warmup
    for _ in range(warmup):
        fn()
    torch.cuda.synchronize()

    times = []
    for _ in range(repeats):
        torch.cuda.synchronize()
        start = time.perf_counter()
        fn()
        torch.cuda.synchronize()
        end = time.perf_counter()
        times.append((end - start) * 1e6)  # microseconds

    times.sort()
    return times[len(times) // 2]  # median


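# Optional correctness check: a minimal sketch, not part of the timed loops.
# It assumes the local Triton `sdpa` matches the semantics of
# torch.nn.functional.scaled_dot_product_attention closely enough that both
# GQA approaches can be validated against the eager PyTorch reference.
def _check_outputs_match(q, k, v, num_groups, mask=None, atol=2e-2, rtol=2e-2):
    """Return True if both GQA approaches agree with eager SDPA (bf16 tolerance)."""
    k_exp = k.repeat_interleave(num_groups, dim=1)
    v_exp = v.repeat_interleave(num_groups, dim=1)
    mask_exp = None
    if mask is not None:
        B, H_q, L_q = q.shape[0], q.shape[1], q.shape[2]
        mask_exp = mask.expand(B, H_q, L_q, k.shape[2])
    ref = F.scaled_dot_product_attention(q, k_exp, v_exp, attn_mask=mask_exp)
    out_repeat = sdpa(q, k_exp, v_exp, attn_mask=mask_exp)
    out_pack = sdpa(q, k, v, attn_mask=mask, enable_gqa=True)
    return torch.allclose(out_repeat, ref, atol=atol, rtol=rtol) and torch.allclose(
        out_pack, ref, atol=atol, rtol=rtol
    )

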
def bench_config(B, H_q, H_kv, L_q, L_kv, D, has_mask=False):
    """Benchmark one configuration, return (repeat_interleave_us, pack_gqa_us)."""
    num_groups = H_q // H_kv

    torch.manual_seed(42)
    q = torch.randn(B, H_q, L_q, D, dtype=torch.bfloat16, device="cuda")
    k = torch.randn(B, H_kv, L_kv, D, dtype=torch.bfloat16, device="cuda")
    v = torch.randn(B, H_kv, L_kv, D, dtype=torch.bfloat16, device="cuda")

    if has_mask:
        mask = torch.ones(B, 1, L_q, L_kv, dtype=torch.bool, device="cuda")
    else:
        mask = None

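    # Optional sanity check before timing (commented out so the benchmark loop
    # is unaffected); see _check_outputs_match above for its assumptions.
    # assert _check_outputs_match(q, k, v, num_groups, mask)
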
    # Approach 1: repeat_interleave + MHA SDPA
    def fn_repeat():
        k_exp = k.repeat_interleave(num_groups, dim=1)
        v_exp = v.repeat_interleave(num_groups, dim=1)
        if mask is not None:
            mask_exp = mask.expand(B, H_q, L_q, L_kv)
            return sdpa(q, k_exp, v_exp, attn_mask=mask_exp)
        return sdpa(q, k_exp, v_exp)

    # Approach 2: pack GQA SDPA
    def fn_pack_gqa():
        return sdpa(q, k, v, attn_mask=mask, enable_gqa=True)

    t_repeat = _benchmark_fn(fn_repeat)
    t_pack = _benchmark_fn(fn_pack_gqa)

    return t_repeat, t_pack


def main():
    if not torch.cuda.is_available():
        print("CUDA not available")
        return

    gpu_name = torch.cuda.get_device_name(0)
    print(f"GPU: {gpu_name}")
    print()

    configs = [
        # Decode configs (L_q=1) -- pack GQA should dominate
        {"B": 1, "H_q": 16, "H_kv": 2, "L_q": 1, "L_kv": 128, "D": 256, "label": "Qwen3.5 decode, ctx=128"},
        {"B": 1, "H_q": 16, "H_kv": 2, "L_q": 1, "L_kv": 512, "D": 256, "label": "Qwen3.5 decode, ctx=512"},
        {"B": 1, "H_q": 16, "H_kv": 2, "L_q": 1, "L_kv": 1024, "D": 256, "label": "Qwen3.5 decode, ctx=1024"},
        {"B": 1, "H_q": 16, "H_kv": 2, "L_q": 1, "L_kv": 2048, "D": 256, "label": "Qwen3.5 decode, ctx=2048"},
        {"B": 1, "H_q": 16, "H_kv": 2, "L_q": 1, "L_kv": 4096, "D": 256, "label": "Qwen3.5 decode, ctx=4096"},

        # Decode with mask
        {"B": 1, "H_q": 16, "H_kv": 2, "L_q": 1, "L_kv": 1024, "D": 256, "label": "Qwen3.5 decode+mask, ctx=1024", "has_mask": True},

        # Decode with different GQA ratios
        {"B": 1, "H_q": 32, "H_kv": 8, "L_q": 1, "L_kv": 2048, "D": 128, "label": "Llama-style 4:1 decode, ctx=2048"},
        {"B": 1, "H_q": 8, "H_kv": 1, "L_q": 1, "L_kv": 2048, "D": 128, "label": "MQA 8:1 decode, ctx=2048"},

        # Short seqlen (pack GQA should help)
        {"B": 1, "H_q": 16, "H_kv": 2, "L_q": 4, "L_kv": 1024, "D": 256, "label": "Qwen3.5 short L_q=4, ctx=1024"},
        {"B": 1, "H_q": 16, "H_kv": 2, "L_q": 8, "L_kv": 1024, "D": 256, "label": "Qwen3.5 short L_q=8, ctx=1024"},

        # Prefill configs (L_q=L_kv) -- repeat_interleave should be comparable
        {"B": 1, "H_q": 16, "H_kv": 2, "L_q": 128, "L_kv": 128, "D": 256, "label": "Qwen3.5 prefill, L=128"},
        {"B": 1, "H_q": 16, "H_kv": 2, "L_q": 512, "L_kv": 512, "D": 256, "label": "Qwen3.5 prefill, L=512"},
        {"B": 1, "H_q": 16, "H_kv": 2, "L_q": 1024, "L_kv": 1024, "D": 256, "label": "Qwen3.5 prefill, L=1024"},

        # Batch > 1
        {"B": 4, "H_q": 16, "H_kv": 2, "L_q": 1, "L_kv": 1024, "D": 256, "label": "Qwen3.5 B=4 decode, ctx=1024"},
    ]

    header = f"{'Config':<45} {'repeat_interleave':>18} {'pack_gqa':>12} {'Speedup':>10}"
    print(header)
    print("-" * len(header))

    for cfg in configs:
        label = cfg.pop("label")
        has_mask = cfg.pop("has_mask", False)
        t_repeat, t_pack = bench_config(**cfg, has_mask=has_mask)
        speedup = t_repeat / t_pack
        # Field widths chosen so data rows line up with the header columns.
        print(
            f"{label:<45} {t_repeat:>15.1f} us {t_pack:>9.1f} us {speedup:>9.2f}x"
        )

    print()
    print("Speedup > 1.0 means pack_gqa is faster.")
    print("Speedup < 1.0 means repeat_interleave is faster.")


if __name__ == "__main__":
    main()