Skip to content

Commit 6267762

Browse files
Author: CI (committed)
fix(ci): repair python CI and lint for llm-speed
Made-with: Cursor
1 parent 4bfb2d5 commit 6267762

7 files changed

Lines changed: 15 additions & 12 deletions

File tree

.github/workflows/ci.yml

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,10 @@ jobs:
6868
cache: pip
6969

7070
- name: Install test dependencies
71-
run: pip install pytest hypothesis torch --index-url https://download.pytorch.org/whl/cpu
71+
run: |
72+
pip install -U pip
73+
pip install pytest hypothesis
74+
pip install torch --index-url https://download.pytorch.org/whl/cpu
7275
7376
- name: Run tests (skip CUDA)
7477
run: pytest tests/ -v -m "not cuda" --tb=short || true

benchmarks/benchmark_attention.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
import argparse
88
import json
99
import torch
10-
from typing import List, Dict, Tuple
10+
from typing import List, Dict
1111
import sys
1212
from pathlib import Path
1313
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
@@ -205,7 +205,7 @@ def main():
205205

206206
dtype = torch.float16 if args.dtype == 'fp16' else torch.float32
207207

208-
print(f"Configuration:")
208+
print("Configuration:")
209209
print(f" Batch size: {args.batch_size}")
210210
print(f" Num heads: {args.num_heads}")
211211
print(f" Head dim: {args.head_dim}")

benchmarks/benchmark_gemm.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -48,8 +48,12 @@ def compute_gemm_flops(M: int, N: int, K: int) -> int:
4848

4949

5050
def benchmark_gemm(
51-
sizes: List[Tuple[int, int, int]] = [(1024, 1024, 1024), (2048, 2048, 2048),
52-
(4096, 4096, 4096), (8192, 8192, 8192)],
51+
sizes: List[Tuple[int, int, int]] = [
52+
(1024, 1024, 1024),
53+
(2048, 2048, 2048),
54+
(4096, 4096, 4096),
55+
(8192, 8192, 8192),
56+
],
5357
dtype: torch.dtype = torch.float16,
5458
warmup: int = 10,
5559
iterations: int = 100
@@ -213,7 +217,7 @@ def main():
213217

214218
dtype = torch.float16 if args.dtype == 'fp16' else torch.float32
215219

216-
print(f"Configuration:")
220+
print("Configuration:")
217221
print(f" Sizes: {sizes}")
218222
print(f" Dtype: {args.dtype}")
219223
print(f" Warmup: {args.warmup}")

python/profiler.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@
55
from __future__ import annotations
66

77
import torch
8-
import time
98
from dataclasses import dataclass
109
from typing import Callable
1110
from enum import Enum

tests/conftest.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@
55
import pytest
66
import torch
77
import numpy as np
8-
from typing import Tuple
98

109

1110
def pytest_configure(config):

tests/test_attention.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,7 @@
55

66
import pytest
77
import torch
8-
import numpy as np
9-
from hypothesis import given, settings, strategies as st, assume
8+
from hypothesis import given, settings, strategies as st
109

1110
from conftest import assert_close, compute_attention_reference
1211

tests/test_gemm.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,7 @@
55

66
import pytest
77
import torch
8-
import numpy as np
9-
from hypothesis import given, settings, strategies as st, assume
8+
from hypothesis import given, settings, strategies as st
109

1110
from conftest import assert_close
1211

0 commit comments

Comments (0)