"""Example script to perform segmentation on LAX 4C images using fine-tuned checkpoint."""
import io
from pathlib import Path
import imageio
import matplotlib.pyplot as plt
import numpy as np
import SimpleITK as sitk # noqa: N813
import torch
from monai.transforms import ScaleIntensityd
from PIL import Image
from scipy.spatial.distance import cdist
from skimage import measure
from tqdm import tqdm
from cinema import ConvUNetR


def post_process(labels: np.ndarray) -> np.ndarray:
    """Remove artifacts by keeping the predicted components closest to the RV.

    Args:
        labels: (x, y) label map with 1 = RV, 2 = myocardium, 3 = LV.
    """
    processed = np.zeros_like(labels)
    # the RV prediction is kept unchanged
    rv_mask = labels == 1
    processed[rv_mask] = 1
    # for myocardium and LV, keep only the connected component closest to the RV
    for i in [2, 3]:
        mask = labels == i
        labeled_mask = measure.label(mask)
        bc = np.bincount(labeled_mask.flat, weights=mask.flat)
        # find the component with the smallest Manhattan distance to the RV
        closest = None
        d_min = np.inf
        for j, c in enumerate(bc):
            if c > 0:
                d = cdist(np.argwhere(rv_mask), np.argwhere(labeled_mask == j), "minkowski", p=1.0).min()
                if d < d_min:
                    d_min = d
                    closest = j
        processed[labeled_mask == closest] = i
    return processed
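
# Illustrative example (hypothetical toy input, not executed by this script): with a spurious
# LV pixel far from the RV, post_process keeps only the LV component adjacent to the RV.
#   toy = np.zeros((8, 8), dtype=np.int64)
#   toy[2:4, 1:3] = 1   # RV blob
#   toy[2:4, 4:6] = 3   # true LV blob near the RV
#   toy[7, 7] = 3       # spurious LV pixel far away
#   post_process(toy)   # the isolated pixel at (7, 7) is removed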


def plot_segmentations(images: np.ndarray, labels: np.ndarray, filepath: Path) -> None:
    """Plot segmentations as an animated GIF.

    Args:
        images: (x, y, 1, t) image array.
        labels: (x, y, t) label map.
        filepath: path to save the GIF file.
    """
    n_frames = labels.shape[-1]
    frames = []
    for t in tqdm(range(n_frames), desc="Creating GIF frames"):
        # Create individual frame
        fig, ax = plt.subplots(figsize=(5, 5), dpi=150)

        # Plot image
        ax.imshow(images[..., 0, t], cmap="gray")

        # Plot segmentation overlays (RV, myocardium, LV) as translucent RGBA layers
        ax.imshow((labels[..., t, None] == 1) * np.array([108 / 255, 142 / 255, 191 / 255, 0.6]))
        ax.imshow((labels[..., t, None] == 2) * np.array([214 / 255, 182 / 255, 86 / 255, 0.6]))
        ax.imshow((labels[..., t, None] == 3) * np.array([130 / 255, 179 / 255, 102 / 255, 0.6]))

        # Remove axes
        ax.set_xticks([])
        ax.set_yticks([])

        # Render figure to numpy array using BytesIO (universal across backends)
        buf = io.BytesIO()
        plt.savefig(buf, format="png", bbox_inches="tight", pad_inches=0, dpi=150)
        buf.seek(0)
        img = Image.open(buf)
        frame = np.array(img.convert("RGB"))
        frames.append(frame)
        buf.close()
        plt.close(fig)

    # Create GIF directly from memory arrays
    with imageio.get_writer(filepath, mode="I", duration=50, loop=0) as writer:
        for frame in tqdm(frames, desc="Creating GIF"):
            writer.append_data(frame)
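
# Example call (arrays shaped as documented above; the output filename is illustrative only):
#   plot_segmentations(images, labels, Path("lax_4c_demo.gif"))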


def plot_volume_changes(labels: np.ndarray, filepath: Path) -> None:
    """Plot mask area changes over time and the derived ejection fractions.

    Args:
        labels: (x, y, t) label map.
        filepath: path to save the PNG file.
    """
    n_frames = labels.shape[-1]
    xs = np.arange(n_frames)
    # per-frame pixel counts for each class, scaled for plotting
    rv_volumes = np.sum(labels == 1, axis=(0, 1)) * 10 / 1000
    myo_volumes = np.sum(labels == 2, axis=(0, 1)) * 10 / 1000
    lv_volumes = np.sum(labels == 3, axis=(0, 1)) * 10 / 1000
    # area-based ejection fraction: (max - min) / max * 100
    lvef = (max(lv_volumes) - min(lv_volumes)) / max(lv_volumes) * 100
    rvef = (max(rv_volumes) - min(rv_volumes)) / max(rv_volumes) * 100

    fig, ax = plt.subplots(figsize=(4, 4), dpi=120)
    ax.plot(xs, rv_volumes, color="#6C8EBF", label="Right Ventricle")
    ax.plot(xs, myo_volumes, color="#D6B656", label="Myocardium")
    ax.plot(xs, lv_volumes, color="#82B366", label="Left Ventricle")
    ax.set_xlabel("Frame")
    ax.set_ylabel("Area (mm2)")
    ax.set_title(f"LVEF = {lvef:.2f}%\nRVEF = {rvef:.2f}%")
    ax.legend(loc="upper center", bbox_to_anchor=(0.5, 1))
    fig.tight_layout()
    fig.savefig(filepath, dpi=300, bbox_inches="tight")
    plt.close(fig)
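
# Note: ejection fraction is conventionally (EDV - ESV) / EDV * 100 computed from volumes;
# here the per-frame 2D mask areas of the LAX 4C view serve as a proxy, taking the maximum
# area as end-diastole and the minimum area as end-systole.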


def run(trained_dataset: str, seed: int, device: torch.device, dtype: torch.dtype) -> None:
    """Run segmentation on LAX 4C images using a fine-tuned checkpoint."""
    # load the fine-tuned model
    view = "lax_4c"
    model = ConvUNetR.from_finetuned(
        repo_id="mathpluscode/CineMA",
        model_filename=f"finetuned/segmentation/{trained_dataset}_{view}/{trained_dataset}_{view}_{seed}.safetensors",
        config_filename=f"finetuned/segmentation/{trained_dataset}_{view}/config.yaml",
    )
    model.eval()
    model.to(device)

    # load sample data and form a batch of size 1
    transform = ScaleIntensityd(keys=view)
    exp_dir = Path(__file__).parent.parent.resolve()
    images = np.transpose(sitk.GetArrayFromImage(sitk.ReadImage(exp_dir / "data/mnms2/lax_4c_ed.nii.gz")))
    images = images[..., None, :]  # (x, y, t) -> (x, y, 1, t)
    n_frames = images.shape[-1]

    # segment each frame independently
    labels_list = []
    for t in tqdm(range(n_frames), total=n_frames):
        batch = transform({view: torch.from_numpy(images[None, ..., 0, t])})
        batch = {k: v[None, ...].to(device=device, dtype=dtype) for k, v in batch.items()}
        with torch.no_grad(), torch.autocast("cuda", dtype=dtype, enabled=torch.cuda.is_available()):
            logits = model(batch)[view]  # (1, 4, x, y)
        labels = torch.argmax(logits, dim=1)[0].detach().to(torch.float32).cpu().numpy()  # (x, y)
        # the model sometimes predicts spurious extra myocardium and left-ventricle components,
        # so keep only the connected component closest to the right ventricle
        labels = post_process(labels)
        labels_list.append(labels)
    labels = np.stack(labels_list, axis=-1)  # (x, y, t)

    # visualise segmentations
    plot_segmentations(images, labels, Path(f"segmentation_{view}_animation_{trained_dataset}_{seed}.gif"))

    # visualise area changes
    plot_volume_changes(labels, Path(f"segmentation_{view}_mask_area_{trained_dataset}_{seed}.png"))


if __name__ == "__main__":
    dtype, device = torch.float32, torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda")
        if torch.cuda.is_bf16_supported():
            dtype = torch.bfloat16
    trained_dataset = "mnms2"
    for seed in range(3):
        run(trained_dataset, seed, device, dtype)
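
# To reproduce the outputs, one would typically run this script directly, e.g. (assuming the
# CineMA repository layout with the sample data under ../data/mnms2 relative to this file):
#   python segmentation_lax_4c.py
# Each (dataset, seed) pair writes a GIF of the per-frame segmentations and a PNG of the
# mask-area curves into the current working directory.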