wmh_pytorch.py
#!/usr/bin/env python
import torch
import nibabel as nib
import numpy as np
from tqdm import tqdm
from einops import rearrange
import sys
import torchio as tio
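
# Positional command-line arguments (all passed in as strings by the caller):
#   1: in_path       - input image to segment (NIfTI)
#   2: out_path      - where the output segmentation is written (NIfTI)
#   3: gpu           - "True" to request CUDA (falls back to CPU if unavailable)
#   4: wmh_seg_home  - directory containing the pretrained model checkpoints
#   5: verbose       - "True" to show a tqdm progress bar during prediction
#   6: pmb           - "True" to load the pmb_2d_transformer checkpoint, "False" for ChallengeMatched
#   7: batch         - number of slices predicted per forward pass
#   8: fast          - "True" to predict on axial slices only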
in_path = sys.argv[1]
out_path = sys.argv[2]
gpu = sys.argv[3]
wmh_seg_home = sys.argv[4]
verbose = sys.argv[5]
pmb = sys.argv[6]
batch = np.int16(sys.argv[7])
fast = sys.argv[8]
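
# Binarize a prediction map: voxels above 0.5 become 1, everything else 0.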
def reduceSize(prediction):
    arg = prediction > 0.5
    out = np.zeros(prediction.shape)
    out[arg] = 1
    return out
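
# Segment one volume slice-wise and write the binarized WMH mask to out_path.
# `mode` mirrors the pmb flag and only affects how the input is resized to 256^3.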
def wmh_seg(in_path, out_path, train_transforms, device, mode):
    if mode == "True":
        img_orig = nib.load(in_path)
        transform = tio.transforms.Resize((256, 256, 256))
        img = transform(img_orig)
        input = np.squeeze(img.get_fdata())
        input = torch.tensor(input)
        affine = img.affine
        input = torch.unsqueeze(input, 1)
        prediction_axial = np.zeros((256, 256, 256))
        prediction_cor = np.zeros((256, 256, 256))
        prediction_sag = np.zeros((256, 256, 256))
    else:
        img_orig = nib.load(in_path)
        img = train_transforms(img_orig)
        input = np.squeeze(img.get_fdata())
        prediction_axial = np.zeros((256, 256, 256))
        prediction_cor = np.zeros((256, 256, 256))
        prediction_sag = np.zeros((256, 256, 256))
        input = torch.tensor(input)
        affine = img.affine
        input = torch.unsqueeze(input, 1)

    input = input.to(device)
    prediction_input = input / torch.max(input)
    print('Predicting.....')
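    # The volume is segmented slice-wise in batches of `batch` slices. Each
    # single-channel slice is repeated to three channels to match the 2D model's
    # input; in full (non-fast) mode, axial, coronal and sagittal predictions are
    # computed and summed.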
    # Show a progress bar over the slice batches only in verbose mode.
    slice_batches = range(input.shape[0] // batch)
    if verbose == "True":
        slice_batches = tqdm(slice_batches)

    if fast == "True":
        # Fast mode: predict on axial slices only.
        for idx in slice_batches:
            axial_img = rearrange(prediction_input[:, :, :, idx*batch:(idx+1)*batch],
                                  'd0 d1 d2 d3 -> d3 d1 d0 d2').repeat(1, 3, 1, 1)
            prediction_axial[:, :, idx*batch:(idx+1)*batch] = rearrange(
                model(axial_img.float())[0:batch].detach().cpu().numpy(),
                'd0 d1 d2 d3 -> d2 d3 (d0 d1)')
        prediction = prediction_axial
    else:
        # Full mode: predict on axial, coronal and sagittal slices and sum the three maps.
        for idx in slice_batches:
            axial_img = rearrange(prediction_input[:, :, :, idx*batch:(idx+1)*batch],
                                  'd0 d1 d2 d3 -> d3 d1 d0 d2').repeat(1, 3, 1, 1)
            cor_img = rearrange(prediction_input[:, :, idx*batch:(idx+1)*batch, :],
                                'd0 d1 d2 d3 -> d2 d1 d0 d3').repeat(1, 3, 1, 1)
            sag_img = rearrange(prediction_input[idx*batch:(idx+1)*batch, :, :, :],
                                'd0 d1 d2 d3 -> d0 d1 d2 d3').repeat(1, 3, 1, 1)
            stacked_input = torch.vstack((axial_img, cor_img, sag_img))
            # One forward pass over the stacked slices, then split the output per orientation.
            output = model(stacked_input.float()).detach().cpu().numpy()
            prediction_axial[:, :, idx*batch:(idx+1)*batch] = rearrange(
                output[0:batch], 'd0 d1 d2 d3 -> d2 d3 (d0 d1)')
            prediction_cor[:, idx*batch:(idx+1)*batch, :] = rearrange(
                output[batch:2*batch], 'd0 d1 d2 d3 -> d2 (d0 d1) d3')
            prediction_sag[idx*batch:(idx+1)*batch, :, :] = rearrange(
                output[2*batch:], 'd0 d1 d2 d3 -> (d0 d1) d2 d3')
        prediction = prediction_axial + prediction_cor + prediction_sag
    # Save the result: binarize, resize back to the original image grid, and write as NIfTI.
    out = reduceSize(prediction)
    if gpu == "True":
        img_fit = input.squeeze().cpu().numpy()
    else:
        img_fit = input.squeeze().detach().numpy()
    transform = tio.transforms.Resize((img_orig.shape[0], img_orig.shape[1], img_orig.shape[2]))
    out = transform(np.expand_dims(out, 0))
    out = reduceSize(np.squeeze(out))
    nii_seg = nib.Nifti1Image(out, affine=img_orig.affine)
    nib.save(nii_seg, out_path)
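
# Top-level script: derive the subject ID from the filename, pick the compute
# device, load the requested checkpoint, and run the segmentation.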
filename = in_path.split('/')[-1]
ID = filename.split('.')[0]

if gpu == 'True':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Configuring model on GPU')
else:
    device = torch.device('cpu')
    print('Configuring model on CPU')
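
# The pmb flag chooses between the two pretrained Unet_mit_b5 checkpoints; both
# branches resize the input volume to 256x256x256 before slice-wise prediction.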
if pmb == "False":
    train_transforms = tio.transforms.Resize((256, 256, 256))
    model = torch.load(f"{wmh_seg_home}/ChallengeMatched_Unet_mit_b5.pth", map_location=device)
    model.eval()
    model.to(device)
    wmh_seg(in_path, out_path, train_transforms, device, pmb)
elif pmb == "True":
    train_transforms = tio.transforms.Resize((256, 256, 256))
    model = torch.load(f"{wmh_seg_home}/pmb_2d_transformer_Unet_mit_b5.pth", map_location=device)
    model.eval()
    model.to(device)
    wmh_seg(in_path, out_path, train_transforms, device, pmb)