Commit 5007be5

Add files via upload
1 parent 2e8f2ea commit 5007be5

7 files changed: +710 −0 lines changed

Diff for: ImageQualityMetrics/AES.py (+96 lines)
'''
Simon Chemnitz Thomson's code to calculate the metric Average Edge Strength.

Code is based on the article:
Quantitative framework for prospective motion correction evaluation
Nicolas Pannetier, Theano Stavrinos, Peter Ng, Michael Herbst,
Maxim Zaitsev, Karl Young, Gerald Matson, and Norbert Schuff
'''

import numpy as np
from skimage.feature import canny
from scipy.ndimage import convolve
from utils import crop_img, bin_img


def aes(img, brainmask=None, sigma=np.sqrt(2), n_levels=128, bin=False, crop=True, weigt_avg=False):
    '''
    Parameters
    ----------
    img : numpy array
        Image for which the metric should be calculated.
    brainmask : numpy array, optional
        Brain mask applied to the image before the metric is calculated.
    sigma : float
        Standard deviation of the Gaussian filter used
        during Canny edge detection.
    n_levels : int
        Number of intensity levels to bin the image by.
    bin : bool
        Whether or not to bin the image.
    crop : bool
        Whether or not to crop the image/delete empty slices.
    weigt_avg : bool
        Whether to weight the slice-wise edge strengths by the
        proportion of non-zero pixels in each slice.

    Returns
    -------
    AES : float
        Average Edge Strength measure of the input image.
    '''
    # Apply brainmask if given one
    if brainmask is not None:
        img = img*brainmask
    # Crop image if crop is True
    if crop:
        img = crop_img(img)
    # Bin image if bin is True
    if bin:
        img = bin_img(img, n_levels=n_levels)
    # Centered gradient kernel in the x-direction
    x_kern = np.array([[-1, -1, -1],
                       [0, 0, 0],
                       [1, 1, 1]])
    # Centered gradient kernel in the y-direction
    y_kern = x_kern.T

    # Shape of volume/img
    vol_shape = np.shape(img)

    # Empty list to contain the edge strengths;
    # the function returns the mean of this list
    es = []

    # Weights for each slice:
    # proportion of non-zero pixels
    weights = []

    # Convert to float image
    img = img.astype(float)

    # For each slice calculate the edge strength
    for slice in range(vol_shape[2]):
        # Slice to do operations on
        im_slice = img[:, :, slice]

        # Weight, proportion of non-zero pixels
        weights.append(np.mean(im_slice > 0))

        # Convolve slice
        x_conv = convolve(im_slice, x_kern)
        y_conv = convolve(im_slice, y_kern)
        # Canny edge detector
        canny_img = canny(im_slice, sigma=sigma)
        # Numerator and denominator, to be divided,
        # defining the edge strength of the slice
        numerator = np.sum(canny_img*(x_conv**2 + y_conv**2))
        denominator = np.sum(canny_img)

        # Calculate edge strength
        frac = np.sqrt(numerator)/denominator

        # Append the edge strength
        es.append(frac)
    es = np.array(es)
    weights = np.array(weights)
    # Remove nans (and the corresponding slice weights)
    nan_mask = np.isnan(es)
    es = es[~nan_mask]
    weights = weights[~nan_mask]
    # Return the average edge strength
    if weigt_avg:
        return np.average(es, weights=weights)
    else:
        return np.mean(es)
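
For orientation, here is a minimal usage sketch (not part of the commit): it assumes utils.py from this folder is importable and uses randomly generated data in place of a real MR volume, with slices along the third axis as aes expects.

    import numpy as np
    from AES import aes

    # Hypothetical example data: a random 64x64x32 "volume" with a box-shaped mask
    rng = np.random.default_rng(0)
    vol = rng.random((64, 64, 32))
    mask = np.zeros_like(vol)
    mask[10:54, 10:54, 4:28] = 1

    # Unweighted average edge strength over slices
    print("AES:", aes(vol, brainmask=mask))
    # Weighted by the proportion of non-zero pixels per slice
    print("AES (weighted):", aes(vol, brainmask=mask, weigt_avg=True))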

Diff for: ImageQualityMetrics/CoEnt.py (+192 lines)
'''
Simon Chemnitz Thomson's code to calculate the metric Co-occurrence Entropy.

Code is based on the article:
Quantitative framework for prospective motion correction evaluation
Nicolas Pannetier, Theano Stavrinos, Peter Ng, Michael Herbst,
Maxim Zaitsev, Karl Young, Gerald Matson, and Norbert Schuff
'''

import numpy as np
from skimage.feature.texture import greycomatrix
from utils import bin_img, crop_img


def coent3d(img, brainmask=None, n_levels=128, bin=True, crop=True, supress_zero=True):
    '''
    Parameters
    ----------
    img : numpy array
        Image for which the metric should be calculated.
    brainmask : numpy array, optional
        Brain mask applied to the image before the metric is calculated.
    n_levels : int
        Number of intensity levels to bin the image by.
    bin : bool
        Whether or not to bin the image.
    crop : bool
        Whether or not to crop the image/delete empty slices.
    supress_zero : bool
        Whether to suppress the (0, 0) entry of the co-occurrence matrix
        (background-background co-occurrences).

    Returns
    -------
    CoEnt : float
        Co-occurrence entropy measure of the input image.
    '''
    # Apply brainmask if given one
    if brainmask is not None:
        img = img*brainmask
    # Crop image if crop is True
    if crop:
        img = crop_img(img)
    # Bin image if bin is True
    if bin:
        img = bin_img(img, n_levels=n_levels)
    # Scale image to have intensity values in [0, 255]
    img = 255*(img/np.max(img))
    # Convert image to uint8, as greycomatrix expects uint8 input
    img = img.astype(np.uint8)

    # Shape of the image/volume
    vol_shape = np.shape(img)

    # Empty matrix that will become the co-occurrence matrix.
    # Note it is 256x256, as that is the shape of the output of
    # skimage.feature.greycomatrix, even though the image is binned.
    co_oc_mat = np.zeros((256, 256))

    # Note: for 3D encoded images the slice axis does not matter.

    # Generate a 2D co-occurrence matrix for each slice along the first axis
    for i in range(vol_shape[0]):
        # Temporary co-occurrence matrix
        tmp_co_oc_mat = greycomatrix(img[i, :, :],
                                     distances=[1],
                                     angles=[0*(np.pi/2),
                                             1*(np.pi/2),
                                             2*(np.pi/2),
                                             3*(np.pi/2)])
        # greycomatrix generates a 4D array: P[i, j, d, theta] is the number
        # of times grey-level j occurs at distance d and angle theta from
        # grey-level i. As there is only one distance, we use
        # tmp_co_oc_mat[:, :, 0, :], and since we want the total occurrence,
        # not split by angle, we sum over axis 2.
        tmp_co_oc_mat = np.sum(tmp_co_oc_mat[:, :, 0, :], axis=2)
        # Add the occurrences to the co-occurrence matrix
        co_oc_mat = co_oc_mat + tmp_co_oc_mat

    # Generate a 2D co-occurrence matrix for each slice along the second axis,
    # to capture co-occurrence in the direction we sliced along before
    for j in range(vol_shape[1]):
        # Temporary co-occurrence matrix;
        # note only pi/2 and 3*pi/2 as angles, so only the axis
        # not covered in the loop above contributes new pairs
        tmp_co_oc_mat = greycomatrix(img[:, j, :],
                                     distances=[1],
                                     angles=[1*(np.pi/2),
                                             3*(np.pi/2)])
        # As above: keep the single distance and sum over the angles
        tmp_co_oc_mat = np.sum(tmp_co_oc_mat[:, :, 0, :], axis=2)
        # Add the occurrences to the co-occurrence matrix
        co_oc_mat = co_oc_mat + tmp_co_oc_mat
    # Divide by 6 to get the average occurrence
    co_oc_mat = (1/6)*co_oc_mat
    if supress_zero:
        co_oc_mat[0, 0] = 0
    # Normalise
    co_oc_mat = co_oc_mat/np.sum(co_oc_mat)
    # Take log2 to get the entropy
    log_matrix = np.log2(co_oc_mat)
    # Return the entropy
    return -np.nansum(co_oc_mat*log_matrix)


def coent2d(img, brainmask=None, n_levels=128, bin=True, crop=True, supress_zero=True):
    # 2D encoded version: compute the co-occurrence entropy slice-wise
    # and return the mean over slices.
    # Apply brainmask if given one
    if brainmask is not None:
        img = img*brainmask
    # Crop image if crop is True
    if crop:
        img = crop_img(img)
    # Bin image if bin is True
    if bin:
        img = bin_img(img, n_levels=n_levels)
    # Scale image to have intensity values in [0, 255]
    img = 255*(img/np.max(img))
    # Convert image to uint8, as greycomatrix expects uint8 input
    img = img.astype(np.uint8)

    # Shape of the image/volume
    vol_shape = np.shape(img)
    ents = []
    for slice in range(vol_shape[2]):
        tmp_co_oc_mat = greycomatrix(img[:, :, slice],
                                     distances=[1],
                                     angles=[0*(np.pi/2),
                                             1*(np.pi/2),
                                             2*(np.pi/2),
                                             3*(np.pi/2)])
        # greycomatrix generates a 4D array: P[i, j, d, theta] is the number
        # of times grey-level j occurs at distance d and angle theta from
        # grey-level i. As there is only one distance, we use
        # tmp_co_oc_mat[:, :, 0, :], and since we want the total occurrence,
        # not split by angle, we sum over axis 2.
        tmp_co_oc_mat = np.sum(tmp_co_oc_mat[:, :, 0, :], axis=2)
        if supress_zero:
            tmp_co_oc_mat[0, 0] = 0
        tmp_co_oc_mat = tmp_co_oc_mat/np.sum(tmp_co_oc_mat)
        log_matrix = np.log2(tmp_co_oc_mat)

        ents.append(-np.nansum(tmp_co_oc_mat*log_matrix))

    return np.nanmean(ents)


def coent(img, brainmask=None, n_levels=128, bin=True, crop=True, supress_zero=True):
    '''
    Parameters
    ----------
    img : numpy array
        Image for which the metric should be calculated.
    brainmask : numpy array, optional
        Brain mask applied to the image before the metric is calculated.
    n_levels : int
        Number of intensity levels to bin the image by.
    bin : bool
        Whether or not to bin the image.
    crop : bool
        Whether or not to crop the image/delete empty slices.
    supress_zero : bool
        Whether to suppress the (0, 0) entry of the co-occurrence matrix.

    Returns
    -------
    CoEnt : float
        Co-occurrence entropy measure of the input image.
    '''
    # Check which function to use:

    # Shape of the volume image
    img_vol = np.shape(img)

    # Working under the assumption that the third axis contains the slices,
    # e.g. a 2D encoded image would have shape (256, 256, k),
    # where k is the number of slices.
    # Additional assumption: a 2D encoded sequence does not have more than
    # 100 slices, and a 3D encoded one does not have fewer than 100 slices.
    if img_vol[2] < 100:
        return coent2d(img, brainmask, n_levels, bin, crop, supress_zero)
    else:
        return coent3d(img, brainmask, n_levels, bin, crop, supress_zero)
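
The returned value is the Shannon entropy of the normalised co-occurrence matrix, -sum_ij p_ij * log2(p_ij). A minimal usage sketch (not part of the commit), assuming utils.py is importable and using synthetic data; with fewer than 100 slices the dispatcher coent falls through to coent2d:

    import numpy as np
    from CoEnt import coent

    # Hypothetical example data: a 96x96x24 volume, treated as 2D encoded (<100 slices)
    rng = np.random.default_rng(1)
    vol = rng.integers(0, 256, size=(96, 96, 24)).astype(float)

    print("Co-occurrence entropy:", coent(vol))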

Diff for: ImageQualityMetrics/GradientEntropy.py (+54 lines)
'''
Hannah Eichhorn's code to calculate the metric Gradient Entropy.

The code is based on:
McGee K, Manduca A, Felmlee J et al. Image metric-based correction
(autocorrection) of motion effects: analysis of image metrics. J Magn Reson
Imaging. 2000; 11(2):174-181
'''

import numpy as np
from scipy.stats import entropy
from scipy.ndimage import sobel


def gradent(img, bm=[]):
    '''
    Parameters
    ----------
    img : numpy array
        Image for which the metric should be calculated.
    bm : numpy array or list, optional
        If a non-empty numpy array is given, this brainmask will be used to
        mask the image before calculating the metric. If an empty list is
        given, no mask is applied. The default is [].

    Returns
    -------
    ge : float
        Gradient Entropy of the input image.
    '''
    # The image needs to be in floating point numbers for the gradient to be
    # calculated correctly
    img = img.astype(float)

    # Calculate gradients:
    grad_x = sobel(img, axis=0, mode='reflect')
    grad_y = sobel(img, axis=1, mode='reflect')
    grad_z = sobel(img, axis=2, mode='reflect')
    nabla_ab = np.sqrt(grad_x**2 + grad_y**2 + grad_z**2)  # maybe needs to be normalized

    # Apply the brainmask, if given, and flatten the gradient image
    if len(bm) > 0:
        grad = nabla_ab.flatten()[bm.flatten() != 0]
    else:
        grad = nabla_ab.flatten()

    # Entropy of the distribution of (unique) gradient values
    _, counts = np.unique(grad, return_counts=True)
    ge = entropy(counts, base=2)

    return ge
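
A minimal usage sketch (not part of the commit); the input is assumed to be a 3D NumPy array, since gradent takes gradients along all three axes, and the brainmask is optional:

    import numpy as np
    from GradientEntropy import gradent

    # Hypothetical example data: a smooth ramp plus noise, and a box-shaped mask
    rng = np.random.default_rng(2)
    vol = np.linspace(0, 1, 64)[:, None, None] * np.ones((64, 64, 32))
    vol += 0.05 * rng.standard_normal(vol.shape)
    mask = np.zeros(vol.shape)
    mask[8:56, 8:56, 4:28] = 1

    print("Gradient entropy:", gradent(vol, bm=mask))
    print("Gradient entropy (no mask):", gradent(vol))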

Diff for: ImageQualityMetrics/ImageEntropy.py (+44 lines)
'''
Hannah Eichhorn's code to calculate the metric Image Entropy.

The code is based on:
McGee K, Manduca A, Felmlee J et al. Image metric-based correction
(autocorrection) of motion effects: analysis of image metrics. J Magn Reson
Imaging. 2000; 11(2):174-181
'''

import numpy as np
from scipy.stats import entropy


def iment(img, bm=[]):
    '''
    Parameters
    ----------
    img : numpy array
        Image for which the metric should be calculated.
    bm : numpy array or list, optional
        If a non-empty numpy array is given, this brainmask will be used to
        mask the image before calculating the metric. If an empty list is
        given, no mask is applied. The default is [].

    Returns
    -------
    ie : float
        Image Entropy of the input image.
    '''
    # Flatten and brainmask the image
    if len(bm) > 0:
        image = img.flatten()[bm.flatten() != 0]
    else:
        image = img.flatten()

    # Entropy of the distribution of (unique) intensity values
    _, counts = np.unique(image, return_counts=True)
    ie = entropy(counts, base=2)

    return ie
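
A minimal usage sketch (not part of the commit); as with the other metrics, the example data and mask are synthetic, and real usage would pass an MR volume and its brainmask:

    import numpy as np
    from ImageEntropy import iment

    # Hypothetical example data: an integer-valued volume and a box-shaped mask
    rng = np.random.default_rng(3)
    vol = rng.integers(0, 256, size=(64, 64, 32))
    mask = np.zeros(vol.shape)
    mask[8:56, 8:56, 4:28] = 1

    print("Image entropy:", iment(vol, bm=mask))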
