Commit f074990 (1 parent: 08544c3)
Showing 10 changed files with 260 additions and 128 deletions.
The first file shown is a model-evaluation script. Its hand-rolled assembly of cancerous and healthy test chunks is replaced by a call to the new get_data helper, with the test paths prepared for GCP training:

@@ -1,47 +1,18 @@
 from __future__ import print_function
 import numpy as np
 import keras
 from keras.models import load_model
-from aura.extractor_util import reshape
-from aura.extractor_util import parseAuraDimensions as pAD
-from aura.aura_loader import read_file
+from aura.aura_loader import get_data

-root = "../Aura_Data/";
-cancerPath = root + "ChunkedCancerTestset/"
-healthyPath = root + "ChunkedHealthyTestset/"
-cancerSize = "{256x256x270}"
-healthySize = "{136x136x181}"
-model = load_model("Model-11.hf")
-
-cl,cw,cn = pAD(cancerSize)
-hl,hw,hn = pAD(healthySize)
-fl, fw = max(cl, cw, hl, hw), max(cl, cw, hl, hw)
-fn = cn + hn
-num_classes = 2
-
-model = load_model("Model-v4.hf")
-
-cancerous_test_data = read_file(path=cancerPath + cancerSize + "Chunk9.aura").T
-healthy_test_data = read_file(path=healthyPath + healthySize + "Chunk9.aura")
-healthy_test_data = reshape(healthy_test_data, (fl,fw, hn)).T
-test_data = np.zeros((fn, fl,fw))
-for i in range(cn):
-    test_data[i] = cancerous_test_data[i]
-for i in range(hn):
-    test_data[i + cn] = healthy_test_data[i]
-
-labels = np.zeros(fn)
-for i in range(cn):
-    labels[i] = 1
-
-x_test = test_data
-y_test = labels
-
-x_test = test_data.reshape(fn,fl,fw,1)
-
-model.compile(loss=keras.losses.sparse_categorical_crossentropy,
-              optimizer=keras.optimizers.Adadelta(),
-              metrics=['accuracy'])
+# Prepare paths for GCP training
+root = "../Aura_Data/"
+test_paths = [root + "{136x136x22118}HealthyTestset.aura", root + "{256x256x7021}RIDERTestset.aura",
+              root + "{256x256x879}BTPTestset.aura"]
+test_data, test_label = get_data(test_paths)
+test_n, test_l, test_w = test_data.shape
+x_test = test_data.reshape(test_n, test_l, test_w, 1)
+y_test = test_label.copy()

 score = model.evaluate(x_test, y_test, verbose=0)
 print('Test loss:', score[0])
 print('Test accuracy:', score[1])
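For readers skimming the diff, the reshape-and-evaluate pattern on the new side is worth a concrete illustration. The sketch below mirrors that flow with synthetic stand-ins: the tiny dense model, the random arrays, and all shapes are hypothetical, not the project's Model-v4.hf or its real test sets.

import numpy as np
import keras
from keras import layers

# Hypothetical stand-in for get_data(): six fake 32x32 grayscale slices with
# integer class labels (here, 0 = healthy, 1 = cancerous).
n, l, w = 6, 32, 32
test_data = np.random.rand(n, l, w).astype(np.float16)
test_label = np.array([0, 0, 0, 1, 1, 1])

# Keras image models expect an explicit channel axis: (n, l, w) -> (n, l, w, 1).
x_test = test_data.reshape(n, l, w, 1)
y_test = test_label.copy()

# Stand-in for load_model("Model-v4.hf"); any two-class classifier fits here.
model = keras.Sequential([
    layers.Flatten(input_shape=(l, w, 1)),
    layers.Dense(2, activation="softmax"),
])
model.compile(loss="sparse_categorical_crossentropy",
              optimizer="adadelta",
              metrics=["accuracy"])

score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])

sparse_categorical_crossentropy is the natural loss here because the labels are plain integer class indices rather than one-hot vectors.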
The second file shown is the aura loader module (imported above as aura.aura_loader). read_file now delegates filename parsing to parse_aura_dimensions and uses snake_case names, and a new get_data helper loads, labels, and shuffles images from a list of aura files:

@@ -1,25 +1,113 @@
 import time
 import numpy
 import os
+from aura.extractor_util import parse_aura_dimensions
+import numpy as np
+from aura.extractor_util import reshape
+from aura.extractor_util import parse_aura_dimensions as pAD
+import random


 def read_file(path):
     """
     Reads an aura file, converting it to a numpy array.
     :param path: Path to aura file.
     :return: A numpy array.
     """
     filename = path.split("/")
     filename = filename[len(filename) - 1]
-    l, w, n = filename[filename.find("{") + 1: filename.rfind("}")].split("x")
-    l, w, n = int(l), int(w), int(n)
+    l, w, n = parse_aura_dimensions(filename)
     print("Loading " + filename + "...")
     initial = time.time()

     # Load unshaped array into numpy
-    unshapedArray = numpy.fromfile(path, dtype=numpy.float16);
+    unshaped_array = numpy.fromfile(path, dtype=numpy.float16)

     # Determine the number of images by dividing the length of the unshaped array by the area of each image.
-    num_of_images = int(len(unshapedArray) / (l * w))
+    num_of_images = int(len(unshaped_array) / (l * w))
     if num_of_images != n:
-        unshapedArray = numpy.fromfile(path);
-        num_of_images = int(len(unshapedArray) / (l * w))
+        unshaped_array = numpy.fromfile(path)
+        num_of_images = int(len(unshaped_array) / (l * w))
     final = time.time()
     difference = final - initial
     print(num_of_images, "images loaded in", str(difference)[0:5], "seconds.")

     # Reshape the array to a 3D matrix.
-    Array = unshapedArray.reshape(l, w, num_of_images)
-    return Array
+    return unshaped_array.reshape(l, w, num_of_images)
+
+
+# Takes a list of aura-file paths and converts their contents to one labelled numpy array.
+def get_data(training_data_paths, shuffle=True):
+    """
+    :param training_data_paths: a list of paths from which to extract data; shapes must be (l,w,n)
+    :return: a numpy array of shape (n,l,w) and dtype numpy.float16 with the (optionally shuffled)
+             image data, and a numpy array of shape (n,) with the corresponding labels
+    n: number of images
+    l: length of each image
+    w: width of each image
+    """
+    init_time = time.time()
+    print("Retrieving data from " + str(len(training_data_paths)) + " paths.")
+    sizes = []
+    l, w = pAD(training_data_paths[0][training_data_paths[0].find("{"):training_data_paths[0].find("}") + 1])[0:2]
+    for filename in training_data_paths:
+        print("Recording dimensions of " + filename)
+        # fl: file length, fw: file width, fn: number of images in the file
+        fl, fw, fn = pAD(filename[filename.find("{"):filename.find("}") + 1])
+        if fl > l:
+            l = fl
+        if fw > w:
+            w = fw
+        sizes.append(fn)
+    n = sum(sizes)
+    print(str(n) + " images found.")
+    # train_data is a numpy array of shape (n,l,w) with data type numpy.float16
+    train_data = np.zeros((n, l, w), dtype=np.float16)
+
+    # Load in all data
+    print("Loading data.")
+    data = []
+    for file_index, path in enumerate(training_data_paths):
+        raw_data = read_file(path=path)
+        raw_data = reshape(raw_data, (l, w, sizes[file_index])).T
+        data.append(raw_data)
+
+    # Compile data[] into the output array
+    print("Compiling data into one array.")
+    index_of_train_data = 0
+    for index, package in enumerate(data):
+        for image in package:
+            train_data[index_of_train_data] = image
+            index_of_train_data += 1
+
+    # Label the training data: each image's label is the index of its source file
+    print("Labelling data.")
+    data = []
+    index_of_train_data = 0
+    for size_index in range(len(sizes)):
+        for index in range(sizes[size_index]):
+            data.append((train_data[index_of_train_data], size_index))
+            index_of_train_data += 1
+
+    if shuffle:
+        print("Shuffling data.")
+        random.shuffle(data)
+
+    print("Separating labels.")
+    # Separate training images and labels
+    labels = np.zeros(n)
+    train_data = np.zeros((n, l, w), dtype=np.float16)
+    for i, (image, label) in enumerate(data):
+        train_data[i] = image
+        labels[i] = label
+
+    final_time = time.time()
+    duration = final_time - init_time
+    print("Data retrieval complete. Process took " + str(duration) + " seconds.")
+    return train_data, labels
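Two pieces of context may help here. First, parse_aura_dimensions itself lives in aura.extractor_util and is not part of this commit; judging from the inline parsing it replaces in read_file, it presumably behaves like this sketch (an inference, not the actual implementation):

def parse_aura_dimensions(filename):
    """Parse the '{LxWxN}' tag of an aura filename.

    e.g. '{256x256x270}Chunk9.aura' -> (256, 256, 270): 256x256 slices, 270 of them.
    """
    dims = filename[filename.find("{") + 1: filename.rfind("}")]
    l, w, n = (int(part) for part in dims.split("x"))
    return l, w, n

Second, get_data's labelling scheme: every image is labelled with the index of the file it came from (so, for the test paths above, healthy slices get 0, RIDER slices 1, BTP slices 2), and images and labels are shuffled as pairs so they stay aligned. In miniature, with two hypothetical source files:

import random
import numpy as np

sizes = [3, 2]  # image counts contributed by two hypothetical source files
pairs = []
for label, count in enumerate(sizes):
    for _ in range(count):
        # each fake 2x2 image is filled with its own label so alignment is checkable
        pairs.append((np.full((2, 2), label, dtype=np.float16), label))

random.shuffle(pairs)  # shuffles images and labels together, keeping them paired

images = np.stack([image for image, _ in pairs])
labels = np.array([label for _, label in pairs])
assert all(images[i, 0, 0] == labels[i] for i in range(len(labels)))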