
Commit be2b00e

Add files via upload
Uploading the latest version of ECGMOD
1 parent bf40df3 commit be2b00e

File tree

4 files changed (+76 / -9 lines)


ECGMOD/DKLTuner.py

Lines changed: 7 additions & 4 deletions
@@ -65,17 +65,14 @@ def DKLmodel(x_train, x_test, y_train, y_test, numy=1, n_iter=500,\
     """

     if GP == "DKL" :
-        likelihood = gpytorch.likelihoods.GaussianLikelihood( noise_prior=gpytorch.priors.SmoothedBoxPrior(0.15, 1.5, sigma=0.5) )
+        likelihood = gpytorch.likelihoods.GaussianLikelihood( )  # noise_prior=gpytorch.priors.SmoothedBoxPrior(0.15, 1.5, sigma=0.5)
         model = DKLRegressionModel(x_train, y_train, likelihood, kernel, x_train.size(-1), l1out, l2out, l3out, final)

     if torch.cuda.is_available():
         model = model.cuda()
         likelihood = likelihood.cuda()


-    model.train()
-    likelihood.train()
-
     #Define the parameters to optimize
     if GP == "DKL":
         optimizer = torch.optim.Adam([
@@ -85,9 +82,14 @@ def DKLmodel(x_train, x_test, y_train, y_test, numy=1, n_iter=500,\
             {'params': model.likelihood.parameters()},
         ], lr=lr)

+    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)
+
     # "Loss" for GPs - the marginal log likelihood
     mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
     #training_iter = 500
+
+    model.train()
+    likelihood.train()

     for i in range(n_iter):
         optimizer.zero_grad()
@@ -98,6 +100,7 @@ def DKLmodel(x_train, x_test, y_train, y_test, numy=1, n_iter=500,\
         if i % 100 == 0 or i == n_iter - 1:
             print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
         optimizer.step()
+        scheduler.step()

     mse, r2, predictions = InferGPModel(likelihood, model, x_test, y_test)
     gpnoise = estimate_gpnoise(likelihood, verbose=True)
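
The functional change in this file is the learning-rate schedule: a StepLR scheduler is created alongside the Adam optimizer and stepped once per training iteration, and train mode is now set only after the optimizer and MLL exist. Below is a minimal runnable sketch of that pattern using a toy RBF exact GP in place of the repo's DKLRegressionModel; the toy model, data, and iteration count are illustrative assumptions.

import torch
import gpytorch

class ToyGP(gpytorch.models.ExactGP):
    """Stand-in exact GP; the commit uses DKLRegressionModel instead."""
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        return gpytorch.distributions.MultivariateNormal(self.mean_module(x), self.covar_module(x))

train_x = torch.linspace(0, 1, 50)
train_y = torch.sin(2 * 3.14159 * train_x) + 0.1 * torch.randn(50)

likelihood = gpytorch.likelihoods.GaussianLikelihood()   # no noise prior, as in the new code
model = ToyGP(train_x, train_y, likelihood)

optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
# Drop the learning rate by a factor of 10 every 100 iterations, mirroring the added scheduler.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

model.train()
likelihood.train()
for i in range(300):
    optimizer.zero_grad()
    loss = -mll(model(train_x), train_y)
    loss.backward()
    optimizer.step()
    scheduler.step()   # advance the LR schedule once per optimizer step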

ECGMOD/GPmodels.py

Lines changed: 6 additions & 3 deletions
@@ -162,15 +162,18 @@ def InferGPModel(likelihood, model, Xinfer, YTrue, scaleY=None):
         rmse, r2, predictions: float, float, tensor
     """
     from sklearn.metrics import mean_squared_error, r2_score
-    import numpy
-    if type(Xinfer) == numpy.ndarray:
+    import numpy as np
+
+    if type(Xinfer) == np.ndarray:
         use_cuda = torch.cuda.is_available()
         dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
         Xinfer = torch.from_numpy(Xinfer).type(dtype)
         YTrue = torch.from_numpy(YTrue).type(dtype)
+
     # Set into eval mode
     model.eval()
     likelihood.eval()
+
     with torch.no_grad(), gpytorch.settings.fast_pred_var():
         predictions = likelihood(model(Xinfer))
         mean = predictions.mean
@@ -189,7 +192,7 @@ def InferGPModel(likelihood, model, Xinfer, YTrue, scaleY=None):
     mse = mean_squared_error(YTrue,mean)
     r2 = r2_score(YTrue,mean)

-    return numpy.sqrt(mse), r2, predictions
+    return np.sqrt(mse), r2, predictions


 def estimate_gpnoise(likelihood, verbose=True):
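
The edit here only aliases numpy and adds spacing, but the inference path it touches follows a standard GPyTorch pattern. A hedged sketch of that path is below (a toy predict helper, assuming model and likelihood are an already trained exact-GP pair; it is not the repo's full InferGPModel, which also rescales with scaleY and reports RMSE/R2).

import numpy as np
import torch
import gpytorch

def predict(model, likelihood, Xinfer):
    # Same numpy -> tensor conversion the diff keeps, now under the np alias.
    if isinstance(Xinfer, np.ndarray):
        dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
        Xinfer = torch.from_numpy(Xinfer).type(dtype)
    model.eval()
    likelihood.eval()
    # fast_pred_var enables GPyTorch's fast predictive-variance computation.
    with torch.no_grad(), gpytorch.settings.fast_pred_var():
        preds = likelihood(model(Xinfer))
    return preds.mean, preds.variance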

ECGMOD/multiT_preprocess.py

Lines changed: 56 additions & 0 deletions
@@ -0,0 +1,56 @@
+def multiT_data_loader(jsonfile, NormalizeY=False, NormalizeX=False):
+    """
+    Input Args:
+        jsonfile : JSON configuration filename (str)
+        NormalizeY : default : False (Bool)
+        NormalizeX : default : False (Bool)
+        dataset : default : 'SciAdv' (Str)
+    return:
+        train_x : train descriptor array (array)
+        train_y : train target array (array)
+        test_x : test descriptor array (array)
+        test_y : test target array (array)
+        numy : multi-task number (int)
+        scalerY : None if not normalized
+        scalerX : None if not normalized
+    """
+    import json, os
+    import pandas as pd
+    import numpy as np
+    from sklearn.preprocessing import StandardScaler
+
+    config_json = json.load(open(jsonfile))
+    #config_json = jsonfile
+    path = config_json['path']
+    train_file = config_json["train_file"]
+    test_file = config_json["test_file"]
+    x_slice = config_json["x_slice"]
+    y_ind = config_json["y_slice"]  ## Assuming single task learning. Hence just an index.
+
+    df_train = np.load( os.path.join(path, train_file) ).astype('float32')
+    df_test = np.load( os.path.join(path, test_file) ).astype('float32')
+
+    train_x, train_y = df_train[:, x_slice[0] : x_slice[1] ], df_train[:, y_ind]
+    test_x, test_y = df_test[:, x_slice[0] : x_slice[1] ], df_test[:, y_ind]
+
+    if len(test_y.shape) == 1 :
+        train_y = train_y.reshape(-1, 1)
+        test_y = test_y.reshape(-1, 1)
+        numy = 1
+    else:
+        numy = test_y.shape[1]
+
+    scalerY = None
+    scalerX = None
+
+    if NormalizeY:
+        scalerY = StandardScaler()
+        train_y = scalerY.fit_transform( train_y )
+        test_y = scalerY.transform(test_y)
+
+    if NormalizeX:
+        scalerX = StandardScaler()
+        train_x = scalerX.fit_transform( train_x )
+        test_x = scalerX.transform(test_x)
+
+    return train_x, train_y, test_x, test_y, numy, scalerY, scalerX
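
multiT_data_loader expects a JSON config that points at .npy train/test arrays. Below is a hypothetical config and call: the key names come from the function above, but the paths, file names, and slice values are made-up examples, and the import assumes ECGMOD is importable as a package.

import json

# Hypothetical config; "x_slice" gives the descriptor column range [start, stop)
# and "y_slice" is a single target column index, matching the loader's assumptions.
config = {
    "path": "data/",
    "train_file": "train.npy",
    "test_file": "test.npy",
    "x_slice": [0, 128],
    "y_slice": 128,
}
with open("multiT_config.json", "w") as fh:
    json.dump(config, fh)

from ECGMOD.multiT_preprocess import multiT_data_loader
train_x, train_y, test_x, test_y, numy, scalerY, scalerX = multiT_data_loader(
    "multiT_config.json", NormalizeY=True, NormalizeX=True)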

ECGMOD/postprocess.py

Lines changed: 7 additions & 2 deletions
@@ -1,4 +1,4 @@
-def plot_metric_write(train_size,err_list,r2_list, err_unscaled ,r2_unscaled , indlist):
+def plot_metric_write(train_size,err_list,r2_list, err_unscaled ,r2_unscaled , indlist, tag=None):
     '''
     Plot the metric/ write evolution history over the trial using this function
     :param train_size: (list)
@@ -7,6 +7,7 @@ def plot_metric_write(train_size,err_list,r2_list, err_unscaled ,r2_unscaled , indlist):
     :param err_unscaled : (list) Error rescaled to energy scale
     :param r2_unscaled : (list)
     :param indlist: (list)
+    :param tag: (str) String to tag to output file name
     Return type: figure object
     '''
     import matplotlib.pyplot as plt
@@ -52,9 +53,13 @@ def plot_metric_write(train_size,err_list,r2_list, err_unscaled ,r2_unscaled , indlist):
     data = np.array([xarray, yarray, zarray, array_err_unscaled, array_r2_unscaled])
     data = data.T

-    datafile_path = "AL_history.dat"
     indexarray_path = "AL_train_Array.dat"

+    if tag == None:
+        datafile_path = "AL_history.dat"
+    elif tag != None:
+        datafile_path = "AL_history_{}.dat".format(tag)
+
     with open(datafile_path, 'w+') as datafile_id:
         np.savetxt(datafile_id, data, fmt=['%d','%f','%f','%f','%f'])
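
The only behavioural change in this file is the tag-dependent name of the history file. A standalone illustration of that branch follows; the helper is hypothetical, extracted from the diff so it can be run on its own.

def history_filename(tag=None):
    # Same naming logic as the new branch in plot_metric_write.
    if tag is None:
        return "AL_history.dat"
    return "AL_history_{}.dat".format(tag)

print(history_filename())          # -> AL_history.dat
print(history_filename("trial3"))  # -> AL_history_trial3.dat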
6065
