diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..fbfa7d1 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +**/*.pyc diff --git a/.ipynb_checkpoints/cerebro-checkpoint.ipynb b/.ipynb_checkpoints/cerebro-checkpoint.ipynb new file mode 100644 index 0000000..278b360 --- /dev/null +++ b/.ipynb_checkpoints/cerebro-checkpoint.ipynb @@ -0,0 +1,717 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "#### Written and Copyright by Mohit Agarwal\n", + "#### Georgia Institute of Technology\n", + "#### Email: me.agmohit@gmail.com\n", + "\n", + "## Please cite the following publication if you are using the codes for your study and publication\n", + "# Agarwal, Mohit, and Raghupathy Sivakumar. \n", + "# \"Cerebro: A Wearable Solution to Detect and Track User Preferences using Brainwaves.\" \n", + "# The 5th ACM Workshop on Wearable Systems and Applications. ACM, 2019.\n", + "\n", + "### Code to decode Product Preference data [10 products] and rank it\n", + "### Uses fix rule based on N200, Min and ERSP features" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "##Importing Libraries\n", + "\n", + "import numpy as np\n", + "import math\n", + "import scipy.io\n", + "import os\n", + "from sklearn.decomposition import FastICA\n", + "import mne\n", + "from mne.time_frequency import psd_multitaper\n", + "import matplotlib.pyplot as plt\n", + "import random\n", + "import itertools\n", + "from scipy import stats\n", + "\n", + "from sklearn import svm\n", + "from sklearn.neighbors import KNeighborsClassifier\n", + "from sklearn.linear_model import LinearRegression\n", + "from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n", + "from sklearn.ensemble import RandomForestClassifier\n", + "from sklearn.tree import DecisionTreeClassifier\n", + "from sklearn.linear_model import ElasticNet\n", + "from sklearn.model_selection import cross_val_score\n", + "from sklearn.metrics import confusion_matrix\n", + "from sklearn.metrics import accuracy_score\n", + "\n", + "import matplotlib.pyplot as plt\n", + "\n", + "\n", + "mne.set_log_level('ERROR')" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "## Parameters\n", + "\n", + "chan_list = ['Fp1','F3','F7','C3','T7','P3','P7','O1','Pz','Fp2','Fz','F4','F8','Cz','C4','T8','P4','P8','O2']\n", + "\n", + "selected_chan = [1,3,8,10,11,13,14]\n", + "total_chan = len(selected_chan)\n", + "\n", + "total_sub = 14 # Anything from 1-14\n", + "total_prod = 10 \n", + "freq = 256.0\n", + "time_len = 768\n", + "\n", + "time = [(x-freq)*1.0/freq for x in xrange(1,time_len+1)]\n", + "time = np.array(time)\n", + "\n", + "n200_ind = [idx for idx, t_ind in enumerate(time) if (t_ind>=0.2 and t_ind<=0.3)]\n", + "n200_ind = np.array(n200_ind)\n", + "\n", + "erp_ind = [idx for idx, t_ind in enumerate(time) if (t_ind>=0.2 and t_ind<=0.3)]\n", + "erp_ind = np.array(erp_ind)\n", + "erp_len = len(erp_ind)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# Loading data\n", + "data_dict = {}\n", + "data_dir = 'data/'\n", + "list_of_files = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]\n", + "for curr_file in list_of_files:\n", + " if '_data.mat' in curr_file:\n", + " sub_id = int(curr_file[1:3])\n", + " data_dict[sub_id] = scipy.io.loadmat(os.path.join(data_dir,curr_file))\n", + " if 
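# Editorial sketch, not part of the original diff: the epoch time axis and the
# 200-300 ms window selection from the parameters cell above, written with
# NumPy under Python 3 (the notebook itself targets Python 2, hence xrange and
# print statements). freq, time_len and the window bounds are the notebook's values.
import numpy as np

freq = 256.0        # sampling rate (Hz)
time_len = 768      # samples per epoch: roughly 1 s before and 2 s after stimulus onset
time = (np.arange(1, time_len + 1) - freq) / freq      # t runs from about -1.0 s to 2.0 s

n200_ind = np.where((time >= 0.2) & (time <= 0.3))[0]  # indices of the N200 window
erp_ind = n200_ind.copy()                              # the same window feeds the "Min" feature
print(n200_ind.size)                                   # 25 samples of the 0.2-0.3 s window at 256 Hz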
curr_file == 'WA2.mat':\n", + " WA2 = scipy.io.loadmat(os.path.join(data_dir,curr_file))['WA2']\n", + " WA2 = np.delete(WA2,11,0) # to remove 12th subject\n", + " if curr_file == 'outChoices.mat':\n", + " outChoices = scipy.io.loadmat(os.path.join(data_dir,curr_file))['out']\n", + "# WA2 contains 14(sub) x 10 (prod) : value represents # of times the product was chosen total\n", + "# outChoices : column 0: if prod_i was chosen? column 7: sub_id*100+prod_1 , col 8: sub_id*100+prod_2\n", + "choices = np.zeros([total_sub+1, total_prod+1, total_prod+1])\n", + "for idx in range(outChoices.shape[0]):\n", + " sub_id = int(outChoices[idx, 7]//100)\n", + " sub_id_2 = int(outChoices[idx, 8]//100)\n", + " if sub_id == 12:\n", + " continue\n", + " if sub_id > 12:\n", + " sub_id = sub_id - 1\n", + " sub_id_2 = sub_id_2 - 1\n", + " assert sub_id>0 and sub_id <= (total_sub+1) and sub_id == sub_id_2, \"Error 1: error decoding\"+str(sub_id)\n", + " prod_1 = int(outChoices[idx, 7]%100)\n", + " prod_2 = int(outChoices[idx, 8]%100)\n", + " assert prod_1 > 0 and prod_1 <= total_prod and prod_2 > 0 and prod_2 <= total_prod, \"Error 2: error decoding \"+str(prod_2)\n", + " if prod_1 > prod_2 or prod_1==prod_2:\n", + " print \"check it baby\", prod_1, prod_2\n", + " if outChoices[idx, 0] == 0:\n", + " choices[sub_id, prod_1, prod_2] = choices[sub_id, prod_1, prod_2] + 1\n", + " elif outChoices[idx, 0] == 1:\n", + " choices[sub_id, prod_2, prod_1] = choices[sub_id, prod_2, prod_1] + 1\n", + " \n", + " \n", + "pref = np.zeros([total_sub+1, total_prod+1])\n", + "pref[1:,1:] = WA2\n", + "#data_dict[sub_id]['sig'], ['lab'] contains original eeg signals (25x768x500) and labels (1x500)\n", + "# What are channels?" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "ncomp = 1\n", + "sig_data = np.zeros([total_sub+1, total_prod+1, 768, 7])\n", + "ica_data = np.zeros([total_sub+1, total_prod+1, 768, 1])\n", + "n200_val = np.zeros([total_sub+1, total_prod+1])\n", + "ersp_val = np.zeros([total_sub+1, total_prod+1])\n", + "feat_val = np.zeros([total_sub+1, total_prod+1])\n", + "for sub_id in range(1,total_sub+1):\n", + " sig = data_dict[sub_id]['sig']\n", + " lab = data_dict[sub_id]['lab'].flatten()\n", + " for prod_id in range(1,total_prod+1):\n", + " sig_prod = sig[selected_chan, :, np.argwhere(lab==prod_id)] \n", + " avg_sig_chan = np.transpose(np.mean(sig_prod, axis=0))\n", + " # compute ICA\n", + " ica_sig = FastICA(n_components=ncomp)\n", + " S_ = ica_sig.fit_transform(avg_sig_chan) # Get the estimated sources\n", + " A_sig = ica_sig.mixing_ # Get estimated mixing matrix\n", + " A_sig_norm = np.linalg.norm(A_sig,axis=0)\n", + " S_ = S_*A_sig_norm\n", + " \n", + " if sum(A_sig)<0:\n", + " S_ = -1*S_\n", + " \n", + " \n", + " \n", + " n200val = np.mean(S_[n200_ind])\n", + " featval = min(S_[erp_ind])\n", + " \n", + " info = mne.create_info(ch_names=['ica'], sfreq=freq, ch_types=['eeg'])\n", + " raw = mne.io.RawArray(np.transpose(S_[256:]), info)\n", + " psds, freqs = psd_multitaper(raw, low_bias=True, tmin=0.1, tmax=0.5, fmin=13, fmax=26, proj=True, n_jobs=1)\n", + " raw2 = mne.io.RawArray(np.transpose(S_), info)\n", + " psds2, freqs2 = psd_multitaper(raw2, low_bias=True, tmin=0.5, tmax=1.0, fmin=13, fmax=26, proj=True, n_jobs=1)\n", + " erspval = np.mean(10*np.log10(psds)) - np.mean(10*np.log10(psds2))\n", + "\n", + " n200_val[sub_id, prod_id] = 10*np.log10(1 + abs(n200val)**2)\n", + " ersp_val[sub_id, prod_id] = erspval\n", + " feat_val[sub_id, prod_id] = 
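# Editorial sketch, not part of the original diff: the sub_id*100 + prod_id
# packing that columns 7 and 8 of outChoices use, and the floor-divide /
# modulo unpacking performed by the loop above. The values here are made up.
code = 14 * 100 + 7          # subject 14 paired with product 7 -> 1407
sub_id = int(code // 100)    # 14
prod_id = int(code % 100)    # 7
assert (sub_id, prod_id) == (14, 7)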
10*np.log10(1 + featval**2)\n", + " \n", + " sig_data[sub_id, prod_id, :, :] = avg_sig_chan\n", + " ica_data[sub_id, prod_id, :, :] = S_\n", + "\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pairwise Accuracy [Max - Mean - std]\n", + "Validation \t 0.2531710586881039 0.21766517997334361 0.016270837736846972\n", + "Train \t 0.19017083810008625 0.15890894110548684 0.014288618519160958\n", + "Test \t 0.06923970322558004 -0.02751868787131508 0.03926872634262003\n", + "Ranking Accuracy Mean [Train - Test - Random]\n", + "Dist \t [0.46996599 0.67698413 0.88809524]\n", + "Tau \t [0.51007937 0.27103175 0.00595238]\n", + "NDCG \t [0.95695327 0.92002218 0.87499508]\n", + "Ranking Accuracy Max [Train - Test ]\n", + "Dist \t [0.36462585 0.33333333]\n", + "Tau \t [0.62585034 0.66666667]\n", + "NDCG \t [0.97544729 0.97425431]\n", + "Ranking Accuracy Std [Train - Test]\n", + "Dist \t [0.0393621 0.11796065]\n", + "Tau \t [0.04224192 0.1390764 ]\n", + "NDCG \t [0.00860341 0.03838353]\n" + ] + } + ], + "source": [ + "# Ranking Evaluation\n", + "\n", + "nsim = 1\n", + "\n", + "train_split = 7\n", + "test_split = 10 - train_split\n", + "\n", + "train_split_cv = 6\n", + "test_split_cv = train_split - train_split_cv\n", + "\n", + "total_splits = int(scipy.special.comb(10, train_split))\n", + "\n", + "total_samples_train = int(scipy.special.comb(train_split, 2))*2\n", + "total_samples_test = int(scipy.special.comb(test_split, 2))\n", + "\n", + "total_samples_train_cv = int(scipy.special.comb(train_split_cv, 2))*2\n", + "total_samples_test_cv = int(scipy.special.comb(test_split_cv, 2))\n", + "total_samples_train_rank_cv = int(scipy.special.comb(train_split_cv, 3))\n", + "\n", + "total_samples_train_rank = int(scipy.special.comb(train_split, 3))\n", + "total_samples_test_rank = int(scipy.special.comb(test_split, 3))\n", + "\n", + "X_train, y_train = np.zeros([total_samples_train, 3]), np.zeros([total_samples_train,])\n", + "X_train_rank, y_train_rank, y_train_rel = np.zeros([total_samples_train_rank, 9]), np.zeros([total_samples_train_rank,3]), np.zeros([total_samples_train_rank,3])\n", + "\n", + "X_test, y_test = np.zeros([total_samples_test, 3]), np.zeros([total_samples_test,])\n", + "X_test_rank, y_test_rank, y_test_rel = np.zeros([total_samples_test_rank, 9]), np.zeros([total_samples_test_rank, 3]), np.zeros([total_samples_test_rank, 3])\n", + "\n", + "rank_metrics = np.zeros([total_sub + 1, total_splits, 3, 3]) # rows: ML-train, ML-test, random col: avg dist, tau, NDCG\n", + "rank_count = np.zeros([total_sub + 1, total_splits, 3, 3])\n", + "\n", + "# Function to calculate NDCG score\n", + "def ndcg_score(rel, t_rank, p_rank):\n", + " dcg = sum(rel*1.0/np.log2(p_rank + 1))\n", + " idcg = sum(rel*1.0/np.log2(t_rank + 1))\n", + " return dcg*1.0/idcg\n", + "\n", + "def cv_train(sub_id, train_prod):\n", + "# Step 2: Preparing Training Dataset\n", + " cv_train_acc = []\n", + " for cv_train_prod_s in itertools.combinations(train_prod, train_split_cv):\n", + " cv_test_prod = list(set(train_prod)-set(cv_train_prod_s))\n", + " cv_train_prod = list(cv_train_prod_s)\n", + "\n", + " X_train_cv, y_train_cv = np.zeros([total_samples_train_cv, 3]), np.zeros([total_samples_train_cv,])\n", + " X_train_rank_cv, y_train_rank_cv, y_train_rel_cv = np.zeros([total_samples_train_rank_cv, 9]), np.zeros([total_samples_train_rank_cv,3]), np.zeros([total_samples_train_rank_cv,3])\n", + "# X_test_cv, y_test_cv = 
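# Editorial sketch, not part of the original diff: the single-component FastICA
# projection used by the feature-extraction cell above, run on synthetic data of
# the same shape. The notebook keeps one component, rescales it by the norm of
# its mixing column, and flips the sign when the mixing weights sum to a negative
# value; the windowed mean and minimum then become the N200 and "Min" features.
import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.RandomState(0)
avg_sig_chan = rng.randn(768, 7)              # stand-in for one averaged 7-channel epoch

ica_sig = FastICA(n_components=1, random_state=0)
S_ = ica_sig.fit_transform(avg_sig_chan)      # (768, 1) estimated source
A_sig = ica_sig.mixing_                       # (7, 1) estimated mixing column
S_ = S_ * np.linalg.norm(A_sig, axis=0)       # undo the unit-variance scaling
if A_sig.sum() < 0:                           # sign convention, as in the notebook
    S_ = -S_

n200_ind = np.arange(307, 332)                # 200-300 ms window (see the parameters cell)
n200val = np.mean(S_[n200_ind])
featval = np.min(S_[n200_ind])
print(10 * np.log10(1 + abs(n200val) ** 2), 10 * np.log10(1 + featval ** 2))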
np.zeros([total_samples_test_cv, 3]), np.zeros([total_samples_test_cv,])\n", + "\n", + " sample_idx_train_cv = 0\n", + " for i in range(train_split_cv):\n", + " for j in range(i+1, train_split_cv):\n", + " prod_i, prod_j = cv_train_prod[i], cv_train_prod[j]\n", + "\n", + " y_train_cv[sample_idx_train_cv] = np.sign(pref[sub_id, prod_i] - pref[sub_id, prod_j])\n", + " X_train_cv[sample_idx_train_cv,:] = np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]]) - np.array([n200_val[sub_id, prod_j], feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]])\n", + "\n", + " sample_idx_train_cv = sample_idx_train_cv + 1\n", + " y_train_cv[sample_idx_train_cv] = np.sign(pref[sub_id, prod_j] - pref[sub_id, prod_i])\n", + " X_train_cv[sample_idx_train_cv,:] = np.array([n200_val[sub_id, prod_j], feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]]) - np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]])\n", + "\n", + " sample_idx_train_cv = sample_idx_train_cv + 1\n", + "\n", + " idx_train_cv = range(total_samples_train_cv)\n", + " random.shuffle(idx_train_cv)\n", + " X_train_cv = X_train_cv[idx_train_cv, :]\n", + " y_train_cv = y_train_cv[idx_train_cv, ]\n", + "\n", + " # Step 2A: Preparing Training dataset for training evaluation\n", + " sample_idx_train_rank_cv = 0\n", + " for i in range(train_split_cv):\n", + " for j in range(i+1, train_split_cv):\n", + " for k in range(j+1, train_split_cv):\n", + " prod_i, prod_j, prod_k = cv_train_prod[i], cv_train_prod[j], cv_train_prod[k]\n", + " if not ((pref[sub_id, prod_i] == pref[sub_id, prod_j]) or (pref[sub_id, prod_i] == pref[sub_id, prod_k]) or (pref[sub_id, prod_j] == pref[sub_id, prod_k])):\n", + "\n", + " ground_pref_cv = [pref[sub_id, prod_i], pref[sub_id, prod_j], pref[sub_id, prod_k]] # [rank_i, rank_j, rank_k]\n", + "# print ground_pref_cv\n", + " ground_ordr_cv = np.argsort(ground_pref_cv)\n", + " ground_rank_cv = 3 - np.argsort(ground_ordr_cv) # rank 1 means top, rank 3 means bottom\n", + " y_train_rank_cv[sample_idx_train_rank_cv,:] = ground_rank_cv\n", + " y_train_rel_cv[sample_idx_train_rank_cv,:] = ground_pref_cv\n", + "\n", + " X_train_rank_cv[sample_idx_train_rank_cv,0:3] = np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]])\n", + " X_train_rank_cv[sample_idx_train_rank_cv,3:6] = np.array([n200_val[sub_id, prod_j], feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]])\n", + " X_train_rank_cv[sample_idx_train_rank_cv,6:9] = np.array([n200_val[sub_id, prod_k], feat_val[sub_id, prod_k], ersp_val[sub_id, prod_k]])\n", + " sample_idx_train_rank_cv = sample_idx_train_rank_cv + 1\n", + "\n", + "\n", + "\n", + " clf = ElasticNet(l1_ratio=0.05, alpha=0.1, normalize=True)\n", + " clf.fit(X_train_cv, y_train_cv) \n", + "\n", + " cv_train_acc.append(clf.score(X_train_cv, y_train_cv))\n", + "\n", + " for eval_idx_cv in range(sample_idx_train_rank_cv):\n", + " # Distance Metric\n", + "\n", + " pred_pref_cv = [clf.predict(np.reshape(X_train_rank_cv[eval_idx_cv,0:3],[1,-1]))[0], clf.predict(np.reshape(X_train_rank_cv[eval_idx_cv,3:6],[1,-1]))[0], clf.predict(np.reshape(X_train_rank_cv[eval_idx_cv,6:9],[1,-1]))[0]]\n", + " ord_pref_cv = np.argsort(pred_pref_cv)\n", + " pred_rank_cv = 3 - np.argsort(ord_pref_cv)\n", + " true_rank_cv = y_train_rank_cv[eval_idx_cv,]\n", + " dist_ml_train_cv = np.mean(abs(true_rank_cv - pred_rank_cv)) \n", + "\n", + " true_rel_cv = y_train_rel_cv[eval_idx_cv,]\n", + "\n", + " return cv_train_acc\n", + "\n", + "\n", + "\n", + 
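# Editorial sketch, not part of the original diff: the pairwise training set the
# steps above build (difference of per-product feature vectors, labelled by the
# sign of the preference difference), fitted with ElasticNet. Recent scikit-learn
# versions removed the normalize=True argument the notebook passes, so scaling is
# done here with an explicit StandardScaler pipeline; that is an approximation of
# the old behaviour, and the feature values below are synthetic.
import itertools
import numpy as np
from sklearn.linear_model import ElasticNet
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(0)
feats = rng.randn(7, 3)        # 7 training products x [n200, min, ersp]
pref = rng.rand(7)             # stand-in preference scores

X, y = [], []
for i, j in itertools.combinations(range(7), 2):
    if pref[i] != pref[j]:
        X.append(feats[i] - feats[j]); y.append(np.sign(pref[i] - pref[j]))
        X.append(feats[j] - feats[i]); y.append(np.sign(pref[j] - pref[i]))

model = make_pipeline(StandardScaler(), ElasticNet(l1_ratio=0.05, alpha=0.1))
model.fit(np.array(X), np.array(y))
print(model.score(np.array(X), np.array(y)))   # R^2 on the training pairs, as clf.score reports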
"acc_metric = np.zeros([total_sub + 1, total_splits, 3])\n", + "acc_count = np.zeros([total_sub + 1, total_splits, 3])\n", + "\n", + "test_comb = []\n", + "\n", + "\n", + "\n", + "for sub_id in range(1,total_sub+1):\n", + " \n", + " prod_cnt = pref[sub_id,1:]\n", + " prod_ord = np.flip(np.argsort(prod_cnt))\n", + " sort_idx = 10 - np.argsort(prod_ord)\n", + " \n", + " for sim_idx in range(nsim):\n", + "\n", + " acc_idx = 0\n", + "\n", + " # Step 1: Choosing products for training/testing : All possible permutations/combinations\n", + " for subset in itertools.combinations(range(10), train_split):\n", + " train_prod = 1 + prod_ord[list(subset)]\n", + " \n", + " test_prod = list(set(1+prod_ord)-set(train_prod))\n", + " \n", + " subset_test = 1 + np.sort(list(set(range(10))-set(subset)))\n", + " \n", + "\n", + " if sub_id==2:\n", + " test_comb.append(subset_test)\n", + "\n", + " cv_train_acc = cv_train(sub_id, train_prod)#0\n", + " \n", + " \n", + " \n", + " sample_idx_train = 0\n", + " for i in range(train_split):\n", + " for j in range(i+1, train_split):\n", + " prod_i, prod_j = train_prod[i], train_prod[j]\n", + " if not pref[sub_id, prod_i] == pref[sub_id, prod_j]:\n", + "\n", + " y_train[sample_idx_train] = np.sign(pref[sub_id, prod_i] - pref[sub_id, prod_j])\n", + " X_train[sample_idx_train,:] = np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]]) - np.array([n200_val[sub_id, prod_j], feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]])\n", + "\n", + " sample_idx_train = sample_idx_train + 1\n", + " y_train[sample_idx_train] = np.sign(pref[sub_id, prod_j] - pref[sub_id, prod_i])\n", + " X_train[sample_idx_train,:] = np.array([n200_val[sub_id, prod_j], feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]]) - np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]])\n", + "\n", + " sample_idx_train = sample_idx_train + 1\n", + "\n", + " idx_train = range(total_samples_train)\n", + " random.shuffle(idx_train)\n", + " X_train = X_train[idx_train, :]\n", + " y_train = y_train[idx_train, ]\n", + " \n", + " # Step 2A: Preparing Training dataset for training evaluation\n", + " sample_idx_train_rank = 0\n", + " for i in range(train_split):\n", + " for j in range(i+1, train_split):\n", + " for k in range(j+1, train_split):\n", + " prod_i, prod_j, prod_k = train_prod[i], train_prod[j], train_prod[k]\n", + " if not ((pref[sub_id, prod_i] == pref[sub_id, prod_j]) or (pref[sub_id, prod_i] == pref[sub_id, prod_k]) or (pref[sub_id, prod_j] == pref[sub_id, prod_k])):\n", + " \n", + " ground_pref = [pref[sub_id, prod_i], pref[sub_id, prod_j], pref[sub_id, prod_k]] # [rank_i, rank_j, rank_k]\n", + " ground_ordr = np.argsort(ground_pref)\n", + " ground_rank = 3 - np.argsort(ground_ordr) # rank 1 means top, rank 3 means bottom\n", + " y_train_rank[sample_idx_train_rank,:] = ground_rank\n", + " y_train_rel[sample_idx_train_rank,:] = ground_pref\n", + " \n", + " X_train_rank[sample_idx_train_rank,0:3] = np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]])\n", + " X_train_rank[sample_idx_train_rank,3:6] = np.array([n200_val[sub_id, prod_j], feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]])\n", + " X_train_rank[sample_idx_train_rank,6:9] = np.array([n200_val[sub_id, prod_k], feat_val[sub_id, prod_k], ersp_val[sub_id, prod_k]])\n", + " sample_idx_train_rank = sample_idx_train_rank + 1\n", + " \n", + " # Step 3: Preparing Testing Dataset for pairwise evaluation\n", + " sample_idx_test = 0\n", + " for i in 
range(test_split):\n", + " for j in range(i+1, test_split):\n", + " prod_i, prod_j = test_prod[i], test_prod[j]\n", + " if not pref[sub_id, prod_i] == pref[sub_id, prod_j]:\n", + " y_test[sample_idx_test] = np.sign(pref[sub_id, prod_i] - pref[sub_id, prod_j])\n", + " X_test[sample_idx_test,:] = np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]]) - np.array([n200_val[sub_id, prod_j], feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]])\n", + "\n", + " sample_idx_test = sample_idx_test + 1\n", + " \n", + " # Step 3B: Preparing Testing Dataset for Ranking\n", + " sample_idx_test_rank = 0\n", + " for i in range(test_split):\n", + " for j in range(i+1, test_split):\n", + " for k in range(j+1, test_split):\n", + " prod_i, prod_j, prod_k = test_prod[i], test_prod[j], test_prod[k]\n", + " if not ((pref[sub_id, prod_i] == pref[sub_id, prod_j]) or (pref[sub_id, prod_i] == pref[sub_id, prod_k]) or (pref[sub_id, prod_j] == pref[sub_id, prod_k])):\n", + " \n", + " ground_pref = [pref[sub_id, prod_i], pref[sub_id, prod_j], pref[sub_id, prod_k]] # [rank_i, rank_j, rank_k]\n", + " ground_ordr = np.argsort(ground_pref)\n", + " ground_rank = 3 - np.argsort(ground_ordr) # rank 1 means top, rank 3 means bottom\n", + " y_test_rank[sample_idx_test_rank,:] = ground_rank\n", + " y_test_rel[sample_idx_test_rank,:] = ground_pref\n", + " \n", + " X_test_rank[sample_idx_test_rank,0:3] = np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]])\n", + " X_test_rank[sample_idx_test_rank,3:6] = np.array([n200_val[sub_id, prod_j], feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]])\n", + " X_test_rank[sample_idx_test_rank,6:9] = np.array([n200_val[sub_id, prod_k], feat_val[sub_id, prod_k], ersp_val[sub_id, prod_k]])\n", + "\n", + " sample_idx_test_rank = sample_idx_test_rank + 1\n", + "\n", + "\n", + " # Step 4: Computing Accuracy for Pairwise \n", + " clf = ElasticNet(l1_ratio=0.05, alpha=0.1, normalize=True) \n", + "\n", + " clf.fit(X_train, y_train) \n", + "\n", + " classification_acc = [np.mean(cv_train_acc), clf.score(X_train, y_train), clf.score(X_test, y_test)]\n", + " acc_metric[sub_id, acc_idx, :] = acc_metric[sub_id, acc_idx, :] + classification_acc\n", + " acc_count[sub_id, acc_idx, :] = acc_count[sub_id, acc_idx, :] + [1,1,1]\n", + " \n", + " # Step 5: Computing Accuracy for Ranking \n", + " \n", + " \n", + " coef = clf.coef_.ravel() / np.linalg.norm(clf.coef_) \n", + " \n", + " # Step 5A: For Training Data\n", + " dist_ml_train, tau_ml_train, ndcg_ml_train = 0, 0, 0\n", + " for eval_idx in range(total_samples_train_rank):\n", + " # Distance Metric\n", + " pred_pref = [np.dot(X_train_rank[eval_idx,0:3],coef), np.dot(X_train_rank[eval_idx,3:6],coef), np.dot(X_train_rank[eval_idx,6:9],coef)]\n", + " ord_pref = np.argsort(pred_pref)\n", + " pred_rank = 3 - np.argsort(ord_pref)\n", + " true_rank = y_train_rank[eval_idx,]\n", + " dist_ml_train = dist_ml_train + np.mean(abs(true_rank - pred_rank)) \n", + " # Tau Metric\n", + " tau, _ = stats.kendalltau(pred_rank, true_rank)\n", + " tau_ml_train = tau_ml_train + tau\n", + " # NDCG Metric\n", + " true_rel = y_train_rel[eval_idx,]\n", + " ndcg_ml_train = ndcg_ml_train + ndcg_score(true_rel, true_rank, pred_rank)\n", + " \n", + "\n", + " rank_metrics[sub_id, acc_idx, 0, 0] = rank_metrics[sub_id, acc_idx, 0, 0] + dist_ml_train*1.0/total_samples_train_rank\n", + " rank_metrics[sub_id, acc_idx, 0, 1] = rank_metrics[sub_id, acc_idx, 0, 1] + tau_ml_train*1.0/total_samples_train_rank\n", + " 
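# Editorial sketch, not part of the original diff: the three ranking metrics the
# evaluation above accumulates for one product triplet -- mean rank distance,
# Kendall's tau, and NDCG with the notebook's rel / log2(rank + 1) definition.
# The ranks and relevance (preference count) values here are made up.
import numpy as np
from scipy import stats

true_rank = np.array([1, 2, 3])       # ground-truth ranks (1 = best)
pred_rank = np.array([2, 1, 3])       # ranks derived from the model scores
true_rel = np.array([9.0, 5.0, 2.0])  # relevance = how often each product was chosen

dist = np.mean(np.abs(true_rank - pred_rank))   # 2/3
tau, _ = stats.kendalltau(pred_rank, true_rank) # 1/3, since one pair is swapped
dcg = np.sum(true_rel / np.log2(pred_rank + 1))
idcg = np.sum(true_rel / np.log2(true_rank + 1))
print(dist, tau, dcg / idcg)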
rank_metrics[sub_id, acc_idx, 0, 2] = rank_metrics[sub_id, acc_idx, 0, 2] + ndcg_ml_train*1.0/total_samples_train_rank\n", + " rank_count[sub_id, acc_idx, 0, 0] = rank_count[sub_id, acc_idx, 0, 0] + 1\n", + " rank_count[sub_id, acc_idx, 0, 1] = rank_count[sub_id, acc_idx, 0, 1] + 1\n", + " rank_count[sub_id, acc_idx, 0, 2] = rank_count[sub_id, acc_idx, 0, 2] + 1\n", + " \n", + " dist_ml_test, tau_ml_test, ndcg_ml_test = 0, 0, 0\n", + " dist_rand, tau_rand, ndcg_rand = 0, 0, 0\n", + " for eval_idx in range(total_samples_test_rank):\n", + " pred_pref = [np.dot(X_test_rank[eval_idx,0:3],coef), np.dot(X_test_rank[eval_idx,3:6],coef), np.dot(X_test_rank[eval_idx,6:9],coef)] \n", + "\n", + " ord_pref = np.argsort(pred_pref)\n", + " pred_rank = 3 - np.argsort(ord_pref)\n", + " true_rank = y_test_rank[eval_idx,]\n", + " dist_ml_test = dist_ml_test + np.mean(abs(true_rank - pred_rank))\n", + " tau, _ = stats.kendalltau(pred_rank, true_rank)\n", + " tau_ml_test = tau_ml_test + tau\n", + " true_rel = y_test_rel[eval_idx,]\n", + " ndcg_ml_test = ndcg_ml_test + ndcg_score(true_rel, true_rank, pred_rank)\n", + " \n", + " pred_rand = np.random.uniform(0,1,[1,3])\n", + " ordr_rand = np.argsort(pred_rand)\n", + " rank_rand = 3 - np.argsort(ordr_rand)\n", + " rank_rand = rank_rand.flatten()\n", + " rank_worst = np.array([true_rank[1], true_rank[0], true_rank[2]])\n", + " dist_rand = dist_rand + np.mean(abs(true_rank - rank_rand))\n", + " tau, _ = stats.kendalltau(rank_rand, true_rank)\n", + " tau_rand = tau_rand + tau\n", + " ndcg_rand = ndcg_rand + ndcg_score(true_rel, true_rank, rank_rand)\n", + " \n", + " rank_metrics[sub_id, acc_idx, 1, 0] = rank_metrics[sub_id, acc_idx, 1, 0] + dist_ml_test*1.0/total_samples_test_rank\n", + " rank_metrics[sub_id, acc_idx, 1, 1] = rank_metrics[sub_id, acc_idx, 1, 1] + tau_ml_test*1.0/total_samples_test_rank\n", + " rank_metrics[sub_id, acc_idx, 1, 2] = rank_metrics[sub_id, acc_idx, 1, 2] + ndcg_ml_test*1.0/total_samples_test_rank\n", + " rank_count[sub_id, acc_idx, 1, 0] = rank_count[sub_id, acc_idx, 1, 0] + 1 \n", + " rank_count[sub_id, acc_idx, 1, 1] = rank_count[sub_id, acc_idx, 1, 1] + 1\n", + " rank_count[sub_id, acc_idx, 1, 2] = rank_count[sub_id, acc_idx, 1, 2] + 1\n", + " \n", + " rank_metrics[sub_id, acc_idx, 2, 0] = rank_metrics[sub_id, acc_idx, 2, 0] + dist_rand*1.0/total_samples_test_rank\n", + " rank_metrics[sub_id, acc_idx, 2, 1] = rank_metrics[sub_id, acc_idx, 2, 1] + tau_rand*1.0/total_samples_test_rank\n", + " rank_metrics[sub_id, acc_idx, 2, 2] = rank_metrics[sub_id, acc_idx, 2, 2] + ndcg_rand*1.0/total_samples_test_rank\n", + " rank_count[sub_id, acc_idx, 2, 0] = rank_count[sub_id, acc_idx, 2, 0] + 1 \n", + " rank_count[sub_id, acc_idx, 2, 1] = rank_count[sub_id, acc_idx, 2, 1] + 1\n", + " rank_count[sub_id, acc_idx, 2, 2] = rank_count[sub_id, acc_idx, 2, 2] + 1\n", + " \n", + " acc_idx = acc_idx + 1\n", + "\n", + " \n", + " \n", + "# acc_metric = np.swapaxes(acc_metric, 0,1)\n", + "# acc_count = np.swapaxes(acc_count, 0,1)\n", + "# rank_metrics = np.swapaxes(rank_metrics, 0,1) \n", + "# rank_count = np.swapaxes(rank_count, 0,1)\n", + "\n", + "\n", + "\n", + "acc_metric[1:,:,:] = acc_metric[1:,:,:]/acc_count[1:,:,:]\n", + "rank_metrics[1:,:,:,:] = rank_metrics[1:,:,:,:]/rank_count[1:,:,:,:]\n", + "\n", + "\n", + "\n", + "# acc_metric_persub = acc_metric\n", + "acc_metric_subavg = np.mean(acc_metric[1:,:,:],axis=0)\n", + "# rank_metrics_persub = rank_metrics\n", + "rank_metrics_subavg = np.mean(rank_metrics[1:,:,:,:],axis=0)\n", + "\n", + "print 
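# Editorial sketch, not part of the original diff: the random baseline above
# draws three uniform scores and double-argsorts them into ranks; drawing a
# random permutation of the ranks 1..3 directly (a suggested simplification,
# not what the notebook does) gives the same distribution.
import numpy as np

rng = np.random.RandomState(0)
scores = rng.uniform(0, 1, 3)
rank_via_scores = 3 - np.argsort(np.argsort(scores))
rank_direct = rng.permutation(3) + 1
print(rank_via_scores, rank_direct)    # both are uniform over the 6 permutations of (1, 2, 3)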
\"Pairwise Accuracy [Max - Mean - std]\"\n", + "print \"Validation \\t \", max(acc_metric_subavg[:,0]), np.mean(acc_metric_subavg[:,0]), np.std(acc_metric_subavg[:,0])\n", + "print \"Train \\t \", max(acc_metric_subavg[:,1]), np.mean(acc_metric_subavg[:,1]), np.std(acc_metric_subavg[:,1])\n", + "print \"Test \\t \", max(acc_metric_subavg[:,2]), np.mean(acc_metric_subavg[:,2]), np.std(acc_metric_subavg[:,2])\n", + "\n", + "print \"Ranking Accuracy Mean [Train - Test - Random]\"\n", + "print \"Dist \\t \", np.mean(rank_metrics_subavg[:,:,0],axis=0)\n", + "print \"Tau \\t \", np.mean(rank_metrics_subavg[:,:,1],axis=0)\n", + "print \"NDCG \\t \", np.mean(rank_metrics_subavg[:,:,2],axis=0)\n", + "\n", + "# For random, max and std - does not make sense\n", + "print \"Ranking Accuracy Max [Train - Test ]\"\n", + "print \"Dist \\t \", np.min(rank_metrics_subavg[:,0:2,0],axis=0)\n", + "print \"Tau \\t \", np.max(rank_metrics_subavg[:,0:2,1],axis=0)\n", + "print \"NDCG \\t \", np.max(rank_metrics_subavg[:,0:2,2],axis=0)\n", + "\n", + "print \"Ranking Accuracy Std [Train - Test]\"\n", + "print \"Dist \\t \", np.std(rank_metrics_subavg[:,0:2,0],axis=0)\n", + "print \"Tau \\t \", np.std(rank_metrics_subavg[:,0:2,1],axis=0)\n", + "print \"NDCG \\t \", np.std(rank_metrics_subavg[:,0:2,2],axis=0)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Top 5 Subjects: Avg NDCG, std 0.9729758779950863 0.0007864680056827411\n", + "Top 5 Subjects: Avg MD, std 0.42857142857142855 0.05216405309573009\n", + "Top 5 Combinations: Avg NDCG, std 0.9609647540217887 0.0044628265143876475\n", + "Top 5 Combinations: Avg MD, std 0.4777777777777775 0.020786985482077337\n" + ] + } + ], + "source": [ + "avgrank_persub = np.mean(rank_metrics[1:,:,1,:], axis=0)\n", + "avg_ndcg_persub_top5 = np.sort(avgrank_persub[:,2])[-5:]\n", + "print \"Top 5 Subjects: Avg NDCG, std\", np.mean(avg_ndcg_persub_top5), np.std(avg_ndcg_persub_top5)\n", + "avg_md_persub_top5 = np.sort(avgrank_persub[:,0])[:5]\n", + "print \"Top 5 Subjects: Avg MD, std\", np.mean(avg_md_persub_top5), np.std(avg_md_persub_top5)\n", + "\n", + "avgrank_percomb = np.mean(rank_metrics[1:,:,1,:], axis=1)\n", + "avg_ndcg_percomb_top5 = np.sort(avgrank_percomb[:,2])[-5:]\n", + "print \"Top 5 Combinations: Avg NDCG, std\", np.mean(avg_ndcg_percomb_top5), np.std(avg_ndcg_percomb_top5)\n", + "avg_md_percomb_top5 = np.sort(avgrank_percomb[:,0])[:5]\n", + "print \"Top 5 Combinations: Avg MD, std\", np.mean(avg_md_percomb_top5), np.std(avg_md_percomb_top5)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAX0AAAD8CAYAAACb4nSYAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAE5ZJREFUeJzt3X+s3fd91/Hnq9e10FZCguyWEcexJzmUIcqSnrlcQrq7hhQDU/IHU3XTTWsmUQvaBFSUokRIZXJUBTFNZWNWkdttIkDrRdYaeZDVsZJddVQ3la9pSrFNUs/d8DWDeGkyFIpw477545yrnZzYPsf2uffccz/Ph3R07vfz/Xyv3+er49f9nM/5/khVIUlqw9smXYAkae0Y+pLUEENfkhpi6EtSQwx9SWqIoS9JDTH0Jakhhr4kNcTQl6SGbJp0AYO2bNlSO3bsmHQZkjRVjh8//kdVtXVYv3UX+jt27GBpaWnSZUjSVEnyB6P0c3pHkhoyUugn2ZPkxSSnkzxyifWfSfJC7/FSktf61m1P8kySU0lOJtkxvvIlSVdj6PROkhlgP3APsAwcS3K4qk6u9KmqT/T1fwi4ve9XPAF8uqqOJnkH8P1xFS9JujqjjPR3A6er6kxVXQAOAvddof/9wBcBkvwIsKmqjgJU1etV9d3rrFmSdI1GCf2bgbN9y8u9trdIciuwE3iu13Qb8FqS30zy9SS/0PvkMLjd3iRLSZbOnz9/da9AkjSycX+ROw8cqqqLveVNwF3Aw8CPAT8MPDC4UVUdqKpOVXW2bh16xJEk6RqNEvrngFv6lrf12i5lnt7UTs8y8EJvaugN4CngjmspdBSLi/D4491nSdJbjXKc/jFgV5KddMN+HvjwYKck7wZuAhYHtr0xydaqOg98AFiVg/AXF+Huu+HCBdi8GZ59FmZnV+NfkqTpNXSk3xuhPwgcAU4BT1bViST7ktzb13UeOFh9N93tTfM8DDyb5JtAgM+N8wWsWFjoBv7Fi93nhYXV+FckabqNdEZuVT0NPD3Q9qmB5Z+/zLZHgfdcY30jm5vrjvBXRvpzc6v9L0rS9Fl3l2G4VrOz3SmdhYVu4Du1I0lvtWFCH7pBb9hL0uV57R1JaoihL0kNMfQlqSGGviQ1xNCXpIYY+pLUEENfkhpi6EtSQwx9SWqIoS9JDTH0Jakhhr4kNcTQl6SGGPqS1BBDX5IaYuhLUkMMfUlqiKEvSQ0x9CWpIYa+JDVkpNBPsifJi0lOJ3nkEus/k+SF3uOlJK8NrL8hyXKSXxlX4ZKkq7dpWIckM8B+4B5gGTiW5HBVnVzpU1Wf6Ov/EHD7wK95DPjKWCqWJF2zUUb6u4HTVXWmqi4AB4H7rtD/fuCLKwtJ3gu8C3jmegqVJF2/UUL/ZuBs3/Jyr+0tktwK7ASe6y2/DfhF4OHrK1OSNA7j/iJ3HjhUVRd7yx8Dnq6q5SttlGRvkqUkS+fPnx9zSZKkFUPn9IFzwC19y9t6bZcyD3y8b3kWuCvJx4B3AJuTvF5Vb/oyuKoOAAcAOp1OjVi7JOkqjRL6x4BdSXbSDft54MODnZK8G7gJWFxpq6qf7lv/ANAZDHxJ0toZOr1TVW8ADwJHgFPAk1V1Ism+JPf2dZ0HDlaVI3VJWqey3jK60+nU0tLSpMuQpKmS5HhVdYb184xcSWqIoS9JDTH0Jakhhr4kNcTQl6SGGPqS1BBDX5IaYuhLUkMMfUlqiKEvSQ0x9CWpIYa+JDXE0Jekhhj6ktQQQ1+SGmLoS1JDDH1JaoihL0kNMfQlqSGGviQ1xNCXpIYY+kMsLsLjj3efJWnabRqlU5I9wC8BM8Dnq+qfD6z/DPATvcUfAN5ZVTcm+VHgs8ANwEXg01X1G+MqfrUtLsLdd8OFC7B5Mzz7LMzOTroqSbp2Q0M/yQywH7gHWAaOJTlcVSdX+lTVJ/r6PwTc3lv8LvCzVfWtJH8eOJ7kSFW9Ns4XsVoWFrqBf/Fi93lhwdCXNN1Gmd7ZDZyuqjNVdQE4CNx3hf73A18EqKqXqupbvZ//B/AysPX6Sl47c3PdEf7MTPd5bm7SFUnS9Rlleudm4Gzf8jLwvkt1THIrsBN47hLrdgObgd+7+jInY3a2O6WzsNANfEf5kqbdSHP6V2EeOFRVF/sbk/wQ8G+Bj1TV9wc3SrIX2Auwffv2MZd0fWZnDXtJG8co0zvngFv6lrf12i5lnt7UzookNwD/EfinVfX8pTaqqgNV1amqztatUzP7I0lTZ5TQPwbsSrIzyWa6wX54sFOSdwM3AYt9bZuBLwFPVNWh8ZQsSbpWQ0O/qt4AHgSOAKeAJ6vqRJJ9Se7t6zoPHKyq6mv7EPB+4IEkL/QePzrG+iVJVyFvzujJ63Q6tbS0NOkyJGmqJDleVZ1h/TwjV5IaYuhL0jqwVpd8Gfchm5Kkq7SWl3xxpC9JE3apS76sFkNfkiZsLS/54vSOJE3YWl7yxdCXpHVgrS754vSOJDXE0Jekhhj6ktQQQ1+SGmLoS1JDDH1JaoihL0kNMfQlqSGGviQ1xNCXpIYY+pLUEENfkhpi6EtSQwx9SWqIoS9JDRkp9JPsSfJiktNJHrnE+s8keaH3eCnJa33rPpLkW73HR8ZZ/DRbq5sgS1K/oTdRSTID7AfuAZaBY0kOV9XJlT5V9Ym+/g8Bt/d+/rPAPwM6QAHHe9u+OtZXMWXW8ibIktRvlJH+buB0VZ2pqgvAQeC+K/S/H/hi7+e/CRytqu/0gv4osOd6Ct4I1vImyJLUb5TQvxk427e83Gt7iyS3AjuB565225as5U2QJanfuO+ROw8cqqqLV7NRkr3AXoDt27ePuaT1Zy1vgixJ/UYJ/XPALX3L23ptlzIPfHxg27mBbRcGN6qqA8ABgE6nUyPUNPXW6ibIktRvlOmdY8CuJDuTbKYb7IcHOyV5N3AT0H88yhHgg0luSnIT8MFemyRpAoaO9KvqjSQP0g3rGeDXqupEkn3AUlWt/AGYBw5WVfVt+50kj9H9wwGwr6q+M96XIEkaVfoyel3odDq1tLQ06TIkaaokOV5VnWH9PCNXkhpi6EtSQwx9SWqIoS9JDTH0Jakhhr4kNcTQl6SGGPqS1BBDX5IaYuhLUkMMfUlqiKEvSQ0x9CXpKiwuwuOPd5+n0bjvnCVJG9biItx9d/fe1ps3d++AN203Q3KkL0kjWljoBv7Fi93nhYVJV3T1DH1JGtHcXHeEPzPTfZ6bm3RFV8/pHUka0exsd0pnYaEb+NM2tQOGviRdldnZ6Qz7FU7vSFJDDH1JaoihL0kNMfQlqSEjhX6SPUleTHI6ySOX6fOhJCeTnEjyhb72f9FrO5Xkl5NkXMVLkq7O0KN3kswA+4F7gGXgWJLDVXWyr88u4FHgzqp6Nck7e+1/DbgTeE+v638CfhxYGOeLkKRLWVyc7sMrV8Moh2zuBk5X1RmAJAeB+4CTfX0+CuyvqlcBqurlXnsBfwrYDAR4O/C/xlO6JF3eRrhkwmoYZXrnZuBs3/Jyr63fbcBtSb6a5Pkkew
CqahH4HeAPe48jVXXq+suWpCvbCJdMWA3jOjlrE7ALmAO2AV9J8peBLcBf7LUBHE1yV1X9bv/GSfYCewG2b98+ppLa5MdZqWvlkgkrI/1pvGTCahgl9M8Bt/Qtb+u19VsGvlZV3wO+neQl/uSPwPNV9TpAkt8GZoE3hX5VHQAOAHQ6nbr6lyHw46zUbyNcMmE1jDK9cwzYlWRnks3APHB4oM9TdAOeJFvoTvecAf478ONJNiV5O90vcZ3eWSV+nJXebHYWHn3UwO83NPSr6g3gQeAI3cB+sqpOJNmX5N5etyPAK0lO0p3D/2RVvQIcAn4P+CbwDeAbVfVbq/A6xMa4AqCk1ZWq9TWb0ul0amlpadJlTC3n9KU2JTleVZ1h/bzK5gYz7VcAlLS6vAyDJDXE0Jekhhj6ktQQQ1+SGmLoS1JDDH1JaoihL0kNMfQlqSGGviQ1xNCXpIYY+pLUEENfkhpi6EtSQwx9SWqIoS9JDTH0Jakhhr4kNcTQlzRxi4vw+OPdZ60ub5coaaIWF+Huu+HCBdi8GZ591lt+riZH+pImamGhG/gXL3afFxYmXdHGZuhLmqi5ue4If2am+zw3N+mKNraRQj/JniQvJjmd5JHL9PlQkpNJTiT5Ql/79iTPJDnVW79jPKVL2ghmZ7tTOo895tTOWhg6p59kBtgP3AMsA8eSHK6qk319dgGPAndW1atJ3tn3K54APl1VR5O8A/j+WF+BpKk3O2vYr5VRRvq7gdNVdaaqLgAHgfsG+nwU2F9VrwJU1csASX4E2FRVR3vtr1fVd8dWvSTpqowS+jcDZ/uWl3tt/W4Dbkvy1STPJ9nT1/5akt9M8vUkv9D75CBJmoBxfZG7CdgFzAH3A59LcmOv/S7gYeDHgB8GHhjcOMneJEtJls6fPz+mkiRJg0YJ/XPALX3L23pt/ZaBw1X1var6NvAS3T8Cy8ALvamhN4CngDsG/4GqOlBVnarqbN269Vpeh1aZJ89IG8MoJ2cdA3Yl2Uk37OeBDw/0eYruCP/Xk2yhO61zBngNuDHJ1qo6D3wAWBpX8VobnjwjbRxDR/q9EfqDwBHgFPBkVZ1Isi/Jvb1uR4BXkpwEfgf4ZFW9UlUX6U7tPJvkm0CAz63GC9Hq8eQZaeMY6TIMVfU08PRA26f6fi7gH/ceg9seBd5zfWVqklZOnlkZ6XvyjDS9vPaOhlo5eWZhoRv4Tu1I08vQ10g8eUbaGLz2jiQ1xNCXpIYY+pLUEENfkhpi6EtSQwx9TYyXdpDWnodsaiK8tIM0GY70NRFe2kGaDENfE+F9UaXJcHpHE+GlHaTJMPQ1MV7aQVp7Tu9IUkMMfUlqiKEvSQ0x9CWpIYa+JDXE0Jekhhj6ktQQQ1+SGmLoS1JDRgr9JHuSvJjkdJJHLtPnQ0lOJjmR5AsD625IspzkV8ZRtCTp2gy9DEOSGWA/cA+wDBxLcriqTvb12QU8CtxZVa8meefAr3kM+Mr4ypYkXYtRRvq7gdNVdaaqLgAHgfsG+nwU2F9VrwJU1csrK5K8F3gX8Mx4SpauzJuzSJc3ygXXbgbO9i0vA+8b6HMbQJKvAjPAz1fVl5O8DfhF4GeAv3H95UpX5s1ZpCsb1xe5m4BdwBxwP/C5JDcCHwOerqrlK22cZG+SpSRL58+fH1NJapE3Z1ldfoqafqOM9M8Bt/Qtb+u19VsGvlZV3wO+neQlun8EZoG7knwMeAewOcnrVfWmL4Or6gBwAKDT6dQ1vRKJP7k5y8pI35uzjI+fojaGUUb6x4BdSXYm2QzMA4cH+jxFd5RPki10p3vOVNVPV9X2qtoBPAw8MRj40jit3JzlsccMpXHzU9TGMHSkX1VvJHkQOEJ3vv7XqupEkn3AUlUd7q37YJKTwEXgk1X1ymoWLl2ON2dZHX6K2hhStb5mUzqdTi0tLU26DEmXsLjoLS7XqyTHq6ozrJ+3S5Q0Mj9FTT8vwyBtQB5lo8txpC9tMB5loytxpC9tMB5loysx9KURTcuUycpRNjMzHmWjt3J6RxrBNE2ZrJyr4FE2uhRDXxrBpaZM1nOYepSNLsfpHWkETploo3CkL41gNadMPOFJa8nQl0a0GlMm0/RdgTYGp3ekCfLwSq01Q1+aIL8r0FpzekeaIA+v1Foz9KUJ8/BKrSWndySpIYa+JDXE0Jekhhj6ktQQQ1+SGmLoS1JD1t2N0ZOcB/7gOn7FFuCPxlTORuT+Gc59dGXun+EmsY9uraqtwzqtu9C/XkmWRrkjfKvcP8O5j67M/TPcet5HTu9IUkMMfUlqyEYM/QOTLmCdc/8M5z66MvfPcOt2H224OX1J0uVtxJG+JOkypjb0k8wk+XqS/3CJdQ8kOZ/khd7j702ixklK8vtJvtl7/UuXWJ8kv5zkdJL/kuSOSdQ5KSPsn7kkf9z3HvrUJOqcpCQ3JjmU5L8lOZVkdmB96++hYftnXb6HpvnSyv8IOAXccJn1v1FVD65hPevRT1TV5Y4V/lvArt7jfcBne88tudL+AfjdqvrJNatm/fkl4MtV9VNJNgM/MLC+9ffQsP0D6/A9NJUj/STbgL8DfH7StUyx+4Anqut54MYkPzTporQ+JPkzwPuBXwWoqgtV9dpAt2bfQyPun3VpKkMf+JfAPwG+f4U+f7f3kfNQklvWqK71pIBnkhxPsvcS628GzvYtL/faWjFs/wDMJvlGkt9O8pfWsrh1YCdwHvj13jTq55P84ECflt9Do+wfWIfvoakL/SQ/CbxcVcev0O23gB1V9R7gKPBv1qS49eWvV9UddD+CfzzJ+ydd0DozbP/8Z7qntf8V4F8BT611gRO2CbgD+GxV3Q78H+CRyZa0royyf9ble2jqQh+4E7g3ye8DB4EPJPl3/R2q6pWq+n+9xc8D713bEievqs71nl8GvgTsHuhyDuj/BLSt19aEYfunqv53Vb3e+/lp4O1Jtqx5oZOzDCxX1dd6y4fohly/lt9DQ/fPen0PTV3oV9WjVbWtqnYA88BzVfUz/X0G5hXvpfuFbzOS/GCSP73yM/BB4L8OdDsM/GzvCIy/CvxxVf3hGpc6EaPsnyR/Lkl6P++m+3/llbWudVKq6n8CZ5P8hV7T3cDJgW7NvodG2T/r9T00zUfvvEmSfcBSVR0G/mGSe4E3gO8AD0yytgl4F/Cl3vttE/CFqvpykr8PUFX/Gnga+NvAaeC7wM9NqNZJGGX//BTwD5K8AfxfYL7aO5PxIeDf945MOQP8nO+hNxm2f9ble8gzciWpIVM3vSNJunaGviQ1xNCXpIYY+pLUEENfkhpi6EtSQwx9SWqIoS9JDfn/7EajSIEsm8sAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "## Plot the MD pattern as per combinations used in test rank\n", + "\n", + "chosen_metric = rank_metrics[:,:,1,0]\n", + "res = np.zeros([120*14,2])\n", + "\n", + "idx = 0\n", + "for sub_id in range(1, total_sub+1):\n", + " for comb_id in range(0,120):\n", + " prod_cnt = np.sort(pref[sub_id,1:])\n", + "# te_comb = test_comb[comb_id]-1\n", + "# tr_comb = list(set(range(10))-set(te_comb))\n", + "# myprod_pref = prod_cnt[te_comb]\n", + "# res[idx, 0] = np.mean(np.diff(myprod_pref))\n", + " te_comb = test_comb[comb_id]\n", + " tr_comb = list(set(range(1,11))-set(te_comb))\n", + "# print tr_comb, te_comb\n", + " res[idx, 0] = np.mean(tr_comb)\n", + " res[idx, 1] = chosen_metric[sub_id, comb_id]\n", + " idx = idx + 1\n", + "\n", + "resolution = 0.15\n", + "val_min = int((min(res[:,0]) + 0.5)*100)\n", + "val_max = int((max(res[:,0]) - 0.5)*100 + resolution*100)\n", + "interval = 1.0\n", + "\n", + "for idx in range(val_min,val_max,int(resolution*100)):\n", + " range_min = idx/100.0 - 0.5\n", + " range_max = idx/100.0 + 0.5\n", + " xx = (res[:,0]range_min)\n", + " yy = np.mean(res[np.argwhere(xx),1])\n", + " plt.plot(idx/100.0,yy,'b.')" + ] + }, + { + "cell_type": "raw", + "metadata": {}, + "source": [ + "print rank_metrics[sub_idx, :, 0,2]" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.4714285714285715 0.8809523809523808\n", + "0.7304761904761905 0.19918367346938773\n" + ] + } + ], + "source": [ + "max_ind = 10\n", + "\n", + "best_acc, worst_acc = [], []\n", + "\n", + "best_acc_tr, worst_acc_tr = [], []\n", + "\n", + "for sub_idx in range(1, total_sub+1):\n", + " chosen_metric_test = (rank_metrics[sub_idx, :,1,0])\n", + "# chosen_metric_train = (rank_metrics[sub_idx, :,0,2])\n", + " chosen_metric_train = (rank_metrics[sub_idx, :, 0, 0])# - (acc_metric[sub_idx, :, 0])) #abs(rank_metrics[sub_idx, :, 0,2])\n", + "# chosen_metric_train = (rank_metrics[sub_idx, :, 0, 2]) #acc_metric[sub_idx, :, 0]\n", + "\n", + " sort_ind_train = np.argsort(chosen_metric_train)\n", + "\n", + "# print chosen_metric_train[sort_ind_train]\n", + "\n", + " worst_ind_train = sort_ind_train[:max_ind]\n", + " best_ind_train = list(sort_ind_train[-max_ind:])\n", + "\n", + " chosen_metric_test_val = np.array(chosen_metric_test).flatten()\n", + " chosen_metric_train_val = np.array(chosen_metric_train).flatten()\n", + "\n", + " best_train_idx = np.array(best_ind_train).flatten()\n", + " best_acc.append(np.mean(chosen_metric_test_val[best_train_idx]))\n", + " best_acc_tr.append(np.mean(chosen_metric_train_val[best_train_idx]))\n", + "\n", + " worst_train_idx = np.array(worst_ind_train).flatten()\n", + " worst_acc.append(np.mean(chosen_metric_test_val[worst_train_idx]))\n", + " worst_acc_tr.append(np.mean(chosen_metric_train_val[worst_train_idx]))\n", + " \n", + "# print best_acc\n", + "# print worst_acc\n", + "\n", + "print np.mean(best_acc), np.mean(worst_acc)\n", + "print np.mean(best_acc_tr), np.mean(worst_acc_tr)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": 
"python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.15" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/.ipynb_checkpoints/fixrule-checkpoint.ipynb b/.ipynb_checkpoints/fixrule-checkpoint.ipynb new file mode 100644 index 0000000..9b7ce67 --- /dev/null +++ b/.ipynb_checkpoints/fixrule-checkpoint.ipynb @@ -0,0 +1,421 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "#### Written and Copyright by Mohit Agarwal\n", + "#### Georgia Institute of Technology\n", + "#### Email: me.agmohit@gmail.com\n", + "\n", + "## Please cite the following publication if you are using the codes for your study and publication\n", + "# Agarwal, Mohit, and Raghupathy Sivakumar. \n", + "# \"Cerebro: A Wearable Solution to Detect and Track User Preferences using Brainwaves.\" \n", + "# The 5th ACM Workshop on Wearable Systems and Applications. ACM, 2019.\n", + "\n", + "### Code to decode Product Preference data [10 products] and rank it\n", + "### Uses fix rule based on N200, Min and ERSP features" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "##Importing Libraries\n", + "\n", + "import numpy as np\n", + "import scipy.io\n", + "import os\n", + "from sklearn.decomposition import FastICA\n", + "import mne\n", + "from mne.time_frequency import psd_multitaper\n", + "import matplotlib.pyplot as plt\n", + "from scipy import stats\n", + "\n", + "mne.set_log_level('ERROR')" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "## Parameters\n", + "\n", + "chan_list = ['Fp1','F3','F7','C3','T7','P3','P7','O1','Pz','Fp2','Fz','F4','F8','Cz','C4','T8','P4','P8','O2']\n", + "\n", + "selected_chan = [1,3,8,10,11,13,14]\n", + "total_chan = len(selected_chan)\n", + "\n", + "total_sub = 14 # Anything from 1-14\n", + "total_prod = 10 \n", + "freq = 256.0\n", + "time_len = 768\n", + "\n", + "time = [(x-freq)*1.0/freq for x in xrange(1,time_len+1)]\n", + "time = np.array(time)\n", + "\n", + "n200_ind = [idx for idx, t_ind in enumerate(time) if (t_ind>=0.2 and t_ind<=0.3)]\n", + "n200_ind = np.array(n200_ind)\n", + "\n", + "erp_ind = [idx for idx, t_ind in enumerate(time) if (t_ind>=0.2 and t_ind<=0.3)]\n", + "erp_ind = np.array(erp_ind)\n", + "erp_len = len(erp_ind)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# Loading data\n", + "data_dict = {}\n", + "data_dir = 'data/'\n", + "list_of_files = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]\n", + "for curr_file in list_of_files:\n", + " if '_data.mat' in curr_file:\n", + " sub_id = int(curr_file[1:3])\n", + " data_dict[sub_id] = scipy.io.loadmat(os.path.join(data_dir,curr_file))\n", + " if curr_file == 'WA2.mat':\n", + " WA2 = scipy.io.loadmat(os.path.join(data_dir,curr_file))['WA2']\n", + " WA2 = np.delete(WA2,11,0) # to remove 12th subject\n", + " if curr_file == 'outChoices.mat':\n", + " outChoices = scipy.io.loadmat(os.path.join(data_dir,curr_file))['out']\n", + "# WA2 contains 14(sub) x 10 (prod) : value represents # of times the product was chosen total\n", + "# outChoices : column 0: if prod_i was chosen? 
column 7: sub_id*100+prod_1 , col 8: sub_id*100+prod_2\n", + "choices = np.zeros([total_sub+1, total_prod+1, total_prod+1])\n", + "for idx in range(outChoices.shape[0]):\n", + " sub_id = int(outChoices[idx, 7]//100)\n", + " sub_id_2 = int(outChoices[idx, 8]//100)\n", + " if sub_id == 12:\n", + " continue\n", + " if sub_id > 12:\n", + " sub_id = sub_id - 1\n", + " sub_id_2 = sub_id_2 - 1\n", + " assert sub_id>0 and sub_id <= (total_sub+1) and sub_id == sub_id_2, \"Error 1: error decoding\"+str(sub_id)\n", + " prod_1 = int(outChoices[idx, 7]%100)\n", + " prod_2 = int(outChoices[idx, 8]%100)\n", + " assert prod_1 > 0 and prod_1 <= total_prod and prod_2 > 0 and prod_2 <= total_prod, \"Error 2: error decoding \"+str(prod_2)\n", + " if prod_1 > prod_2 or prod_1==prod_2:\n", + " print \"check it baby\", prod_1, prod_2\n", + " if outChoices[idx, 0] == 0:\n", + " choices[sub_id, prod_1, prod_2] = choices[sub_id, prod_1, prod_2] + 1\n", + " elif outChoices[idx, 0] == 1:\n", + " choices[sub_id, prod_2, prod_1] = choices[sub_id, prod_2, prod_1] + 1\n", + " \n", + " \n", + "pref = np.zeros([total_sub+1, total_prod+1])\n", + "pref[1:,1:] = WA2\n", + "#data_dict[sub_id]['sig'], ['lab'] contains original eeg signals (25x768x500) and labels (1x500)\n", + "# What are channels?" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "ncomp = 1\n", + "n200_val = np.zeros([total_sub+1, total_prod+1])\n", + "ersp_val = np.zeros([total_sub+1, total_prod+1])\n", + "feat_val = np.zeros([total_sub+1, total_prod+1])\n", + "for sub_id in range(1,total_sub+1):\n", + " sig = data_dict[sub_id]['sig']\n", + " lab = data_dict[sub_id]['lab'].flatten()\n", + " for prod_id in range(1,total_prod+1):\n", + " sig_prod = sig[selected_chan, :, np.argwhere(lab==prod_id)] \n", + " avg_sig_chan = np.transpose(np.mean(sig_prod, axis=0))\n", + " # compute ICA\n", + " ica_sig = FastICA(n_components=ncomp)\n", + " S_ = ica_sig.fit_transform(avg_sig_chan) # Get the estimated sources\n", + " A_sig = ica_sig.mixing_ # Get estimated mixing matrix\n", + " A_sig_norm = np.linalg.norm(A_sig,axis=0)\n", + " S_ = S_*A_sig_norm\n", + " \n", + " if sum(A_sig)<0:\n", + " S_ = -1*S_\n", + " \n", + " n200val = np.mean(S_[n200_ind])\n", + " featval = min(S_[erp_ind])\n", + " \n", + " info = mne.create_info(ch_names=['ica'], sfreq=freq, ch_types=['eeg'])\n", + " raw = mne.io.RawArray(np.transpose(S_[256:]), info)\n", + " psds, freqs = psd_multitaper(raw, low_bias=True, tmin=0.1, tmax=0.5, fmin=13, fmax=26, proj=True, n_jobs=1)\n", + " raw2 = mne.io.RawArray(np.transpose(S_), info)\n", + " psds2, freqs2 = psd_multitaper(raw2, low_bias=True, tmin=0.5, tmax=1.0, fmin=13, fmax=26, proj=True, n_jobs=1)\n", + " erspval = np.mean(10*np.log10(psds)) - np.mean(10*np.log10(psds2))\n", + "\n", + " n200_val[sub_id, prod_id] = -1*abs(n200val)\n", + " ersp_val[sub_id, prod_id] = -1*erspval\n", + " feat_val[sub_id, prod_id] = featval\n", + "\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "N200 and ERSP correlation: (0.0025271910663613034, 0.9763588794947393)\n" + ] + }, + { + "data": { + "image/png": 
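# Editorial sketch, not part of the original diff: the "N200 and ERSP
# correlation" line printed just above shows an (r, p-value) pair of the kind
# scipy.stats.pearsonr returns; that reading is an assumption, and the
# per-product feature vectors used here are synthetic stand-ins
# (140 values = 14 subjects x 10 products).
import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
n200_feature = rng.randn(140)
ersp_feature = rng.randn(140)
r, p = stats.pearsonr(n200_feature, ersp_feature)
print("N200 and ERSP correlation:", (r, p))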
"iVBORw0KGgoAAAANSUhEUgAAAY4AAAEXCAYAAAC6baP3AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzs3XdUk9cbB/BvGGGHISiCIgKCDKFoFUFFq4B1g6OlQ1DcCHXXUW0dHHHWggxx1dWf1jpQobXFUbEVFyhQBVRGVUC2hB0g+f1BSY1JIKEQhs/nHM6R+9773vui5uHOl8Hj8XgghBBCJCTX3g0ghBDSuVDgIIQQIhUKHIQQQqRCgYMQQohUKHAQQgiRCgUOQgghUqHAQQghRCoUOAghhEiFAgchhBCpUOAghBAiFQochBBCpKLQ3g0ghJDOxpYxUeK8SbyoNmxJ++gygUOav0givSReFHycQ9q7GV3a4Vg/VFVVt3czujQVFeX2bkKX0GUCByGEyIqc3Ls9yk+BgxBCpMRgyLd3E9oVBQ5CCJGSvLxiezehXVHgIIQQKdFQFSGEEKkwGBQ4CCGESEFOjuY4CCGESIF6HIQQQqRCk+OEEEKkQpPjhBBCpEL7OAghhEiFehyEEEKkQpPjhBBCpELLcQkhhEiFVlURQgiRyrs+x/FuPz0hhLQAgyEn8Zc0srKyMGfOHNjb22Po0KHYsmULqqqqmi1XWVmJXbt2wcXFBXZ2dnBzc0NISAg4HE5LH7FJ1OMghBAptcUcB5vNhpeXFwwMDBAUFITi4mIEBgaiuLgYe/bsabLsxo0bceXKFSxbtgz9+vVDUlISgoODwWazsW7dulZvKwUOQgiRUlusqjp16hTYbDYiIyOho6MDAJCXl8fKlSvh6+uLfv36iSxXV1eHy5cvY+7cuZg5cyYAYOjQocjJyUFUVFSbBA4aqiKEECnJyytK/CWp2NhYDB06lB80AGDs2LFgMpmIjY0VW47H46G+vh4aGhoC6SwWCzweT/qHkwD1OAghRErSTI6z2Wyw2WyhdBaLBRaLxf8+PT0d06ZNE8jDZDJhZGSEjIwMsfdXVFTElClTcPz4cQwcOBBmZmZITk7G6dOn8fnnn0vcTmlQ4CCEEClJc+TI0aNHERISIpTu5+cHf39//vdsNlsgkDRisVgoLS1tso7Nmzfjm2++wUcffcRPmzVrFvz8/CRupzQocBBCiJSk6XF4e3vDw8NDKF1UkGip3bt348aNGwgICICxsTEePnyI0NBQ6OrqYt68ea1WT6N2CRzl5eV49eoVampqhK5ZW1u3Q4sIIURycvIMifO+PSTVVD5RQ1psNhsmJiZiyz158gSHDx9GWFgYxowZAwAYPHgw6urqEBwcjE8++QTq6uoSt1cSMg0cr169wrp16xAXFyd0jcfjgcFgICUlRZZNIoQQqTHkW39dkampKdLT0wXSOBwOnj9/jqlTp4ot9+zZMwCApaWlQLqVlRU4HA7y8vI6d+DYsGED0tLSsGbNGpiZmUFR8d3etk8I6ZwUmK0fOJydnREeHo6SkhJoa2sDAGJiYsDhcDBy5Eix5QwNDQEAjx49goGBAT/9r7/+AoPBEEhrLTINHAkJCdi8eTMmTJggy2oJIaRVtUWPw9PTEydOnICvry98fX1RVFSEbdu2Yfz48TAzM+PnW7duHSIjI/H48WMAgI2NDWxtbfHNN9+gqKgIffr0QVJSEvbv349p06ZBRUWl1dsq08ChqakJNTU1WVZJCCGtTpo5DkmxWCwcPXoUAQEB8Pf3h5KSEiZMmIBVq1YJ5ONyuaivr+d/Ly8vj3379iEoKAj79+9HYWEhevbsCR8fHyxYsKDV2wkADF4zO0TS09MRGxuLjIwM/pIwTU1NmJiYwNnZGaamphJXdurUKfz222+IiIho9WEqW8bEVr1fa1NRU8bsVVNhPdgc1oP7QUdPE9+tOYLD28+0d9MkksSLgo+z8JLCtqSgKAd3Hwc4ullAjaWMlxlFOH/wDh7de95kuS+DPNDf3lDs9eVTv8frwgoAgLvPENg49IFeTxaUVBRRnF+OpLgsRB+/j7LS6tZ8nGYdjvVDVdV/r5PD4SA8PAxRUVFgs9kwMzPD4sWL4eQ0rNmybDYbQUHf4dq1a6iqqoK1tQ2WL18uctHKw4cPERT0HVJSUqCqqgoXF1csXboUqqqq/DyZmZmIjIxEXFwcXr58AVVVVfTvb4lFixYJ3TM8PBwREfuE6mEymbh7914LfhLCVFSUW+U+0vxfOBzbNkti25PYHkd1dTW++uor/Pzzz1BUVISRkRF/ZUBGRgYuXLiAHTt2YPz48di6dSuUlJSarezp06fIysqCq6srBg8eDE1NTaE869ev/w+P03Fp67Kw8JtP8epFAVIfpMPJbWB7N6nDm7PWBYNGmeLKmSTkvXgNpw/7Y+n2idi5LBJPEnPElos6fh+xUY8E0uTk5TBr1Qd49eI1P2gAgHH/7shKzcOdK09QXVmLnn204TzRCnZOxtg450fUVNW22fO1la+/3oArV67g008/hZFRH1y6dAn+/v6IiNiP999/X2w5LpcLf39/PHmSBi8vb+jo6OD06R8xb95c/PDD/9C3b19+3tTUVCxYMB/GxsZYvnwF8vPzcfz4Mfz999+IiIjg5zt//hzOnz8PFxcXfPTRRygvL8fZs2fg5TUTISGhcHR0FGrHmjVroaHx72RuR3z3RVv0ODoTsYFj165d+PPPP7Fz5064ubmByWQKXOdwOIiJiUFAQAB27twp0Qf+9evXATSsgY6Pjxe6zmAwumzgKMgtxhgDLxTkFsOgT3dczjrc3k3q0PpadoeDizl+2ncLv/wvAQDw56+p2HLkE3zkOwwBC34SW/bx/RdCaQMcjKCgKI/bMWkC6d99GSWUN/3RKyzeMg72w/vidsyT//gkspWcnIzLly9jyZIlmD3bBwAwadIkTJ8+DXv2fIsffvif2LIxMTFITHyI7du3Y+zYDwEArq6umDJlMsLCwrBz505+3r1790JdXR0HDx7iH3VhYGCAzZs34ebNmxgxYgQA4MMPx2HhwkUCvRB3d3d4eLgjPDxMZOBwcXGBrq7uf/9htCEGBQ7RoqOjsXbtWkycKHoIiMlkYsKECaitrcX27dsl+sC/du1ay1vaydVy6lCQW9zezeg03h9pBm49Fzcu/ttzqOPU42Z0CqYvcEQ3fQ0UvSqT+H5D3SzA5fIQJ0EgaLyvqnrzveiO5sqVK5CTk8O0adP5aUpKSnB398DevcHIzs7mr8IRVVZbWxuurm78NB0dHbi5uSEqKgrV1dVQVlZGeXk57ty5DU9PT4HzkSZNmoRdu3bit99+4wcOKysroXq0tLQwcOBA3LlzR2Q7eDweysvLoaqq2mHfe6HA7Hi9IFkS+7dSXV0tUdTX1dVFdbVsx4JJ12fUTxf5OWxUlgtuEs1MyQMA9OmnJ/G9mMoKsB/WF08Sc1CSXy50ncEA1DWVodlNFeZ2Bvh0iTPq67hITXj53x6iHaSmpqJXr1
5CG85sbGwAAGlpqU2WtbCwEPqwtrGxQU1NDbKyMgE0DDnX1dXBykpwjkJRUREWFhZN1tGosLAIWlpaIq9NmTIZw4cPg5OTI9asWYOCgoJm7ydrcvJyEn91RWJ7HAMHDkRoaChsbGxEzkUAQGlpKcLCwpocNxUlJSUFmZmZIl8y4u7uLtW9SNek2U0NpUUVQumNaVq6kq/OGzjCBMqqTMT9JvoDTc9AE9tOzuR/X5xfhv1bfkPO3yVStrr9FRYWQE9POKg2/hKYny/+Q7iwsAB2dnYiyurxy/bvb4nCwkIAgJ6e8C+Wurp6yMrKarKNCQkJSEpKhI/PHIF0FosFT09P2NragclkIiEhAadP/4i//krGyZOnhE5/bU80VCXG119/jZkzZ2LUqFFwdHSEmZkZ/y+urKwM6enpiIuL4y8hk0RpaSnmzZuHpKQkMBgM/pG/DMa/fwkUOAgAMJUU8Lq2Xii9ltOQpqgk+UpyRzcLcGrqcP/3dJHXSwrKsWtZJBSVFNDHXA+DnE2hpNI5N6fW1NSIXLHYuHhF1DE/b5Z9ey6zoSxToGxNTcMIg6Ki6LxNjUAUFxdh7do1MDQ0xJw5goHjs88+E/jexcUFNjY2WLduLU6dOol58+aLva+s0eS4GH369EF0dDROnjyJmzdv4syZM/xzVFgsFkxNTbFo0SKhcc6m7NixA9XV1YiMjIS7uzsOHToELS0tXLx4EdevX0dwcHDrPBXp9Dg1dVBUFB5HVvxnbLm2pk6i+7C0VWA1qDcSbmagqkL0azRrOfV4HN8wLJV4Kwsp8S+xNnQaykqqkBiX1aL2txclJSXU1gqvBGv80G9q9aOSkpLIUYCaGo5AWSWlhiWttbWi8yori17yWlVVCX9/f1RUVOLIke8FJszFGT9+PL79djdu377TwQJH1xyCklSTv7ZpaGhg/vz5mD+/df7C4uLisGLFCv6brDQ0NGBtbQ1ra2swmUyEhISIPH6YvHtKiyrQTV/4YDjNbg1DVG8uqW2Kg4s55BXkEPfWaqqmPE3OxevCCgx1Ne90gUNXVw+5ucJLlRuHl7p3Fz83pKurh8JC4aGsxrTGso3DXgUFhSLzihoqq62txfLly/H06VOEhYXDzEz02+xE6dGjB9jspo8VlzV5mhyXneLiYvTo0QPy8vJQVVXF69ev+decnJxEHn5I3k3PnxWiuwFLaGWTiVWPf65LNmE61MUcZa+rkBz3t1T1KzDloaIuPBTT0VlYWODly5dCp6wmJyfzrzdVNi0tDVwu962yf0FJSQnGxg37OMzMzKCgoIDHjwX3ytTW1iItLQ3m5oJ1cLlcrF//Fe7evYvAwECp5kR5PB5ycnL4Zzd1FHLyDIm/uqImA0dNTQ1OnjyJhQsXYuzYsXBwcICDgwPGjh2LhQsX4tSpU02Omb6tZ8+eKC5uWJJqbGyMK1eu8K/Fx8e3yZkqpHOK/z0dcvJyGDn535U7CopyGD7eElmp+SjMbVgyq9lNFfpGWpAXMXSg31sLfS174N71Z6iv5wpdV1ZVhIKicLn3R5lCnaWMrNT8Vnwi2XB1dQGXy8XZs/+eSMDhcHDhwgVYWVnB0LAXAKCgoACZmZkCw1quri4oKSlBTMxv/LTG70eMGMEfgtLQ0ICDgwN++eUXlJf/u0otKuoSKisr4ebmKtCmbdu24ddff8XateswZoyL2LY3fja86fTp0ygpKZFo17ss0aoqMbKzszF79mzk5OTA3t4ew4YN4y/xY7PZePbsGQICAvD999/j8OHDYteGv2nYsGG4desW3Nzc4OXlhTVr1iA5ORlMJhNJSUnw8fFpvSfrgDwXT4SGlhpYWg3DLYM/sIW8QkOX9+TeSyhnV7Zn8zqUjJQ83Lv2FB5zHaCuqYz8l6VwGmsBXX0Wdq+4wM83bb4jho+zxKqPjgrt63B0a/jNV9wwVR/z7lj4jRvuXn+GvJevwQADfft3xxCXfijIZSPmTFLbPWAbGTDAFq6ubggNDUVJyWsYGRkhKuoScnKysW/fv8d5BAcH49Kli4iO/pn/f9fFxRW2tj9g48aNyMzMgra2Nk6f/hF1dXXw9V0sUI+fnz+8vb0wZ44Ppk+fjvz8fBw7dgxDhgzBiBHO/HwnTpzA6dM/wtbWDioqyoiOFtxwOXr0aKioNMx1jB8/Dm5ubujXrx+YTCU8ePAAv/56Gebm5vj444/QkdCqKjG2bNkCdXV1xMTEoGfPniLz5Obmws/PDwEBAQgPD2+2slWrVvFXXLi7u0NNTQ2XL19GTU0NNmzYAE9PzxY+RufgvdIDhsY9+N8PGzsQw8Y2HD0SfeI6BY63HNh6BR6v/jmrSkMJ2ZnFCF4bhdQH2RKVd3AxR352KdL/eiXyen72ayTGZWHAECM4T7CCnLwcivLKcPVsMqJP3EcFu3PuTwoICEBYWBh+/jkapaWlMDMzQ1BQMAYPHtJkOXl5eYSEhGLPnj04deokqqurYW1tjY0bNwm9SMjS0hIREREICgrCrl27oKqqismTp2DJkiUCqyTT0hqCdlJSIpKSEoXqbAhcDYFj3LjxSEx8iKtXr6KmpgY9exrAy8sb8+bN4weXjqKrDkFJSuwhh/b29vjuu++aPAceAG7cuIGlS5fiwYMHbdJASXX0Qw47u/Y45PBd01qHHBLxWuuQw/WLf5Q4b0Dox61SZ0citsehpKSEiormV65UVFSIXPvdlIYNQEnIzc3F7Nmzoa+vj6SkJPTq1Qs6OjpS3YsQQmTtXV9VJTZwuLm5Ydu2bdDQ0OCfO/O2P/74A9u3b8eHH34oUWVlZWVYtmwZ/vjjD6irq6OiogKTJ0+Gvr4+jh49Cm1t7S57yCEhpOt414eqxAaO1atXIy8vD/PmzQOLxYKJiYnAzvHMzEyw2WyMHDkSq1evlqiywMBApKen4/Tp07CysuKfnwMAw4cPx8GDB//j4xBCSNuTk6PAIZKamhoiIiKQmJiI2NhYpKen89eG6+vrY9iwYRg5ciRsbW0lruzq1av46quvYGtrK/AGK6DhSOacHPHvWCCEkI6io57aKyvNHvhjZ2cn8uCzlqipqRG7kaeiouKd/8sghHQO7/pQlUw/qfv374/Lly+LvPb7779L1XshhJD2IicnJ/FXV9RkjyMvLw9nzpxBfn4+zMzM4O7uLnSgYXp6OjZt2oRjx441W5mvry8WLlyI6upqjBs3DgwGA0lJSbh06RLOnTuHQ4cO/benIYQQGVBQoFVVIj1//hwzZsxAZWUl9PX1cebMGYSHhyMwMFBgb0d5eTnu3ZPsRfLOzs4IDg5GYGAgoqOjAQCbNm1Cz549sWfPHjg4OPzHxyGEkLZHk+Ni7N69G4aGhjh48CB0dHSQm5uLgIAA+Pr6/qdd3i4uLnBxcUFWVhaKi4uhqakJU1PTFj8AIYTIWlc9g0pSYgNHQkICvv76a/6GvJ49eyI0NBQRERHYtGkT8vLysGTJkhZXbGxsDGNj4xaXJ4SQ9kI9DjEqKiqE3lsMAAsWLED37t2xYcMGFBYWwsPDQ6oKCwsLce3aNeTm5gqdr
MtgMLBq1Sqp7kcIIbLWVSe9JSU2cBgZGSExMVHkvIOHhwdYLBaWL1+Ohw8fSlzZlStXsGLFCtTU1EBTU1PkKy4pcBBCOjqaHBfDyckJZ86cwdy5c0VG1zFjxuDAgQPw9fWVuLKdO3di+PDh2LJlC51JRQjptOhYdTFmz56NIUOGoLKyEurq6iLzDBkyBKdPn0ZiovBxyaLk5+dj48aNFDQIIZ0aDVWJoaenh1GjRjV7AxMTE6Gz+sVxdHREamoqHB0dJW4gIYR0NDQ5LkObNm2Cv78/eDwenJycRE6+GxgYyLJJhBAiNepxiGFvby/wJq+mMBgMxMfHN5tPTk4OLBYLO3bsELo3j8cDg8FASkqKRHUSQkh7oR6HGD4+Ps0Gjvj4eMTFxUkcYL788ks8fvwYfn5+MDY2FrmqihBCOjoFxbZZVZWVlYUtW7YgISEBSkpKmDBhAlauXAkVFRWJ7xETEwM/Pz/069cPUVFRzRdoAbGBw9/fX2yh+/fvIyQkBLdv34aVlZXEK6vi4+MREBCAiRPpNa+EkM6rLYaq2Gw2vLy8YGBggKCgIBQXFyMwMBDFxcXYs2ePRPeoqqrC1q1boaur2+rte5NUcxx3795FaGgo7t69C0tLS4SFhWH06NESl9fX15cqchJCSEfUFkNVp06dApvNRmRkJH/lqby8PFauXAlfX1/069ev2XuEhYWhV69eMDQ0xF9//dXqbWwkUdi8ffs2Zs6cCS8vL5SXlyMsLAznzp2TKmgAwBdffIGIiAiUlJS0qLGEENIRtMWx6rGxsRg6dKjAdoWxY8eCyWQiNja22fLp6ek4fvw4NmzY0KJnkkaTPY64uDiEhIQgPj4eAwYMQEREhMDJuNK6ePEiXr16hdGjR8PS0lJoVRWDwUB4eHiL708IIbLQFi9ySk9Px7Rp0wTSmEwmjIyMkJGR0Wz5zZs3Y/r06TA3N2/1tr1NbOD45JNP8PDhQ9jZ2eHAgQMYMWLEf66soqICffr0EfieEEI6G2l6Emw2m//a7TexWCyBX57ZbLbILQosFgulpaVN1hEdHY0nT55g7969ErfrvxAbOB48eAAASEtLa/YUXEmX4x4/flzK5hFCSMejoCB54Dh69ChCQkKE0v38/JpchCSp8vJybNu2DcuXLxcZeNqC2MDh5+cnkwYQQkhnw5Bictzb21vkKeJvf8izWCyRPRM2m93k6Rz79u2DlpYWXF1d+eVra2vB5XLBZrOhrKwMJpMpcXslQYGDEEKkJM2qqreHpMQxNTVFenq6QBqHw8Hz588xdepUseUyMjLw5MkTkSeZDx48GGvXrsWsWbMkbq8kZHrkCCGEdAWSbnqWhrOzM8LDw1FSUgJtbW0ADZv5OBxOk4uSli5dCm9vb4G0/fv3IzMzE4GBgQLzyq2FAgchhEipLfZxeHp64sSJE/D19YWvry+Kioqwbds2jB8/HmZmZvx869atQ2RkJB4/fgwAIldRnT9/Hnl5eSJ7Ia2BAgchhEhJXorJcUmxWCwcPXoUAQEB8Pf35x858vbL7bhcLurr61u9fmkweDwer11bQAghncy1355InHe0W9vvq5A16nEQQoiU6HTcLsLHWXidNGk9h2P9YMugwynbUhIvCrynp9u7GV0ao99HrXMfChyEEEKkIdcGq6o6EwochBAiJepxEEIIkUpbrKrqTChwEEKIlGhynBBCiFRoqIoQQohUaHKcEEKIVKjHQQghRCo0OU4IIUQqNDlOCCFEKjRURQghRCo0OU4IIUQq1OMghBAiFZrjIIQQIhU5eQochBBCpNAW7xzvTChwEEKIlN7xkSoKHIQQIi2aHCeEECKVd3ykigIHIYRIi+Y4CCGESEWehqoIIYRI4x3vcFDgIIQQadFQVTtISUlBZmYmOByO0DV3d/d2aBEhhEiO8W6fqi7bwFFaWop58+YhKSkJDAYDPB4PgGD0psBBCOno3vVDDmUaN3fs2IHq6mpERkaCx+Ph0KFDOHv2LLy8vNC7d2+cP39els0hhJAWkZNjSPzVFcm0xxEXF4cVK1agX79+AAANDQ1YW1vD2toaTCYTISEhCAkJkWWTCCFEau94h0O2PY7i4mL06NED8vLyUFVVxevXr/nXnJycEBcXJ8vmEEJIizAYDIm/uqJmexzp6emIjY1FRkYGSktLAQCampowMTGBs7MzTE1NJa6sZ8+eKC4uBgAYGxvjypUrcHZ2BgDEx8dDRUWlJc8gcwqKcnD3cYCjmwXUWMp4mVGE8wfv4NG9502W+zLIA/3tDcVeXz71e7wurAAAuPsMgY1DH+j1ZEFJRRHF+eVIistC9PH7KCutbs3H6VJU1JQxe9VUWA82h/XgftDR08R3a47g8PYz7d20Do/L5eLwuT9x6pd7yC8uQ5+eOpg3YwQmf/Bes2WTn7zE+asPcTc5A9l5r6HFUoWdRS8smemCvoa6AnmPX7qNyzf/QmZ2IcoqqtG9mwYcBpjA95NR6NVDu60er1V10REoiYkNHNXV1fjqq6/w888/Q1FREUZGRmCxWACAjIwMXLhwATt27MD48eOxdetWKCkpNVvZsGHDcOvWLbi5ucHLywtr1qxBcnIymEwmkpKS4OPj03pP1obmrHXBoFGmuHImCXkvXsPpw/5Yun0idi6LxJPEHLHloo7fR2zUI4E0OXk5zFr1AV69eM0PGgBg3L87slLzcOfKE1RX1qJnH204T7SCnZMxNs75ETVVtW32fJ2Zti4LC7/5FK9eFCD1QTqc3Aa2d5M6jT3HruDAmZuY4TYIA8x74eqdFHy5+ywYYGDSB3ZNlj1w5iYepDzH2OE2sDDugcKScvwQdQfTloTj5K55sDDW5+d9nJ4DY8NuGDPUEprqyniZV4Kffo3HtbupuBC8GD10WW39qP9ZF+1ISExs4Ni1axf+/PNP7Ny5E25ubmAymQLXORwOYmJiEBAQgJ07d2L9+vXNVrZq1SpUVzf8tuzu7g41NTVcvnwZNTU12LBhAzw9Pf/j47S9vpbd4eBijp/23cIv/0sAAPz5ayq2HPkEH/kOQ8CCn8SWfXz/hVDaAAcjKCjK43ZMmkD6d19GCeVNf/QKi7eMg/3wvrgd8+Q/PknXVJBbjDEGXijILYZBn+64nHW4vZvUKeQVsnEk8hY8xw3GxsWTAQAzxg7CzDWHsOP7XzHO2QYK8vJiy8/yGIZdq2aAqfjvR8q4EQMw2S8EEadj8e2XH/HTA5dOFSo/xtES05fuw/mrCVj48ajWe7A20laHHGZlZWHLli1ISEiAkpISJkyYgJUrV0o0GhMZGYl9+/YhOzsbRkZGWLx4McaPH98m7RQbOKKjo7F27VpMnDhR5HUmk4kJEyagtrYW27dvlyhwKCkpCfRMXF1d4erq2oJmt5/3R5qBW8/FjYv/9hzqOPW4GZ2C6Qsc0U1fA0WvyiS+31A3C3C5PMRJEAga76uq3nzv7l1Vy6lDQW5xezej07l6JwW1dfX4ZPwQfhqDwYDn+CFYufMnxD96DgfbvmLLD7Q0EkozNuwGM6PuSH+e32z9hnpaAAB2
RecYhm2LI0fYbDa8vLxgYGCAoKAgFBcXIzAwEMXFxdizZ0+TZS9fvozVq1dj/vz5GDZsGK5cuYLly5dDTU0NI0eObPW2NjlUpaurK+4yn66uLr8XIY36+nqRGwA7+jyHUT9d5OewUVleI5CemZIHAOjTT0/iwMFUVoD9sL54kpiDkvxyoesMBqDGUoa8ghx69NLC9AVOqK/jIjXh5X9/EELekJKeC6aiAsyNewik25r3ariekdtk4BCFx+Oh6HW50BxHo5LSCtRzecjJf43QU9cBAE7vmbWg9bLXFkNVp06dApvNRmRkJHR0dAAA8vLyWLlyJXx9ffmrUUUJCgrChx9+iBUrVgAAhg4dioyMDOzdu1e2gWPgwIEIDQ2FjY0NNDU1ReYpLS1FWFgY3n//fYkqq6iowHfffYfLly+jqKiIvwHwTSkpKRI2vX1odlNDaVGFUHorD7RlAAAgAElEQVRjmpaumsT3GjjCBMqqTMT9liryup6BJradnMn/vji/DPu3/Iacv0ukbDUhTcsvKYOulprQKiA9bfWG68Vsqe956fdE5BWxsfiTD4Su1dXXw/GzbfzvtViq+GrBBAwf2FkCR+tHjtjYWAwdOpQfNABg7NixWLduHWJjY8UGjhcvXiAjIwPLli0TSJ84cSLWrl2L4uJigXu2BrGB4+uvv8bMmTMxatQoODo6wszMDBoaGgCAsrIypKenIy4uDiwWC0ePHpWosvXr1+PGjRvw8PCAsbExFBUVW+cpZIippIDXtfVC6bWchjRFJcm3xji6WYBTU4f7v6eLvF5SUI5dyyKhqKSAPuZ6GORsCiWVzvczIx1fTU0dFBWF/+0qMRvSqmvqpLpfxosCbA6Pgp1Fb0xzFV6gIC8nh8MBs1BbW49nL/Jx6XoiqqqFRyA6KmniBpvNBpstHHhZLBZ/wRHQsIJ12rRpAnmYTCaMjIyQkZEh9v6N195e4WpmZsa/LrPA0adPH0RHR+PkyZO4efMmzpw5w394FosFU1NTLFq0CJ6envyA0pybN29i/fr1mDpVeHKss+DU1EFRUXiSUJHZkFYr4X8wlrYKrAb1RsLNDFRViP4PU8upx+P4hmGpxFtZSIl/ibWh01BWUoXEuKwWtZ+82+rruShmC/aYNdVVoKSkgNpa4X+7NZyGNGUpfiEqKCnDgk3HoaGmjOB1npCXF94uxmAw4PRewwfdyMHmGONgiSn+IVBVZuLzSUOleaR2IU2P4+jRoyI3Nvv5+cHf35//PZvNFggkjVgsFn8rhCiN194u2zhS1FTZlmryX4OGhgbmz5+P+fPnt0plWlpa6NatW6vcq72UFlWgm77wX65mt4YhqjeX1DbFwcUc8gpyiHtrNVVTnibn4nVhBYa6mlPgIC2SW1gKlznfCqQd3eqD7toaiHuYAS6XCzm5fz/oC0oa5t6660i2RLasohrzvzkGdkU1ftg+Fz26SVbO2LAbLE164tLvSZ0kcAgPs4vj7e0NDw8PoXRRQaKzkOmRIwsWLMDhw4cxdOhQifZ9dETPnxXCcmAvqKorCUyQm1j1+Od6gUT3GepijrLXVUiO+1uq+hWY8lBRZzafkRAR9LTVcThglkBafxN9PHuej59+i8fTv/Nh0fffPReJaS/4eZpTw6nFos0nkJVdhMMBs2Bm1F2qttVwasERMQzcETG4ku+jentIqql8ooa02Gw2TExMxJZr7Fmw2Wzo6enx09/csN3amgwcNTU1OHfuHG7cuIHMzEz+ESFaWlro27cvRo0aBQ8PD4mDwIwZM/D8+XOMHj0a9vb2QkNcDAYDW7dubeGjyEb87+kY98lAjJxszd/HoaAoh+HjLZGVmo/C3IYVVZrdVKGixkRBNhv19VyBe+j31kJfyx64dj5Z6BoAKKsqoq62HnW1gtfeH2UKdZYyslKbX95IiChKTEX+ENGbxgztj20Hf8HJn+/y93HweDz8+Ms96GmrY5BVH37ektIKlLAr0VNPEyrKDb/E1NdzsWz7aTxMfYHQ9Z/BXsTyXKAhONTWcaGuKviZ8SDlOZ5k5WPiyAGt9ahti9f6Ac7U1BTp6YLznRwOB8+fP29yeL8xqGRkZAjMczTeq6mg01JiA0d2djZmz56NnJwc2NvbY9iwYfyoyWaz8ezZMwQEBOD777/H4cOHYWgo/iiNRj/99BMOHDgAFRUVPH36VGhTYWeQkZKHe9eewmOuA9Q1lZH/shROYy2gq8/C7hUX+PmmzXfE8HGWWPXRUaHluY5uFgAgdpiqj3l3LPzGDXevP0Pey9dggIG+/btjiEs/FOSyEXMmqe0esAvwXDwRGlpqYGk1DB8O/sAW8goNc1An915CObuyPZvXIenrasJrsiMOnfsD9VwebM0NcfV2Ku4/+hvblk2FosK/83onou4g9OR1HN3qw1+iu/3QZVy7k4oPhligtLwSF68/FLh/47ElBSXl8PgiDONG2MC0tx6YigpIy3yFyGsPoa6mhEWewiuwOiSe8C98/5WzszPCw8NRUlICbe2Go1diYmLA4XCaXFLbu3dvmJiY4OeffxbYFxcVFYUBAwa0+sQ40ETg2LJlC9TV1RETE4OePXuKzJObmws/Pz8EBAQgPDy82cqCg4MxZcoUbNq0CcrKyi1vdTs7sPUKPF79c1aVhhKyM4sRvDYKqQ+yJSrv4GKO/OxSpP/1SuT1/OzXSIzLwoAhRnCeYAU5eTkU5ZXh6tlkRJ+4jwp259gk1V68V3rA8I39CMPGDsSwsQ0re6JPXKfAIcaKWa7Q1FDBj5fvIfLqA/Qx0MG2ZVPhPsa+2bIpGbkAgOt303D9rvAvRI2BQ0tDFZNG2eFeciaibySDU1uH7t00MGmkLRZ6joJhd63Wfag2wmiDHoenpydOnDgBX19f+Pr6oqioCNu2bcP48eP5K6QAYN26dYiMjMTjx4/5aV988QWWLVsGIyMjODk54erVq/jzzz8RERHR6u0EAAZP1GYKAPb29vjuu++a3Txy48YNLF26FA8ePGi2ssa9IY6Oji1rbRN8nOk49rZ0ONYPtgzRpwiQ1pHEiwLv6en2bkaXxuj3UfOZJMBh50qcl8kS/Yu3KJmZmQgICEB8fDz/yJFVq1YJbIxes2YNzp8/j7Q0wQB9/vx5oSNHJkyYIHHd0hDb41BSUkJFRfMrhCoqKiQecho1ahQSEhLaJHAQQojMSDE5Lo2+ffvi0KFDTebZtm0btm3bJpTu4eEhcvVWWxAbONzc3LBt2zZoaGhgxIgRIvP88ccf2L59Oz788EOJKps6dSo2bdqEqqoqgTmTN1lbW0vYdEIIaR+MNpjj6EzEBo7Vq1cjLy8P8+bNA4vFgomJicDO8czMTLDZbIwcORKrV6+WqLK5c+cCAA4ePIiDBw8KbKLh8XhgMBgd/sgRQggBOsey4bYiNnCoqakhIiICiYmJiI2NRXp6On+Nsb6+PoYNG4aRI0fC1tZW4sqOHTv231tMCCHtjXocTbOzs4OdXdMvcZHUkCFDms9ECCEdXRusqupMZLpznBBCugKa42hCXl4ezpw5g/z8fJi
ZmcHd3V1ot3d6ejo2bdokdhhq4MCBOHbsGGxsbGBvb9/s4WAJCQlSPgIhhMgYt/Oc5NsWxAaO58+fY8aMGaisrIS+vj7OnDmD8PBwBAYGCuztKC8vx71798RW4OPjwz8/xcfHp03OsSeEEFmiHocYu3fvhqGhIQ4ePAgdHR3k5uYiICAAvr6+Ur0f3M/Pj//nN48QJoSQTusdn+MQPij/HwkJCVi0aBH/nJOePXsiNDQUX3zxBTZt2oSgoCCZNZIQQjoUHlfyry5IbI+joqJC5Aa9BQsWoHv37tiwYQMKCwul3qn44MEDXL58Ga9evUJNjeB7uxkMhkRnXhFCSLt6x3scYgOHkZEREhMT4eDgIHTNw8MDLBYLy5cvx8OHD0WUFu2HH37Ali1boK2tjT59+nTKV8cSQggDXbMnISmxgcPJyQlnzpzB3LlzBd4I1mjMmDE4cOAAfH19Ja7syJEjmDp1KjZv3gwFBVoJTAjppNrorKrOQuyn9+zZszFkyBBUVlZCXV1dZJ4hQ4bg9OnTSExMlKiywsJCTJo0iYIGIaRzo6Eq0fT09DBq1Khmb2BiYiLxG6YcHByQkpJCp+MSQjo1Wo4rQ0uXLsXKlSuhrKyM4cOHi5x819LqHC9yIYS8w6jHIZoku7wbMRgMxMfHN5vP3d0dALB582ax96bTcQkhHR6XAodIkuzyjo+PR1xcnMQBZuvWrbRznBDS+dXXNJ+nCxMbOJra5X3//n2EhITg9u3bsLKyknhl1dSpU6VvISGEdDQ0VCW5u3fvIjQ0FHfv3oWlpSXCwsIwevTotmobIYR0TDRU1bzbt28jNDQU9+7dg7W1NcLCwvDBBx9IVMGkSZOwe/dumJubY9KkSU3mZTAYuHjxokT3JYSQdsOra+8WtKsmA0dcXBxCQkIQHx+PAQMGICIiQuBkXEnY2NhARUUFQMP7xGmOgxDS6VGPQ7RPPvkEDx8+hJ2dHQ4cOIARI0a0qILAwED+n7dt29aiexBCSIdCgUO0Bw8eAADS0tKwZMmSJm8i6XJcQgjpEmhVlWhvvkejNRUWFuLatWvIzc0VeTruqlWr2qReQghpNbSqSrS2CBxXrlzBihUrUFNTA01NTZGn41LgIIR0eDRUJTs7d+7E8OHDsWXLFv4LogghpLPhSdHj6IrLgcS+AbAt5Ofn4/PPP6egQQjp3Lh1kn91QTINHI6OjkhNTZVllYQQ0vq49ZJ/dUEMHo/Hk1VlBQUF8Pf3h5ubG5ycnESejmtgYCCr5hBCSItwb22UOK+ck+R5OwuZznHIycmBxWJhx44dQhsBeTweGAxGi0/Hraqqbo0mEjFUVJTBe3q6vZvRpTH6fQRbxsT2bkaXlsSLap0b0c5x2fnyyy/x+PFj+Pn5wdjYmN45TgjpnDrQEFRSUhICAwPx6NEjaGpqYsaMGVi8eDHk5eUlvseRI0cQGBiIUaNGISIiotn8Mg0c8fHxCAgIwMSJ9FsVIaQT6yD7OF68eIFZs2ZhyJAhiIiIQEZGBnbs2AEOh4OVK1dKdI+8vDyEhISgW7duEtcr08Chr6/PP7eKEEI6rQ7S4zh48CBYLBaCg4PBZDLh6OiIsrIyhIaGYu7cuRK9UXX79u1wdXXFy5cvJa5XpquqvvjiC0RERKCkpESW1RJCSOuqr5H8qw3FxsbCxcUFTCaTnzZx4kRwOBzcvn272fJxcXGIjY3FihUrpKpXpj2Oixcv4tWrVxg9ejQsLS2FVlUxGAyEh4fLskmEECK9DtDjqKysRE5ODkxNTQXSe/XqBRUVFWRkZDRZvra2Fps3b4afnx90dXWlqlumgaOiogJ9+vQR+J4QQjodKeY42Gw22Gy2UDqLxRK5JUFSZWVl/PuIundpaWmT5b///nswGAx8/vnnUtct08Bx/PhxWVZHCCFtgifFjvCjR48iJCREKN3Pz0/oFd1lZWXIz89v9p7/db9bTk4OwsPDERoaCgUF6cOATAMHIYR0BTwphqq8vb3h4eEhlC6qpxATE4O1a9c2e89jx45hwIABACCyN8Nms6GpqSm2/M6dOzFo0CDY2Njwy9fV1aGurg5sNhsqKipNbpegwEEIIVKSJnBIMyQ1depUTJ06VeJ7GxgYID09XSAtOzsbVVVVMDExEVsuIyMDqampGDx4sNC1wYMHIzQ0FC4uLmLLU+AghBApcWs57d0EAICzszOuXr2KL7/8kr+yKjo6mr80V5yAgABUVlYKpG3duhXKyspYvnw5+vXr12S9FDgIIURK0vQ42tLcuXNx6dIlLF26FDNnzkRGRgbCwsLg7e0tMFTl7e2NnJwcxMTEAAB/mOtNLBYLqqqqcHBwaLZeChyEECKljhI4evfujSNHjmDr1q2YP38+NDU1MXv2bKEX8XG5XNTXt16bZXo6bluiQw7bFh1y2PbokMO211qHHFb8OEnivGofX2qVOjsS6nEQQoiUOkqPo71Q4CCEEClx69r2KJGOjgIHIYRIiXochBBCpEKBgxBCiFR4rbhCqTOiwEEIIVKiHgchhBCpcClwEEIIkQa3rmMcOdJeKHAQQoiUaKiKEEKIVChwEEIIkYo0L3LqiihwEEKIlGg5LiGEEKnQqipCCCFSoVVVhI/D4SA8PAxRUVFgs9kwMzPD4sWL4eQ0rNmybDYbQUHf4dq1a6iqqoK1tQ2WL18Oa2trobwPHz5EUNB3SElJgaqqKlxcXLF06VKoqqry82RmZiIyMhJxcXF4+fIFVFVV0b+/JRYtWiR0z/DwcERE7BOqh8lk4u7dey34SXQsXC4Xh8/9iVO/3EN+cRn69NTBvBkjMPmD95otm/zkJc5ffYi7yRnIznsNLZYq7Cx6YclMF/Q11BXIe/zSbVy++RcyswtRVlGN7t004DDABL6fjEKvHtpt9XidjoqaMmavmgrrweawHtwPOnqa+G7NERzefqa9myYzNDlO+L7+egOuXLmCTz/9FEZGfXDp0iX4+/sjImI/3n//fbHluFwu/P398eRJGry8vKGjo4PTp3/EvHlz8cMP/0Pfvn35eVNTU7FgwXwYGxtj+fIVyM/Px/Hjx/D3338jIiKCn+/8+XM4f/48XFxc8NFHH6G8vBxnz56Bl9dMhISEinwt5Jo1a6Ghoc7/Xk5OvpV+Mu1rz7ErOHDmJma4DcIA8164eicFX+4+CwYYmPSBXZNlD5y5iQcpzzF2uA0sjHugsKQcP0TdwbQl4Ti5ax4sjPX5eR+n58DYsBvGDLWEproyXuaV4Kdf43HtbiouBC9GD13J3hvd1WnrsrDwm0/x6kUBUh+kw8ltYHs3Sea43C7xGqMWo8Dxj+TkZFy+fBlLlizB7Nk+AIBJkyZh+vRp2LPnW/zww//Elo2JiUFi4kNs374dY8d+CABwdXXFlCmTERYWhp07d/Lz7t27F+rq6jh48BA0NDQANLxwfvPmTbh58yZGjBgBAPjww3FYuHCRQC/E3d0dHh7uCA8PExk4XFxcoKurK5TemeUVsnEk8hY8xw3GxsWTAQAzxg7CzDWHsOP7XzHO2QYK8uID5CyPYdi1ag
aYiv/+Ux83YgAm+4Ug4nQsvv3yI3564NKpQuXHOFpi+tJ9OH81AQs/HtV6D9aJFeQWY4yBFwpyi2HQpzsuZx1u7ybJ3DseNyDX3g3oKK5cuQI5OTlMmzadn6akpAR3dw88evQI2dnZTZbV1taGq6sbP01HRwdubm6Ijb2B6uqGtxOWl5fjzp3bGDduHD9oAA0BSlVVFb/99hs/zcrKSiBoAICWlhYGDhyIjIwMke3g8XgoLy8Hl8uV7uE7sKt3UlBbV49Pxg/hpzEYDHiOH4KC4jLEP3reZPmBlkYCQQMAjA27wcyoO9Kf5zdbv6GeFgCAXUFvmGxUy6lDQW5xezejXfG4PIm/uiIKHP9ITU1Fr169wGIJDkfY2NgAANLSUpssa2FhATk5wR+njY0NampqkJWVCQB4+vQp6urqYGUlOEehqKgICwuLJutoVFhYBC0tLZHXpkyZjOHDh8HJyRFr1qxBQUFBs/fr6FLSc8FUVIC5cQ+BdFvzXg3XM3KlviePx0PR63Jos9REXi8prUBhSTmS0l5izXfnAABO75lJXQ/puurqeRJ/dUU0VPWPwsIC6OnpCaU3Dv3k54v/EC4sLICdnfBYu66uHr9s//6WKCwsBADo6QkPJ+nq6iErK6vJNiYkJCApKRE+PnME0lksFjw9PWFrawcmk4mEhAScPv0j/vorGSdPnhLo3XQ2+SVl0NVSA4PBEEjX026Yy8kvZkt9z0u/JyKviI3Fn3wgdK2uvh6On23jf6/FUsVXCyZg+EAKHORfXahT3yIyCxx//vknjh8/juzsbHTv3h0ffvghZsyYIavqm1VTUwNFRUWhdCUlJf71psoymUwRZZkCZWtqGoY7FBVF520c0hKluLgIa9eugaGhIebMEQwcn332mcD3Li4usLGxwbp1a3Hq1EnMmzdf7H07upqaOigqCv8zVWI2pFXXSLeDN+NFATaHR8HOojemuQpP6srLyeFwwCzU1tbj2Yt8XLqeiKrqd3vpJRHWVYegJCWTwHHt2jX4+vpCQ0MDffv2xZMnT3Dr1i28evUK/v7+smhCs5SUlFBbWyuU3vih3xhAxJXlcIQ/XGpqOAJllZSUAQC1taLzKisri7x/VVUl/P39UVFRiSNHvhea+xBl/Pjx+Pbb3bh9+06nCBz19VwUsysE0jTVVaCkpIDaWuHgUMNpSFNWkvyfcEFJGRZsOg4NNWUEr/OEvLzwSC2DwYDTe6YAgJGDzTHGwRJT/EOgqszE55OGSvNIpAt7x+OGbALH/v374eDggLCwMKipqYHL5SIgIACHDx/G4sWLheYG2oOurh5yc3OE0huHl7p3Fx7GerNsYaHwUFZjWmPZxmGvgoJCkXlFDZXV1tZi+fLlePr0KcLCwmFm1k+Cp2nQo0cPsNmlEudvT7mFpXCZ861A2tGtPuiurYG4hxngcrkC/04KSsoBAN11JFsiW1ZRjfnfHAO7oho/bJ+LHt0kK2ds2A2WJj1x6fckChyE711fjiuTT+yMjAz4+PhATa1hMlJOTg6LFi1CVVVVk6uVZMnCwgIvX74Emy04Zp6cnMy/3lTZtLQ0odVMycl/QUlJCcbGDfs4zMzMoKCggMePHwnkq62tRVpaGszNBevgcrlYv/4r3L17F4GBgU3uJXkbj8dDTk4OtLU7x8Y1PW11HA6YJfDV30Qf/U16glNbh6d/C66ASkx7AQDob6Iv6nYCaji1WLT5BLKyi7Dv689hZtRdqrbVcGpRXkmrqsi/uDzJv7oimQQONpst9AGmo6PDv9YRuLq6gMvl4uzZf3e/cjgcXLhwAVZWVjA0bFjFU1BQgMzMTIFhLVdXF5SUlCAm5t/ltI3fjxgxgj8EpaGhAQcHB/zyyy8oLy/n542KuoTKykq4ubkKtGnbtm349ddfsXbtOowZ4yK27cXFwksjT58+jZKSEol2vXcESkxFOL1nKvClqa6CMUP7Q1FBHid/vsvPy+Px8OMv96CnrY5BVn346SWlFch4USAwJ1Ffz8Wy7afxMPUFvlvjCXtLI5H1NwQH4XmsBynP8SQrHzZmBq34tKSzq6/jSvzVFclscry8vByvX7/mf1//z+mSb6cDELvctC0NGGALV1c3hIaGoqTkNYyMjBAVdQk5OdnYt+/f4zyCg4Nx6dJFREf/DENDQwCAi4srbG1/wMaNG5GZmQVtbW2cPv0j6urq4Ou7WKAePz9/eHt7Yc4cH0yfPh35+fk4duwYhgwZghEjnPn5Tpw4gdOnf4StrR1UVJQRHR0lcJ/Ro0dDRaVhrmP8+HFwc3NDv379wGQq4cGDB/j118swNzfHxx9/hM5MX1cTXpMdcejcH6jn8mBrboirt1Nx/9Hf2LZsKhQV/t38dyLqDkJPXsfRrT5wsG3o5W0/dBnX7qTigyEWKC2vxMXrDwXu33hsSUFJOTy+CMO4ETYw7a0HpqIC0jJfIfLaQ6irKWGRp/AKrHeZ5+KJ0NBSA0urYRRh8Ae2kP/n7+Lk3ksoZ1e2Z/PaXFftSUhKZoHj7ZVAjWbNmiWUlpKS0satES0gIABhYWH4+edolJaWwszMDEFBwRg8eEiT5eTl5RESEoo9e/bg1KmTqK6uhrW1NTZu3AQTExOBvJaWloiIiEBQUBB27doFVVVVTJ48BUuWLBFYcpqWlgYASEpKRFJSolCdDYGrIXCMGzceiYkPcfXqVdTU1KBnTwN4eXlj3rx5/ODSma2Y5QpNDRX8ePkeIq8+QB8DHWxbNhXuY+ybLdu4z+P63TRcv5smdL0xcGhpqGLSKDvcS85E9I1kcGrr0L2bBiaNtMVCz1Ew7C77X2Y6Mu+VHjB8Y2/NsLEDMWxswyq16BPXu37geMcjB4PH47X5T+D8+fNS5ffw8JC6jqoqGoNuSyoqyuA9Pd3ezejSGP0+gi1jYns3o0tL4kU1n0kCcb6S741yDCtrlTo7Epn0OFoSCAghpKPitv3v2x1au+8cZ7PZyMrKgp6eHnr27NnezSGEkGbRznEZiI2Nxb1797BixQqB9L179yIiIoI/Ue7q6opdu3aJ3IVNCCEdRVc9g0pSMlmO+7///Q8vX74USLt27RpCQ0Nhbm6Or776Cp9++imuXLmCkydPyqJJhBDSYh3pdNykpCR88sknsLW1xYgRIxAcHMz/ZbwpxcXF+PrrrzFq1Ci89957mDBhAo4fPy5RnTLpcaSkpGDZsmUCaWfPnoWKigoOHTrE3+OhoKCAyMhIeHt7y6JZhBDSIh1lVdWLFy8wa9YsDBkyBBEREcjIyMCOHTvA4XCwcuXKJsv6+/vj77//xrJly2BgYIBbt24hICAAXC632c9gmQSOkpISGBj8u4GKx+Ph9u3bcHBwENgYOHz4cKlXYBFCiKx1kLiBgwcPgsViITg4GEwmE46OjigrK0NoaCjmzp0rdk/cq1evcP/+fWzduhXTpk0DADg6OiI1NRVRUVHNBg6ZDFVpa2vzz3wCGvYoVFRUYNCgQQL5lJSUJOpiEUJIe+JyeRJ/taXY2Fi4uLgIzAtPnDgRHA4Ht2/fFluu8eSLt1+5w
GKxIMkODZn0OOzs7HDixAn+A/7www9gMBgYPXq0QL6nT5+iR48eYu5CCCEdQ70Uk+NsNlvk0UosFkvoxXHSqKysRE5ODkxNTQXSe/XqBRUVFbFvCgWA3r17Y+jQodi3bx+MjY1hYGCAuLg4xMTEYPPmzc3WLZPAsWTJEsyYMQOOjo5QV1dHXl4eJk2aJPTA0dHRQr0QQgjpaKRZjnv06FGEhIQIpfv5+f2n10qUlTVsLBQVfFgsFkpLmz4ZOzw8HMuWLcOkSZMANBw+u3r1ari7uzdbt0wCh6mpKS5cuIAzZ86grKwM1tbWQpsCi4qKYGlpicmTJ8uiSYQQ0mLSjEB5e3uL3AQt6gO/rKwM+fn5Qulve3POuCV4PB7Wrl2LrKws7N69G3p6erh16xZ27doFXV1dTJzY9AkGMtsA2Lt3b6GVVW/q1q0bNmzYIKvmEEJIi0kTOKQZkoqJicHatWubzXfs2DEMGDAAgOgTxtlsNjQ1NcWW//3333H58mVcuHAB/fv3BwA4ODigqKgI27dv7ziBQxLp6enYv38/tm/f3t5NIYQQsdpqznvq1KmYOnWqxPkNDAyQnp4ukJadnY2qqiqhA1bf9OzZM8jLywu9Z8jS0hI//fQTqqqqoKKiIra8zF69V19fj8TERPzyyy949EjwRUbJyclYvHgxJk2ahGvXrsmqSYQQ0iId5UVOzs7OuHr1qsCrq6Ojo/lLc8UxNFgQbWYAAA8GSURBVDREfX290Enkjx49Qrdu3ZoMGoCMehyvXr3CggUL8OTJE/B4PP6Kql27duGbb77BpUuXoKamhvnz52P27NmyaBIhhLRYHZfRfCYZmDt3Li5duoSlS5di5syZyMjIQFhYGLy9vQWGqry9vZGTk4OYmBgAwMiRI2FoaIglS5Zg8eLF6NGjB/744w+cP39eogl7mQSOoKAgvHz5EsuWLUP//v2Rk5OD/fv3Y/r06UhPT8fnn38Of3//JsfkCCGko+gohxz27t0bR44cwdatWzF//nxoampi9uzZ8PPzE8jH5XIF9sipqanh6NGj2LNnD7799luw2Wz06tULa9asweeff95svTJ5H8fo0aMxa9YseHl58dPi4+Px2WefYeHChVi6dOl/roPex9G26H0cbY/ex9H2Wut9HMenKEucd+aFrvfZJJMeR15eHmxsbATSGlcEODs7iypCCCEdVkc5cqS9yCRw1NfXQ0FBsCp5+Yb3EysrSx65CSGkI6DAISOHDx+Grq4u//vGEbKDBw9CR0dHIO/69etl1SxCCJEaBQ4ZMDAwQFJSksj0hw8fCqQxGAwKHISQDq22g0yOtxeZBA7am0EI6Ure9R6HTDYAfvvtt8jLyxNIu3HjBsrLywXSnj9/LvR6WUII6Wi4XMm/uiKZBI4DBw4IBI76+nosXLgQf//9t0C+kpIS/Pzzz7JoEiGEtFhH2TneXmQyVCVqq4gMto8QQkib6KoBQVId6pBDQgjpDDrKkSPthQIHIYRIiXoc7YjBeLejNiGkc6LAISPe3t5CgeKzzz4TSKN5D0JIZ9BVV0tJSiaB4+2TGgkhpDN713scMjkdlxBCupLNI5p+0dGbvr5Z1YYtaR8UOAghhEhFZq+OJYQQ0jVQ4CCEECIVChyEEEKkQoGDEEKIVChwEEIIkQoFDkIIIVKhwEEIIUQqFDgIIYRIhQIHIYQQqVDgaMbevXthYWEBT09Pkdfs7e353+fn52PHjh2YMmUK7O3tMXz4cHzxxRdCbzoEgPLycnz99ddwcHCAvb09Fi5ciJcvXwrly8rKwpw5c2Bvb4+hQ4diy5YtqKrqWkcY0M9Y9hp/5qK+du3aBQAYPXo0P83KygqjR4/GmjVrkJubK3S/Z8+e4YsvvoCzszMGDBgAZ2dnLFiwADdu3ODnOXfunEA9gwcPxscff4yrV6/K7LlJ66D3cUjowYMH+PPPPzFs2DCxeR49eoTffvsN06ZNw3vvvQc2m42IiAjMmDEDFy9ehL6+Pj/vihUr8OjRI2zYsAHq6uoIDg7GrFmzcOnSJaioNJyDw2az4eXlBQMDAwQFBaG4uBiBgYEoLi7Gnj172vyZZY1+xrKlrKyMo0ePCqX36NGD/+exY8fCx8cHdXV1+OuvvxAcHIxHjx7h3LlzUFRUBAA8f/4cM2bMgIWFBVavXo1u3bohJycHN27cwN27dzFy5EiB+x88eBAaGhooKSnB4cOH4evri4MHD2LEiBFt+8Ck9fBIk4KDg3nvvfceb8aMGTxPT0+R1xqVlpbyamtrBfIUFRXxrK2teXv37uWnPXz4kGdubs77/fff+WnZ2dk8Kysr3okTJ/hpERERPDs7O15RURE/7eLFizxzc3PekydPWu0Z2xv9jGXv7Z+rKB988AFv06ZNAmkRERE8c3NzXkJCAj/t22+/5b333nu8yspKoXvU19fz/3z27Fmeubm5wM+6rKyMN2jQIN78+fNb+iikHdBQlYQWL16MhIQExMXFic3DYrGgoCDYidPR0YG+vj7y8/P5aTdu3ICGhobAb1gGBgYYOHAgYmNj+WmxsbEYOnQodHR0+Gljx44Fk8kUyNdV0M+447OwsAAAgeEqNpsNdXV1fi/uTXJyTX/EqKuro2/fviKHEEnHRYFDQiNHjsSAAQMQEhIiVbnc3Fzk5OTAxMSEn5aeng4TExOh/1RmZmbIyMgQyGdmZiaQh8lkwsjISCBfV0E/Y9mrq6sT+uI1cWB2Y8Do3bs3P83a2hr5+flYv349UlJSwJXiLUf19fXIzc1F9+7dW/4QROYocEjBz88P9+/fx+3btyUuExAQABaLBQ8PD34am82GhoaGUF4Wi4XS0lKBfCwWq9l8XQn9jGWnsrIS1tbWQl+///47Pw+Px0NdXR1qamoQHx+P/f9v7/5jqfr/OIA/q92o7pDamqwyclnSTSST5F4VYYV+WSkrllyabiU/uipiWc2KprjrB/Or9XuztbZSW/qh9EsZf5SSVYhLXdG9lznfP5qzLjefjm9u0uvxl/M67/f1Pu/3dl68z/uet1wOkUgEe3t7tkxAQAD8/Pxw4cIF+Pv7w8nJCRKJ5KcPvXt6etDd3Y3m5makpqaiubkZ3t7eQ3255Deih+MceHh4wM7ODllZWXBxcfnP8jk5Obh16xaysrJgbGyshxb+/aiP9cfQ0BAFBQX94hYWFuzPRUVFKCoqYo8tLS2Rnp6uVX7MmDFIT0/Htm3bcPv2bTx+/Bj37t1DaWkpJBIJoqOjtcr/uPjB0NAQERERWLt27W+6KqIPlDg4ioqKQkREBCoqKgYsd+XKFRw9ehSJiYkQi8Va54yMjHQuaVQqlVo3PyMjIyiVSp3lfpyWGWmoj/Vj9OjRWv856LJ8+XKEhoZCrVajrKwM2dnZ2L9/P7tk90fW1tawtrbG1q1b0draitDQUMjlcoSEhMDExIQtl5ubCz6fD2NjY0ydOrXfMysy/NFUFUdisRh2dnYDzsOXlpZCJpMhPDwcGzZs6HfeysoKb9++7TeX/Pr1a62blZWVFWpra7XKaDQa1NfXj+ib
GvXx8GFqagp7e3s4OTlBKpVi48aNKCkpwYsXL/6zXmBgILq7u/t9x8bGxgb29vaYPn06JY2/FCWOQYiMjER5eTmePHnS79yjR48glUqxcuVKSKVSnfUXL14MpVKJsrIyNtbQ0ICnT5/C3d2djbm7u6O8vBxtbW1s7MaNG9BoNP3Wxo801MfDU1RUFPh8Pk6ePMnGWlpadJatq6sDAEyePFkfTSN6ROl+EDw9PTFr1iw8ePAA48ePZ+O1tbWQSCSYNm0aVq1ahefPn7Pn+Hw+u3pHKBTCw8MDe/fuRVxcHPh8PjIyMmBmZobAwEC2TlBQEAoKCiCRSCCRSKBQKJCWlgYfH59+K4FGGurjodfT06PVf70mTpyIGTNm6KxjYmKC4OBg5OTk4NWrV7C2tsaJEydQU1MDPz8/zJw5E2q1Gvfu3UNRURGWLFkCc3Pzob4UomeUOAYpMjISkZGRWrHKykq0t7ejvb0d69ev1zrn7OyM/Px89jg9PR2HDx9GUlISNBoNFixYgIyMDK218EZGRsjLy0NKSgq2b98OAwMD+Pr6IiYmZmgvbpigPh5aKpUK69at6xf38fEZ8FvzmzdvRkFBAeRyOY4cOYIVK1ZArVYjPz8fTU1NGDNmDMzNzbFnz55+Y0RGhlHMQIu2CSGEkD7oGQchhBBOKHEQQgjhhBIHIYQQTihxEEII4YQSByGEEE4ocZC/TlxcHPz8/DjX67ubICFkcGg5Lvnr1NfXo7OzE7a2tpzqNTY24tOnT5gzZ84QtYyQfwMlDkIIIZzQVBUHGzduRHh4OK5duwYvLy8IhUJs2bIFzc3N+PTpEyIiIuDg4AAvLy/cuHGjX/07d+4gKCgIQqEQzs7OiI+P13ozq0qlwsGDB+Ht7Q2hUAiRSISEhAR8/vxZ63PEYjGSk5NRXFwMsViMefPmISwsTOfbYIer/6cv+05VXb58GTY2NqipqUF4eDjmzp2LJUuWoLCwUKte36mqhw8fwsbGBmVlZZBKpXBwcICbmxvOnz8PALhw4QI8PT3h6OiIHTt24OvXr2zdXx0rQkYieuUIR9XV1VAoFIiJiUFnZydSU1ORkJCA9vZ2LF26FMHBwSgsLMTOnTtRWlrK7mx28+ZNbN++Hf7+/oiIiEBbWxuOHTsGqVSK06dPA/h+M+rq6kJ0dDQmT56MxsZGyOVyhIWF4eLFi1rtuH37Nt68eQOZTIaOjg4cOnQI8fHxyM3N1XeXDNpg+/Jndu3ahcDAQISEhKCkpATJycmwtbWFo6PjgPUOHDiAgIAArF69GlevXkViYiLq6+tRWVkJmUwGhUKB1NRUHDt2DDKZDAC3sSJkxPlTm53/jYKDgxmhUMi0tLSwsczMTEYgEDByuZyNKRQKRiAQMMXFxQzDMExPTw8jEomY6Ohorc979uwZIxAImIqKCp2/r6uri6murmYEAgFTVVXFxkUiEbNo0SJGpVKxsbNnzzICgYD58uXLb7nWoTbYvmQYhomNjWV8fX3Z40uXLjECgYDJz89nYxqNhlmwYAGTkpKi9flz585lj8vLyxmBQMCkpaWxsc7OTkYoFDIuLi5MR0cHG09KSmIWLlz40+v52VgRMhLRfxwc2draYtKkSexx725pbm5ubMzU1BTGxsbs1FFdXR0+fPiA+Ph4dHd3s+Vmz54NPp+Ply9fwsnJCQBw9epV5OXloa6uDp2dnWzZuro62NnZscfz58+HgYEBe9z7JtfGxkadW6EOR4Ppy4H8WI/H48HCwgJNTU2c6o0bNw5TpkyBlZWV1lt5LSws0NLSgq6uLvB4PAC/PlaEjDSUODjquz1p702k7/7WPB4PGo0GANDa2grg+14Gunz8+BHA930gYmNjsWbNGuzYsQMmJiZQKpUICwuDWq3+pXb0LTecDaYvB9I3YfJ4vF/qD131dMUYhoFGowGPx+M0VoSMNJQ49KB328x9+/bpXAra+1f39evXYWtri5SUFPZcVVWVfhpJOKGxIv8yShx6YGlpCTMzM7x7907nNqe9VCoVxo4dqxUrKSkZ6uaRQaCxIv8yShx6MGrUKCQkJEAqleLbt2/w8PDAhAkT0NDQgLt372LTpk0QCoVwdXVFcnIyjh8/DkdHR9y/fx+3bt36080nOtBYkX8ZJQ49WbZsGU6dOoXs7Gzs3r0bDMPAzMwMrq6u7NaaQUFBeP/+Pc6dO4czZ87AxcUFmZmZ8Pf3/8OtJ33RWJF/GX1znBBCCCf0zXFCCCGcUOIghBDCCSUOQgghnFDiIIQQwgklDkIIIZxQ4iCEEMIJJQ5CCCGcUOIghBDCyf8AmwccRLuMKYUAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Computing Correlation for n200/ersp/feat\n", + "\n", + "data_n200 = n200_val[1:,1:].flatten()\n", + "data_ersp = ersp_val[1:,1:].flatten()\n", + "data_feat = feat_val[1:,1:].flatten()\n", + "\n", + "print \"N200 and ERSP correlation:\", scipy.stats.pearsonr(data_n200, data_ersp)\n", + "\n", + "import seaborn as sns; sns.set()\n", + "np.corrcoef(data_ersp, data_feat)\n", + "\n", + "sns.set(font_scale=1.4)\n", + "map_data = np.array([[1.0, 0.7349, 0.0025],[0.7349, 1.0, -0.229],[0.0025, -0.229, 1.0]])\n", + "labels = ['N200 \\n mean', 'N200 \\n minima', 'ERSP']\n", + "ax = sns.heatmap(map_data, vmin=-1, vmax=1.0, center=0, annot=True, linewidths=.5, cmap='PuOr', xticklabels=labels, yticklabels=labels)\n", + "# ax.tick_params(direction='out', length=6, width=2, colors='r', grid_color='r', grid_alpha=0.5)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[1m Fixed Rule based Pairwise Evaluation \u001b[0m\n", + "Mean Accuracy:\n", + "N200 mean 0.7857142857142857\n", + "N200 minima 0.7142857142857143\n", + "ERSP 0.619047619047619\n", + "Decision 0.7857142857142857\n", + "Standard Deviation:\n", + "N200 mean 0.410325903324145\n", + "N200 minima 0.45175395145262565\n", + "ERSP 0.46899322865695736\n", + "Decision 0.410325903324145\n" + ] + } + ], + "source": [ + "# Pairwise Evaluation\n", + "\n", + "total = 0\n", + "# score_n200, score_feat, score_ersp, score_decn = 0, 0, 0, 0\n", + "dist_pref = 47\n", + "score_metric = np.zeros([4,total_sub+1])\n", + "\n", + "for sub_id in range(1,total_sub+1):\n", + " total = 0\n", + " score_n200, score_feat, score_ersp, score_decn = 0, 0, 0, 0\n", + " for prod_i in range(1,total_prod+1):\n", + " for prod_j in range(prod_i+1,total_prod+1):\n", + " if abs(pref[sub_id, prod_i] - pref[sub_id, prod_j]) > dist_pref:\n", + " \n", + " total = total + 1\n", + " ground_truth = (pref[sub_id, prod_i] > pref[sub_id, prod_j]) # True means \"prod_j\" is preferred\n", + " \n", + " count = 0\n", + " pred_n200 = n200_val[sub_id, prod_i] > n200_val[sub_id, prod_j]\n", + " if (pred_n200 == ground_truth):\n", + " score_n200 = score_n200 + 1 \n", + " count = count + 1\n", + " \n", + " pred_feat = feat_val[sub_id, prod_i] > feat_val[sub_id, prod_j]\n", + " if (pred_feat == ground_truth):\n", + " score_feat = score_feat + 1 \n", + " count = count + 1\n", + " \n", + " pred_ersp = ersp_val[sub_id, prod_i] > ersp_val[sub_id, prod_j]\n", + " if (pred_ersp == ground_truth):\n", + " score_ersp = score_ersp + 1 \n", + " count = count + 1\n", + " \n", + " if count > 1:\n", + " score_decn = score_decn + 1\n", + " \n", + " score_metric[0, sub_id] = score_n200*1.0/total\n", + " score_metric[1, sub_id] = score_feat*1.0/total\n", + " score_metric[2, sub_id] = score_ersp*1.0/total\n", + " score_metric[3, sub_id] = score_decn*1.0/total\n", + " \n", + "acc_mean = np.mean(score_metric[:,1:], axis=1)\n", + "acc_std = np.std(score_metric[:,1:], axis=1)\n", + "# print \"N200 Accuracy: \", \"Feat Accuracy: \", \"ERSP Accuracy: \", \"Decision Accuracy: \"\n", + "# print \"Mean: \", np.mean(score_metric[:,1:], axis=1)\n", + "# print \"Std: \", np.std(score_metric[:,1:], axis=1)\n", + "\n", + "method = ['N200 mean', 'N200 minima', 'ERSP', 'Decision']\n", + "\n", + "print \"\\033[1m Fixed Rule based Pairwise Evaluation \\033[0m\"\n", + "print \"Mean Accuracy:\"\n", + "for idx,item in enumerate(acc_mean):\n", 
+ " print method[idx], item\n", + "print \"Standard Deviation:\" \n", + "for idx,item in enumerate(acc_std):\n", + " print method[idx], item" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[1m Fixed Rule based Ranking \u001b[0m\n", + "Metrics: MHD, Tau, NDCG\n", + "N200 Mean [0.63888889 0.33333333 0.92837468]\n", + "ERSP [ 0.98888889 -0.15555556 0.83609209]\n", + "N200 minima [0.79444444 0.11111111 0.8795795 ]\n", + "Decision [0.78888889 0.15 0.89153368]\n", + "Random [ 0.99444444 -0.14444444 0.85200403]\n" + ] + } + ], + "source": [ + "## Ranking 3 prooducts\n", + "\n", + "rank_metrics = np.zeros([5,3]) # rows: N200, feat, ersp, decn1, 4 random col: avg dist, tau, NDCG\n", + "total = 0\n", + "\n", + "def tau_score(t_rank, p_rank):\n", + " tau, _ = stats.kendalltau(p_rank, t_rank)\n", + " return tau\n", + "\n", + "# Function to calculate NDCG score\n", + "def ndcg_score(rel, t_rank, p_rank):\n", + " rel = np.array(rel)\n", + " dcg = sum((rel)*1.0/np.log2(p_rank + 1))\n", + " idcg = sum((rel)*1.0/np.log2(t_rank + 1))\n", + " return dcg*1.0/idcg\n", + "\n", + "\n", + "for sub_id in range(3,4):#1,total_sub+1):\n", + " for prod_i in range(1,total_prod+1):\n", + " for prod_j in range(prod_i+1,total_prod+1):\n", + " for prod_k in range(prod_j+1,total_prod+1):\n", + "# if not ((pref[sub_id, prod_i] == pref[sub_id, prod_j]) or (pref[sub_id, prod_i] == pref[sub_id, prod_k]) or (pref[sub_id, prod_j] == pref[sub_id, prod_k])):\n", + " total = total + 1\n", + " ground_pref = [pref[sub_id, prod_i], pref[sub_id, prod_j], pref[sub_id, prod_k]] # [rank_i, rank_j, rank_k]\n", + " ground_ordr = np.argsort(ground_pref)\n", + " ground_rank = 3 - np.argsort(ground_ordr) # rank 1 means top, rank 3 means bottom\n", + " true_rel = ground_pref\n", + " \n", + " \n", + " pred_n200 = [n200_val[sub_id, prod_i], n200_val[sub_id, prod_j], n200_val[sub_id, prod_k]]\n", + " ordr_n200 = np.argsort(pred_n200)\n", + " rank_n200 = 3 - np.argsort(ordr_n200)\n", + "\n", + " pred_feat = [feat_val[sub_id, prod_i], feat_val[sub_id, prod_j], feat_val[sub_id, prod_k]]\n", + " ordr_feat = np.argsort(pred_feat)\n", + " rank_feat = 3 - np.argsort(ordr_feat)\n", + "\n", + " pred_ersp = [ersp_val[sub_id, prod_i], ersp_val[sub_id, prod_j], ersp_val[sub_id, prod_k]]\n", + " ordr_ersp = np.argsort(pred_ersp)\n", + " rank_ersp = 3 - np.argsort(ordr_ersp)\n", + " \n", + " # Computing for decision classifier 1\n", + " \n", + " if np.mean(rank_n200==rank_ersp)==1:\n", + " rank_decn = rank_n200\n", + " elif np.mean(rank_feat==rank_ersp)==1:\n", + " rank_decn = rank_feat\n", + " elif np.mean(rank_n200==rank_feat)==1:\n", + " rank_decn = rank_n200\n", + " else:\n", + " count = [0, 0, 0]\n", + " count[rank_n200[0]-1] = count[rank_n200[0]-1] + 2\n", + " count[rank_n200[1]-1] = count[rank_n200[1]-1] + 1\n", + " count[rank_feat[0]-1] = count[rank_feat[0]-1] + 2\n", + " count[rank_feat[1]-1] = count[rank_feat[1]-1] + 1\n", + " count[rank_ersp[0]-1] = count[rank_ersp[0]-1] + 2\n", + " count[rank_ersp[1]-1] = count[rank_ersp[1]-1] + 1\n", + " ordr_decn = np.argsort(count)\n", + " rank_decn = 3 - np.argsort(ordr_decn)\n", + " \n", + " # Computing for Random\n", + " pred_rand = np.random.uniform(0,1,[1,3])\n", + " ordr_rand = np.argsort(pred_rand)\n", + " rank_rand = 3 - np.argsort(ordr_rand)\n", + " rank_rand = rank_rand.flatten()\n", + " \n", + " all_ranks = [rank_n200, rank_ersp, rank_feat, rank_decn, rank_rand]\n", + " for idx_r, 
pred_rank in enumerate(all_ranks):\n", + " rank_metrics[idx_r,0] = rank_metrics[idx_r,0] + np.mean(abs(ground_rank - pred_rank))\n", + " rank_metrics[idx_r,1] = rank_metrics[idx_r,1] + tau_score(ground_rank, pred_rank)\n", + " rank_metrics[idx_r,2] = rank_metrics[idx_r,2] + ndcg_score(true_rel, ground_rank, pred_rank)\n", + " \n", + " \n", + " \n", + " \n", + " \n", + "print \"\\033[1m Fixed Rule based Ranking \\033[0m\"\n", + "print \"Metrics: MHD, Tau, NDCG\"\n", + "methods = ['N200 Mean ', 'ERSP ', 'N200 minima', 'Decision ', 'Random ']\n", + "for idx, item in enumerate(rank_metrics):\n", + " print methods[idx], item/total\n", + " \n", + "# print rank_metrics/total\n", + "# print total\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.15" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/README.md b/README.md index 4d73a79..1836569 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,36 @@ # Cerebro Codes for the paper titled, "Cerebro: A Wearable Solution to Detect and Track User Preferences using Brainwaves" + +## Citation +------------ +The dataset and codes are freely available for research use. Please cite the following publication for using the codes and dataset +
+
+
+Mohit Agarwal, Raghupathy Sivakumar
+Cerebro: A Wearable Solution to Detect and Track User Preferences using Brainwaves
+The 5th ACM Workshop on Wearable Systems and Applications. ACM, 2019.
+
+
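+The notebooks below expect the pre-processed `.mat` recordings under `data/` (see `data/README.md`). As a quick sanity check before running them, a minimal loading sketch (illustrative, not part of the original codes; the subject file name is just an example):
+
+```python
+import scipy.io
+
+mat = scipy.io.loadmat('data/S01_data.mat')
+sig = mat['sig']            # pre-processed EEG epochs: 25 channels x 768 samples x 500 trials
+lab = mat['lab'].flatten()  # product id (1-10) shown on each of the 500 trials
+print(sig.shape, lab.shape)
+```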
+ +## Codes +-------- + +* Use `cerebro.ipynb` for replicating results in Figure 5,6,7, Table 1 and Section 4 +* Use `fixrule.ipynb` for replicating results in Figure 3 and Section 4 + + +## Contact +---------- + +[Mohit Agarwal](http://agmohit.com ) + +Email: me.agmohit@gmail.com + +## Dependencies +--------------- +`mne-python` + + +## References +------------- + +[1] [Cerebro: A Wearable Solution to Detect and Track User Preferences using Brainwaves](http://gnan.ece.gatech.edu/archive/agarwal-cerebro.pdf) +[2] [Using EEG to Predict Consumers’ Future Choices](https://stanford.edu/~knutson/nfc/telpaz15.pdf) diff --git a/cerebro.ipynb b/cerebro.ipynb new file mode 100644 index 0000000..16592d2 --- /dev/null +++ b/cerebro.ipynb @@ -0,0 +1,704 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "#### Written and Copyright by Mohit Agarwal\n", + "#### Georgia Institute of Technology\n", + "#### Email: me.agmohit@gmail.com\n", + "\n", + "## Please cite the following publication if you are using the codes for your study and publication\n", + "# Agarwal, Mohit, and Raghupathy Sivakumar. \n", + "# \"Cerebro: A Wearable Solution to Detect and Track User Preferences using Brainwaves.\" \n", + "# The 5th ACM Workshop on Wearable Systems and Applications. ACM, 2019.\n", + "\n", + "### Code to decode Product Preference data [10 products] and rank it\n", + "### Uses fix rule based on N200, Min and ERSP features" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "##Importing Libraries\n", + "\n", + "import numpy as np\n", + "import math\n", + "import scipy.io\n", + "import os\n", + "from sklearn.decomposition import FastICA\n", + "import mne\n", + "from mne.time_frequency import psd_multitaper\n", + "import matplotlib.pyplot as plt\n", + "import random\n", + "import itertools\n", + "from scipy import stats\n", + "\n", + "from sklearn import svm\n", + "from sklearn.neighbors import KNeighborsClassifier\n", + "from sklearn.linear_model import LinearRegression\n", + "from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n", + "from sklearn.ensemble import RandomForestClassifier\n", + "from sklearn.tree import DecisionTreeClassifier\n", + "from sklearn.linear_model import ElasticNet\n", + "from sklearn.model_selection import cross_val_score\n", + "from sklearn.metrics import confusion_matrix\n", + "from sklearn.metrics import accuracy_score\n", + "\n", + "import matplotlib.pyplot as plt\n", + "\n", + "\n", + "mne.set_log_level('ERROR')" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "## Parameters\n", + "\n", + "chan_list = ['Fp1','F3','F7','C3','T7','P3','P7','O1','Pz','Fp2','Fz','F4','F8','Cz','C4','T8','P4','P8','O2']\n", + "\n", + "selected_chan = [1,3,8,10,11,13,14]\n", + "total_chan = len(selected_chan)\n", + "\n", + "total_sub = 14 # Anything from 1-14\n", + "total_prod = 10 \n", + "freq = 256.0\n", + "time_len = 768\n", + "\n", + "time = [(x-freq)*1.0/freq for x in xrange(1,time_len+1)]\n", + "time = np.array(time)\n", + "\n", + "n200_ind = [idx for idx, t_ind in enumerate(time) if (t_ind>=0.2 and t_ind<=0.3)]\n", + "n200_ind = np.array(n200_ind)\n", + "\n", + "erp_ind = [idx for idx, t_ind in enumerate(time) if (t_ind>=0.2 and t_ind<=0.3)]\n", + "erp_ind = np.array(erp_ind)\n", + "erp_len = len(erp_ind)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": 
[], + "source": [ + "# Loading data\n", + "data_dict = {}\n", + "data_dir = 'data/'\n", + "list_of_files = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]\n", + "for curr_file in list_of_files:\n", + " if '_data.mat' in curr_file:\n", + " sub_id = int(curr_file[1:3])\n", + " data_dict[sub_id] = scipy.io.loadmat(os.path.join(data_dir,curr_file))\n", + " if curr_file == 'WA2.mat':\n", + " WA2 = scipy.io.loadmat(os.path.join(data_dir,curr_file))['WA2']\n", + " WA2 = np.delete(WA2,11,0) # to remove 12th subject\n", + " if curr_file == 'outChoices.mat':\n", + " outChoices = scipy.io.loadmat(os.path.join(data_dir,curr_file))['out']\n", + "# WA2 contains 14(sub) x 10 (prod) : value represents # of times the product was chosen total\n", + "# outChoices : column 0: if prod_i was chosen? column 7: sub_id*100+prod_1 , col 8: sub_id*100+prod_2\n", + "choices = np.zeros([total_sub+1, total_prod+1, total_prod+1])\n", + "for idx in range(outChoices.shape[0]):\n", + " sub_id = int(outChoices[idx, 7]//100)\n", + " sub_id_2 = int(outChoices[idx, 8]//100)\n", + " if sub_id == 12:\n", + " continue\n", + " if sub_id > 12:\n", + " sub_id = sub_id - 1\n", + " sub_id_2 = sub_id_2 - 1\n", + " assert sub_id>0 and sub_id <= (total_sub+1) and sub_id == sub_id_2, \"Error 1: error decoding\"+str(sub_id)\n", + " prod_1 = int(outChoices[idx, 7]%100)\n", + " prod_2 = int(outChoices[idx, 8]%100)\n", + " assert prod_1 > 0 and prod_1 <= total_prod and prod_2 > 0 and prod_2 <= total_prod, \"Error 2: error decoding \"+str(prod_2)\n", + " if prod_1 > prod_2 or prod_1==prod_2:\n", + " print \"check it baby\", prod_1, prod_2\n", + " if outChoices[idx, 0] == 0:\n", + " choices[sub_id, prod_1, prod_2] = choices[sub_id, prod_1, prod_2] + 1\n", + " elif outChoices[idx, 0] == 1:\n", + " choices[sub_id, prod_2, prod_1] = choices[sub_id, prod_2, prod_1] + 1\n", + " \n", + " \n", + "pref = np.zeros([total_sub+1, total_prod+1])\n", + "pref[1:,1:] = WA2\n", + "#data_dict[sub_id]['sig'], ['lab'] contains original eeg signals (25x768x500) and labels (1x500)\n", + "# What are channels?" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "ncomp = 1\n", + "sig_data = np.zeros([total_sub+1, total_prod+1, 768, 7])\n", + "ica_data = np.zeros([total_sub+1, total_prod+1, 768, 1])\n", + "n200_val = np.zeros([total_sub+1, total_prod+1])\n", + "ersp_val = np.zeros([total_sub+1, total_prod+1])\n", + "feat_val = np.zeros([total_sub+1, total_prod+1])\n", + "for sub_id in range(1,total_sub+1):\n", + " sig = data_dict[sub_id]['sig']\n", + " lab = data_dict[sub_id]['lab'].flatten()\n", + " for prod_id in range(1,total_prod+1):\n", + " sig_prod = sig[selected_chan, :, np.argwhere(lab==prod_id)] \n", + " avg_sig_chan = np.transpose(np.mean(sig_prod, axis=0))\n", + " # compute ICA\n", + " ica_sig = FastICA(n_components=ncomp)\n", + " S_ = ica_sig.fit_transform(avg_sig_chan) # Get the estimated sources\n", + " A_sig = ica_sig.mixing_ # Get estimated mixing matrix\n", + " A_sig_norm = np.linalg.norm(A_sig,axis=0)\n", + " S_ = S_*A_sig_norm\n", + " \n", + " if sum(A_sig)<0:\n", + " S_ = -1*S_\n", + " \n", + " \n", + " \n", + " n200val = np.mean(S_[n200_ind])\n", + " featval = min(S_[erp_ind])\n", + " \n", + " info = mne.create_info(ch_names=['ica'], sfreq=freq, ch_types=['eeg'])\n", + " raw = mne.io.RawArray(np.transpose(S_[256:]), info)\n", + " psds, freqs = psd_multitaper(raw, low_bias=True, tmin=0.1, tmax=0.5, fmin=13, fmax=26, proj=True, n_jobs=1)\n", + " raw2 = mne.io.RawArray(np.transpose(S_), info)\n", + " psds2, freqs2 = psd_multitaper(raw2, low_bias=True, tmin=0.5, tmax=1.0, fmin=13, fmax=26, proj=True, n_jobs=1)\n", + " erspval = np.mean(10*np.log10(psds)) - np.mean(10*np.log10(psds2))\n", + "\n", + " n200_val[sub_id, prod_id] = 10*np.log10(1 + abs(n200val)**2)\n", + " ersp_val[sub_id, prod_id] = erspval\n", + " feat_val[sub_id, prod_id] = 10*np.log10(1 + featval**2)\n", + " \n", + " sig_data[sub_id, prod_id, :, :] = avg_sig_chan\n", + " ica_data[sub_id, prod_id, :, :] = S_\n", + "\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pairwise Accuracy [Max - Mean - std]\n", + "Validation \t 0.2531710586881039 0.21766517997334361 0.016270837736846972\n", + "Train \t 0.19017083810008625 0.15890894110548684 0.014288618519160958\n", + "Test \t 0.06923970322558004 -0.02751868787131508 0.03926872634262003\n", + "Ranking Accuracy Mean [Train - Test - Random]\n", + "Dist \t [0.46996599 0.67698413 0.88809524]\n", + "Tau \t [0.51007937 0.27103175 0.00595238]\n", + "NDCG \t [0.95695327 0.92002218 0.87499508]\n", + "Ranking Accuracy Max [Train - Test ]\n", + "Dist \t [0.36462585 0.33333333]\n", + "Tau \t [0.62585034 0.66666667]\n", + "NDCG \t [0.97544729 0.97425431]\n", + "Ranking Accuracy Std [Train - Test]\n", + "Dist \t [0.0393621 0.11796065]\n", + "Tau \t [0.04224192 0.1390764 ]\n", + "NDCG \t [0.00860341 0.03838353]\n" + ] + } + ], + "source": [ + "# Ranking Evaluation\n", + "\n", + "nsim = 1\n", + "\n", + "train_split = 7\n", + "test_split = 10 - train_split\n", + "\n", + "train_split_cv = 6\n", + "test_split_cv = train_split - train_split_cv\n", + "\n", + "total_splits = int(scipy.special.comb(10, train_split))\n", + "\n", + "total_samples_train = int(scipy.special.comb(train_split, 2))*2\n", + "total_samples_test = int(scipy.special.comb(test_split, 2))\n", + "\n", + "total_samples_train_cv = int(scipy.special.comb(train_split_cv, 2))*2\n", + "total_samples_test_cv = int(scipy.special.comb(test_split_cv, 2))\n", + 
"total_samples_train_rank_cv = int(scipy.special.comb(train_split_cv, 3))\n", + "\n", + "total_samples_train_rank = int(scipy.special.comb(train_split, 3))\n", + "total_samples_test_rank = int(scipy.special.comb(test_split, 3))\n", + "\n", + "X_train, y_train = np.zeros([total_samples_train, 3]), np.zeros([total_samples_train,])\n", + "X_train_rank, y_train_rank, y_train_rel = np.zeros([total_samples_train_rank, 9]), np.zeros([total_samples_train_rank,3]), np.zeros([total_samples_train_rank,3])\n", + "\n", + "X_test, y_test = np.zeros([total_samples_test, 3]), np.zeros([total_samples_test,])\n", + "X_test_rank, y_test_rank, y_test_rel = np.zeros([total_samples_test_rank, 9]), np.zeros([total_samples_test_rank, 3]), np.zeros([total_samples_test_rank, 3])\n", + "\n", + "rank_metrics = np.zeros([total_sub + 1, total_splits, 3, 3]) # rows: ML-train, ML-test, random col: avg dist, tau, NDCG\n", + "rank_count = np.zeros([total_sub + 1, total_splits, 3, 3])\n", + "\n", + "# Function to calculate NDCG score\n", + "def ndcg_score(rel, t_rank, p_rank):\n", + " dcg = sum(rel*1.0/np.log2(p_rank + 1))\n", + " idcg = sum(rel*1.0/np.log2(t_rank + 1))\n", + " return dcg*1.0/idcg\n", + "\n", + "def cv_train(sub_id, train_prod):\n", + "# Step 2: Preparing Training Dataset\n", + " cv_train_acc = []\n", + " for cv_train_prod_s in itertools.combinations(train_prod, train_split_cv):\n", + " cv_test_prod = list(set(train_prod)-set(cv_train_prod_s))\n", + " cv_train_prod = list(cv_train_prod_s)\n", + "\n", + " X_train_cv, y_train_cv = np.zeros([total_samples_train_cv, 3]), np.zeros([total_samples_train_cv,])\n", + " X_train_rank_cv, y_train_rank_cv, y_train_rel_cv = np.zeros([total_samples_train_rank_cv, 9]), np.zeros([total_samples_train_rank_cv,3]), np.zeros([total_samples_train_rank_cv,3])\n", + "# X_test_cv, y_test_cv = np.zeros([total_samples_test_cv, 3]), np.zeros([total_samples_test_cv,])\n", + "\n", + " sample_idx_train_cv = 0\n", + " for i in range(train_split_cv):\n", + " for j in range(i+1, train_split_cv):\n", + " prod_i, prod_j = cv_train_prod[i], cv_train_prod[j]\n", + "\n", + " y_train_cv[sample_idx_train_cv] = np.sign(pref[sub_id, prod_i] - pref[sub_id, prod_j])\n", + " X_train_cv[sample_idx_train_cv,:] = np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]]) - np.array([n200_val[sub_id, prod_j], feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]])\n", + "\n", + " sample_idx_train_cv = sample_idx_train_cv + 1\n", + " y_train_cv[sample_idx_train_cv] = np.sign(pref[sub_id, prod_j] - pref[sub_id, prod_i])\n", + " X_train_cv[sample_idx_train_cv,:] = np.array([n200_val[sub_id, prod_j], feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]]) - np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]])\n", + "\n", + " sample_idx_train_cv = sample_idx_train_cv + 1\n", + "\n", + " idx_train_cv = range(total_samples_train_cv)\n", + " random.shuffle(idx_train_cv)\n", + " X_train_cv = X_train_cv[idx_train_cv, :]\n", + " y_train_cv = y_train_cv[idx_train_cv, ]\n", + "\n", + " # Step 2A: Preparing Training dataset for training evaluation\n", + " sample_idx_train_rank_cv = 0\n", + " for i in range(train_split_cv):\n", + " for j in range(i+1, train_split_cv):\n", + " for k in range(j+1, train_split_cv):\n", + " prod_i, prod_j, prod_k = cv_train_prod[i], cv_train_prod[j], cv_train_prod[k]\n", + " if not ((pref[sub_id, prod_i] == pref[sub_id, prod_j]) or (pref[sub_id, prod_i] == pref[sub_id, prod_k]) or (pref[sub_id, prod_j] == pref[sub_id, prod_k])):\n", 
+ "\n", + " ground_pref_cv = [pref[sub_id, prod_i], pref[sub_id, prod_j], pref[sub_id, prod_k]] # [rank_i, rank_j, rank_k]\n", + "# print ground_pref_cv\n", + " ground_ordr_cv = np.argsort(ground_pref_cv)\n", + " ground_rank_cv = 3 - np.argsort(ground_ordr_cv) # rank 1 means top, rank 3 means bottom\n", + " y_train_rank_cv[sample_idx_train_rank_cv,:] = ground_rank_cv\n", + " y_train_rel_cv[sample_idx_train_rank_cv,:] = ground_pref_cv\n", + "\n", + " X_train_rank_cv[sample_idx_train_rank_cv,0:3] = np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]])\n", + " X_train_rank_cv[sample_idx_train_rank_cv,3:6] = np.array([n200_val[sub_id, prod_j], feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]])\n", + " X_train_rank_cv[sample_idx_train_rank_cv,6:9] = np.array([n200_val[sub_id, prod_k], feat_val[sub_id, prod_k], ersp_val[sub_id, prod_k]])\n", + " sample_idx_train_rank_cv = sample_idx_train_rank_cv + 1\n", + "\n", + "\n", + "\n", + " clf = ElasticNet(l1_ratio=0.05, alpha=0.1, normalize=True)\n", + " clf.fit(X_train_cv, y_train_cv) \n", + "\n", + " cv_train_acc.append(clf.score(X_train_cv, y_train_cv))\n", + "\n", + " for eval_idx_cv in range(sample_idx_train_rank_cv):\n", + " # Distance Metric\n", + "\n", + " pred_pref_cv = [clf.predict(np.reshape(X_train_rank_cv[eval_idx_cv,0:3],[1,-1]))[0], clf.predict(np.reshape(X_train_rank_cv[eval_idx_cv,3:6],[1,-1]))[0], clf.predict(np.reshape(X_train_rank_cv[eval_idx_cv,6:9],[1,-1]))[0]]\n", + " ord_pref_cv = np.argsort(pred_pref_cv)\n", + " pred_rank_cv = 3 - np.argsort(ord_pref_cv)\n", + " true_rank_cv = y_train_rank_cv[eval_idx_cv,]\n", + " dist_ml_train_cv = np.mean(abs(true_rank_cv - pred_rank_cv)) \n", + "\n", + " true_rel_cv = y_train_rel_cv[eval_idx_cv,]\n", + "\n", + " return cv_train_acc\n", + "\n", + "\n", + "\n", + "acc_metric = np.zeros([total_sub + 1, total_splits, 3])\n", + "acc_count = np.zeros([total_sub + 1, total_splits, 3])\n", + "\n", + "test_comb = []\n", + "\n", + "\n", + "\n", + "for sub_id in range(1,total_sub+1):\n", + " \n", + " prod_cnt = pref[sub_id,1:]\n", + " prod_ord = np.flip(np.argsort(prod_cnt))\n", + " sort_idx = 10 - np.argsort(prod_ord)\n", + " \n", + " for sim_idx in range(nsim):\n", + "\n", + " acc_idx = 0\n", + "\n", + " # Step 1: Choosing products for training/testing : All possible permutations/combinations\n", + " for subset in itertools.combinations(range(10), train_split):\n", + " train_prod = 1 + prod_ord[list(subset)]\n", + " \n", + " test_prod = list(set(1+prod_ord)-set(train_prod))\n", + " \n", + " subset_test = 1 + np.sort(list(set(range(10))-set(subset)))\n", + " \n", + "\n", + " if sub_id==2:\n", + " test_comb.append(subset_test)\n", + "\n", + " cv_train_acc = cv_train(sub_id, train_prod)#0\n", + " \n", + " \n", + " \n", + " sample_idx_train = 0\n", + " for i in range(train_split):\n", + " for j in range(i+1, train_split):\n", + " prod_i, prod_j = train_prod[i], train_prod[j]\n", + " if not pref[sub_id, prod_i] == pref[sub_id, prod_j]:\n", + "\n", + " y_train[sample_idx_train] = np.sign(pref[sub_id, prod_i] - pref[sub_id, prod_j])\n", + " X_train[sample_idx_train,:] = np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]]) - np.array([n200_val[sub_id, prod_j], feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]])\n", + "\n", + " sample_idx_train = sample_idx_train + 1\n", + " y_train[sample_idx_train] = np.sign(pref[sub_id, prod_j] - pref[sub_id, prod_i])\n", + " X_train[sample_idx_train,:] = np.array([n200_val[sub_id, prod_j], 
feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]]) - np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]])\n", + "\n", + " sample_idx_train = sample_idx_train + 1\n", + "\n", + " idx_train = range(total_samples_train)\n", + " random.shuffle(idx_train)\n", + " X_train = X_train[idx_train, :]\n", + " y_train = y_train[idx_train, ]\n", + " \n", + " # Step 2A: Preparing Training dataset for training evaluation\n", + " sample_idx_train_rank = 0\n", + " for i in range(train_split):\n", + " for j in range(i+1, train_split):\n", + " for k in range(j+1, train_split):\n", + " prod_i, prod_j, prod_k = train_prod[i], train_prod[j], train_prod[k]\n", + " if not ((pref[sub_id, prod_i] == pref[sub_id, prod_j]) or (pref[sub_id, prod_i] == pref[sub_id, prod_k]) or (pref[sub_id, prod_j] == pref[sub_id, prod_k])):\n", + " \n", + " ground_pref = [pref[sub_id, prod_i], pref[sub_id, prod_j], pref[sub_id, prod_k]] # [rank_i, rank_j, rank_k]\n", + " ground_ordr = np.argsort(ground_pref)\n", + " ground_rank = 3 - np.argsort(ground_ordr) # rank 1 means top, rank 3 means bottom\n", + " y_train_rank[sample_idx_train_rank,:] = ground_rank\n", + " y_train_rel[sample_idx_train_rank,:] = ground_pref\n", + " \n", + " X_train_rank[sample_idx_train_rank,0:3] = np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]])\n", + " X_train_rank[sample_idx_train_rank,3:6] = np.array([n200_val[sub_id, prod_j], feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]])\n", + " X_train_rank[sample_idx_train_rank,6:9] = np.array([n200_val[sub_id, prod_k], feat_val[sub_id, prod_k], ersp_val[sub_id, prod_k]])\n", + " sample_idx_train_rank = sample_idx_train_rank + 1\n", + " \n", + " # Step 3: Preparing Testing Dataset for pairwise evaluation\n", + " sample_idx_test = 0\n", + " for i in range(test_split):\n", + " for j in range(i+1, test_split):\n", + " prod_i, prod_j = test_prod[i], test_prod[j]\n", + " if not pref[sub_id, prod_i] == pref[sub_id, prod_j]:\n", + " y_test[sample_idx_test] = np.sign(pref[sub_id, prod_i] - pref[sub_id, prod_j])\n", + " X_test[sample_idx_test,:] = np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]]) - np.array([n200_val[sub_id, prod_j], feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]])\n", + "\n", + " sample_idx_test = sample_idx_test + 1\n", + " \n", + " # Step 3B: Preparing Testing Dataset for Ranking\n", + " sample_idx_test_rank = 0\n", + " for i in range(test_split):\n", + " for j in range(i+1, test_split):\n", + " for k in range(j+1, test_split):\n", + " prod_i, prod_j, prod_k = test_prod[i], test_prod[j], test_prod[k]\n", + " if not ((pref[sub_id, prod_i] == pref[sub_id, prod_j]) or (pref[sub_id, prod_i] == pref[sub_id, prod_k]) or (pref[sub_id, prod_j] == pref[sub_id, prod_k])):\n", + " \n", + " ground_pref = [pref[sub_id, prod_i], pref[sub_id, prod_j], pref[sub_id, prod_k]] # [rank_i, rank_j, rank_k]\n", + " ground_ordr = np.argsort(ground_pref)\n", + " ground_rank = 3 - np.argsort(ground_ordr) # rank 1 means top, rank 3 means bottom\n", + " y_test_rank[sample_idx_test_rank,:] = ground_rank\n", + " y_test_rel[sample_idx_test_rank,:] = ground_pref\n", + " \n", + " X_test_rank[sample_idx_test_rank,0:3] = np.array([n200_val[sub_id, prod_i], feat_val[sub_id, prod_i], ersp_val[sub_id, prod_i]])\n", + " X_test_rank[sample_idx_test_rank,3:6] = np.array([n200_val[sub_id, prod_j], feat_val[sub_id, prod_j], ersp_val[sub_id, prod_j]])\n", + " X_test_rank[sample_idx_test_rank,6:9] = np.array([n200_val[sub_id, 
prod_k], feat_val[sub_id, prod_k], ersp_val[sub_id, prod_k]])\n", + "\n", + " sample_idx_test_rank = sample_idx_test_rank + 1\n", + "\n", + "\n", + " # Step 4: Computing Accuracy for Pairwise \n", + " clf = ElasticNet(l1_ratio=0.05, alpha=0.1, normalize=True) \n", + "\n", + " clf.fit(X_train, y_train) \n", + "\n", + " classification_acc = [np.mean(cv_train_acc), clf.score(X_train, y_train), clf.score(X_test, y_test)]\n", + " acc_metric[sub_id, acc_idx, :] = acc_metric[sub_id, acc_idx, :] + classification_acc\n", + " acc_count[sub_id, acc_idx, :] = acc_count[sub_id, acc_idx, :] + [1,1,1]\n", + " \n", + " # Step 5: Computing Accuracy for Ranking \n", + " \n", + " \n", + " coef = clf.coef_.ravel() / np.linalg.norm(clf.coef_) \n", + " \n", + " # Step 5A: For Training Data\n", + " dist_ml_train, tau_ml_train, ndcg_ml_train = 0, 0, 0\n", + " for eval_idx in range(total_samples_train_rank):\n", + " # Distance Metric\n", + " pred_pref = [np.dot(X_train_rank[eval_idx,0:3],coef), np.dot(X_train_rank[eval_idx,3:6],coef), np.dot(X_train_rank[eval_idx,6:9],coef)]\n", + " ord_pref = np.argsort(pred_pref)\n", + " pred_rank = 3 - np.argsort(ord_pref)\n", + " true_rank = y_train_rank[eval_idx,]\n", + " dist_ml_train = dist_ml_train + np.mean(abs(true_rank - pred_rank)) \n", + " # Tau Metric\n", + " tau, _ = stats.kendalltau(pred_rank, true_rank)\n", + " tau_ml_train = tau_ml_train + tau\n", + " # NDCG Metric\n", + " true_rel = y_train_rel[eval_idx,]\n", + " ndcg_ml_train = ndcg_ml_train + ndcg_score(true_rel, true_rank, pred_rank)\n", + " \n", + "\n", + " rank_metrics[sub_id, acc_idx, 0, 0] = rank_metrics[sub_id, acc_idx, 0, 0] + dist_ml_train*1.0/total_samples_train_rank\n", + " rank_metrics[sub_id, acc_idx, 0, 1] = rank_metrics[sub_id, acc_idx, 0, 1] + tau_ml_train*1.0/total_samples_train_rank\n", + " rank_metrics[sub_id, acc_idx, 0, 2] = rank_metrics[sub_id, acc_idx, 0, 2] + ndcg_ml_train*1.0/total_samples_train_rank\n", + " rank_count[sub_id, acc_idx, 0, 0] = rank_count[sub_id, acc_idx, 0, 0] + 1\n", + " rank_count[sub_id, acc_idx, 0, 1] = rank_count[sub_id, acc_idx, 0, 1] + 1\n", + " rank_count[sub_id, acc_idx, 0, 2] = rank_count[sub_id, acc_idx, 0, 2] + 1\n", + " \n", + " dist_ml_test, tau_ml_test, ndcg_ml_test = 0, 0, 0\n", + " dist_rand, tau_rand, ndcg_rand = 0, 0, 0\n", + " for eval_idx in range(total_samples_test_rank):\n", + " pred_pref = [np.dot(X_test_rank[eval_idx,0:3],coef), np.dot(X_test_rank[eval_idx,3:6],coef), np.dot(X_test_rank[eval_idx,6:9],coef)] \n", + "\n", + " ord_pref = np.argsort(pred_pref)\n", + " pred_rank = 3 - np.argsort(ord_pref)\n", + " true_rank = y_test_rank[eval_idx,]\n", + " dist_ml_test = dist_ml_test + np.mean(abs(true_rank - pred_rank))\n", + " tau, _ = stats.kendalltau(pred_rank, true_rank)\n", + " tau_ml_test = tau_ml_test + tau\n", + " true_rel = y_test_rel[eval_idx,]\n", + " ndcg_ml_test = ndcg_ml_test + ndcg_score(true_rel, true_rank, pred_rank)\n", + " \n", + " pred_rand = np.random.uniform(0,1,[1,3])\n", + " ordr_rand = np.argsort(pred_rand)\n", + " rank_rand = 3 - np.argsort(ordr_rand)\n", + " rank_rand = rank_rand.flatten()\n", + " rank_worst = np.array([true_rank[1], true_rank[0], true_rank[2]])\n", + " dist_rand = dist_rand + np.mean(abs(true_rank - rank_rand))\n", + " tau, _ = stats.kendalltau(rank_rand, true_rank)\n", + " tau_rand = tau_rand + tau\n", + " ndcg_rand = ndcg_rand + ndcg_score(true_rel, true_rank, rank_rand)\n", + " \n", + " rank_metrics[sub_id, acc_idx, 1, 0] = rank_metrics[sub_id, acc_idx, 1, 0] + 
dist_ml_test*1.0/total_samples_test_rank\n", + " rank_metrics[sub_id, acc_idx, 1, 1] = rank_metrics[sub_id, acc_idx, 1, 1] + tau_ml_test*1.0/total_samples_test_rank\n", + " rank_metrics[sub_id, acc_idx, 1, 2] = rank_metrics[sub_id, acc_idx, 1, 2] + ndcg_ml_test*1.0/total_samples_test_rank\n", + " rank_count[sub_id, acc_idx, 1, 0] = rank_count[sub_id, acc_idx, 1, 0] + 1 \n", + " rank_count[sub_id, acc_idx, 1, 1] = rank_count[sub_id, acc_idx, 1, 1] + 1\n", + " rank_count[sub_id, acc_idx, 1, 2] = rank_count[sub_id, acc_idx, 1, 2] + 1\n", + " \n", + " rank_metrics[sub_id, acc_idx, 2, 0] = rank_metrics[sub_id, acc_idx, 2, 0] + dist_rand*1.0/total_samples_test_rank\n", + " rank_metrics[sub_id, acc_idx, 2, 1] = rank_metrics[sub_id, acc_idx, 2, 1] + tau_rand*1.0/total_samples_test_rank\n", + " rank_metrics[sub_id, acc_idx, 2, 2] = rank_metrics[sub_id, acc_idx, 2, 2] + ndcg_rand*1.0/total_samples_test_rank\n", + " rank_count[sub_id, acc_idx, 2, 0] = rank_count[sub_id, acc_idx, 2, 0] + 1 \n", + " rank_count[sub_id, acc_idx, 2, 1] = rank_count[sub_id, acc_idx, 2, 1] + 1\n", + " rank_count[sub_id, acc_idx, 2, 2] = rank_count[sub_id, acc_idx, 2, 2] + 1\n", + " \n", + " acc_idx = acc_idx + 1\n", + "\n", + "\n", + "acc_metric[1:,:,:] = acc_metric[1:,:,:]/acc_count[1:,:,:]\n", + "rank_metrics[1:,:,:,:] = rank_metrics[1:,:,:,:]/rank_count[1:,:,:,:]\n", + "\n", + "\n", + "\n", + "# acc_metric_persub = acc_metric\n", + "acc_metric_subavg = np.mean(acc_metric[1:,:,:],axis=0)\n", + "# rank_metrics_persub = rank_metrics\n", + "rank_metrics_subavg = np.mean(rank_metrics[1:,:,:,:],axis=0)\n", + "\n", + "print \"Pairwise Accuracy [Max - Mean - std]\"\n", + "print \"Validation \\t \", max(acc_metric_subavg[:,0]), np.mean(acc_metric_subavg[:,0]), np.std(acc_metric_subavg[:,0])\n", + "print \"Train \\t \", max(acc_metric_subavg[:,1]), np.mean(acc_metric_subavg[:,1]), np.std(acc_metric_subavg[:,1])\n", + "print \"Test \\t \", max(acc_metric_subavg[:,2]), np.mean(acc_metric_subavg[:,2]), np.std(acc_metric_subavg[:,2])\n", + "\n", + "print \"Ranking Accuracy Mean [Train - Test - Random]\"\n", + "print \"Dist \\t \", np.mean(rank_metrics_subavg[:,:,0],axis=0)\n", + "print \"Tau \\t \", np.mean(rank_metrics_subavg[:,:,1],axis=0)\n", + "print \"NDCG \\t \", np.mean(rank_metrics_subavg[:,:,2],axis=0)\n", + "\n", + "# For random, max and std - does not make sense\n", + "print \"Ranking Accuracy Max [Train - Test ]\"\n", + "print \"Dist \\t \", np.min(rank_metrics_subavg[:,0:2,0],axis=0)\n", + "print \"Tau \\t \", np.max(rank_metrics_subavg[:,0:2,1],axis=0)\n", + "print \"NDCG \\t \", np.max(rank_metrics_subavg[:,0:2,2],axis=0)\n", + "\n", + "print \"Ranking Accuracy Std [Train - Test]\"\n", + "print \"Dist \\t \", np.std(rank_metrics_subavg[:,0:2,0],axis=0)\n", + "print \"Tau \\t \", np.std(rank_metrics_subavg[:,0:2,1],axis=0)\n", + "print \"NDCG \\t \", np.std(rank_metrics_subavg[:,0:2,2],axis=0)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Top 5 Subjects: Avg NDCG, std 0.9729758779950863 0.0007864680056827411\n", + "Top 5 Subjects: Avg MD, std 0.42857142857142855 0.05216405309573009\n", + "Top 5 Combinations: Avg NDCG, std 0.9609647540217887 0.0044628265143876475\n", + "Top 5 Combinations: Avg MD, std 0.4777777777777775 0.020786985482077337\n" + ] + } + ], + "source": [ + "avgrank_persub = np.mean(rank_metrics[1:,:,1,:], axis=0)\n", + "avg_ndcg_persub_top5 = np.sort(avgrank_persub[:,2])[-5:]\n", + "print \"Top 
5 Subjects: Avg NDCG, std\", np.mean(avg_ndcg_persub_top5), np.std(avg_ndcg_persub_top5)\n", + "avg_md_persub_top5 = np.sort(avgrank_persub[:,0])[:5]\n", + "print \"Top 5 Subjects: Avg MD, std\", np.mean(avg_md_persub_top5), np.std(avg_md_persub_top5)\n", + "\n", + "avgrank_percomb = np.mean(rank_metrics[1:,:,1,:], axis=1)\n", + "avg_ndcg_percomb_top5 = np.sort(avgrank_percomb[:,2])[-5:]\n", + "print \"Top 5 Combinations: Avg NDCG, std\", np.mean(avg_ndcg_percomb_top5), np.std(avg_ndcg_percomb_top5)\n", + "avg_md_percomb_top5 = np.sort(avgrank_percomb[:,0])[:5]\n", + "print \"Top 5 Combinations: Avg MD, std\", np.mean(avg_md_percomb_top5), np.std(avg_md_percomb_top5)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX0AAAD8CAYAAACb4nSYAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAE5ZJREFUeJzt3X+s3fd91/Hnq9e10FZCguyWEcexJzmUIcqSnrlcQrq7hhQDU/IHU3XTTWsmUQvaBFSUokRIZXJUBTFNZWNWkdttIkDrRdYaeZDVsZJddVQ3la9pSrFNUs/d8DWDeGkyFIpw477545yrnZzYPsf2uffccz/Ph3R07vfz/Xyv3+er49f9nM/5/khVIUlqw9smXYAkae0Y+pLUEENfkhpi6EtSQwx9SWqIoS9JDTH0Jakhhr4kNcTQl6SGbJp0AYO2bNlSO3bsmHQZkjRVjh8//kdVtXVYv3UX+jt27GBpaWnSZUjSVEnyB6P0c3pHkhoyUugn2ZPkxSSnkzxyifWfSfJC7/FSktf61m1P8kySU0lOJtkxvvIlSVdj6PROkhlgP3APsAwcS3K4qk6u9KmqT/T1fwi4ve9XPAF8uqqOJnkH8P1xFS9JujqjjPR3A6er6kxVXQAOAvddof/9wBcBkvwIsKmqjgJU1etV9d3rrFmSdI1GCf2bgbN9y8u9trdIciuwE3iu13Qb8FqS30zy9SS/0PvkMLjd3iRLSZbOnz9/da9AkjSycX+ROw8cqqqLveVNwF3Aw8CPAT8MPDC4UVUdqKpOVXW2bh16xJEk6RqNEvrngFv6lrf12i5lnt7UTs8y8EJvaugN4CngjmspdBSLi/D4491nSdJbjXKc/jFgV5KddMN+HvjwYKck7wZuAhYHtr0xydaqOg98AFiVg/AXF+Huu+HCBdi8GZ59FmZnV+NfkqTpNXSk3xuhPwgcAU4BT1bViST7ktzb13UeOFh9N93tTfM8DDyb5JtAgM+N8wWsWFjoBv7Fi93nhYXV+FckabqNdEZuVT0NPD3Q9qmB5Z+/zLZHgfdcY30jm5vrjvBXRvpzc6v9L0rS9Fl3l2G4VrOz3SmdhYVu4Du1I0lvtWFCH7pBb9hL0uV57R1JaoihL0kNMfQlqSGGviQ1xNCXpIYY+pLUEENfkhpi6EtSQwx9SWqIoS9JDTH0Jakhhr4kNcTQl6SGGPqS1BBDX5IaYuhLUkMMfUlqiKEvSQ0x9CWpIYa+JDVkpNBPsifJi0lOJ3nkEus/k+SF3uOlJK8NrL8hyXKSXxlX4ZKkq7dpWIckM8B+4B5gGTiW5HBVnVzpU1Wf6Ov/EHD7wK95DPjKWCqWJF2zUUb6u4HTVXWmqi4AB4H7rtD/fuCLKwtJ3gu8C3jmegqVJF2/UUL/ZuBs3/Jyr+0tktwK7ASe6y2/DfhF4OHrK1OSNA7j/iJ3HjhUVRd7yx8Dnq6q5SttlGRvkqUkS+fPnx9zSZKkFUPn9IFzwC19y9t6bZcyD3y8b3kWuCvJx4B3AJuTvF5Vb/oyuKoOAAcAOp1OjVi7JOkqjRL6x4BdSXbSDft54MODnZK8G7gJWFxpq6qf7lv/ANAZDHxJ0toZOr1TVW8ADwJHgFPAk1V1Ism+JPf2dZ0HDlaVI3VJWqey3jK60+nU0tLSpMuQpKmS5HhVdYb184xcSWqIoS9JDTH0Jakhhr4kNcTQl6SGGPqS1BBDX5IaYuhLUkMMfUlqiKEvSQ0x9CWpIYa+JDXE0Jekhhj6ktQQQ1+SGmLoS1JDDH1JaoihL0kNMfQlqSGGviQ1xNCXpIYY+kMsLsLjj3efJWnabRqlU5I9wC8BM8Dnq+qfD6z/DPATvcUfAN5ZVTcm+VHgs8ANwEXg01X1G+MqfrUtLsLdd8OFC7B5Mzz7LMzOTroqSbp2Q0M/yQywH7gHWAaOJTlcVSdX+lTVJ/r6PwTc3lv8LvCzVfWtJH8eOJ7kSFW9Ns4XsVoWFrqBf/Fi93lhwdCXNN1Gmd7ZDZyuqjNVdQE4CNx3hf73A18EqKqXqupbvZ//B/AysPX6Sl47c3PdEf7MTPd5bm7SFUnS9Rlleudm4Gzf8jLwvkt1THIrsBN47hLrdgObgd+7+jInY3a2O6WzsNANfEf5kqbdSHP6V2EeOFRVF/sbk/wQ8G+Bj1TV9wc3SrIX2Auwffv2MZd0fWZnDXtJG8co0zvngFv6lrf12i5lnt7UzookNwD/EfinVfX8pTaqqgNV1amqztatUzP7I0lTZ5TQPwbsSrIzyWa6wX54sFOSdwM3AYt9bZuBLwFPVNWh8ZQsSbpWQ0O/qt4AHgSOAKeAJ6vqRJJ9Se7t6zoPHKyq6mv7EPB+4IEkL/QePzrG+iVJVyFvzujJ63Q6tbS0NOkyJGmqJDleVZ1h/TwjV5IaYuhL0jqwVpd8Gfchm5Kkq7SWl3xxpC9JE3apS76sFkNfkiZsLS/54vSOJE3YWl7yxdCXpHVgrS754vSOJDXE0Jekhhj6ktQQQ1+SGmLoS1JDDH1JaoihL0kNMfQlqSGGviQ1xNCXpIYY+pLUEENfkhpi6EtSQwx9SWqIoS9JDRkp9JPsSfJiktNJHrnE+s8keaH3eCnJa33rPpLkW73HR8ZZ/DRbq5sgS1K/oTdRSTID7AfuAZaBY0kOV9XJlT5V9Ym+/g8Bt/d+/rPAPwM6QAHHe9u+OtZXMWXW8ibIktRvlJH+buB0VZ2pqgvAQeC+K/S/H
/hi7+e/CRytqu/0gv4osOd6Ct4I1vImyJLUb5TQvxk427e83Gt7iyS3AjuB565225as5U2QJanfuO+ROw8cqqqLV7NRkr3AXoDt27ePuaT1Zy1vgixJ/UYJ/XPALX3L23ptlzIPfHxg27mBbRcGN6qqA8ABgE6nUyPUNPXW6ibIktRvlOmdY8CuJDuTbKYb7IcHOyV5N3AT0H88yhHgg0luSnIT8MFemyRpAoaO9KvqjSQP0g3rGeDXqupEkn3AUlWt/AGYBw5WVfVt+50kj9H9wwGwr6q+M96XIEkaVfoyel3odDq1tLQ06TIkaaokOV5VnWH9PCNXkhpi6EtSQwx9SWqIoS9JDTH0Jakhhr4kNcTQl6SGGPqS1BBDX5IaYuhLUkMMfUlqiKEvSQ0x9CXpKiwuwuOPd5+n0bjvnCVJG9biItx9d/fe1ps3d++AN203Q3KkL0kjWljoBv7Fi93nhYVJV3T1DH1JGtHcXHeEPzPTfZ6bm3RFV8/pHUka0exsd0pnYaEb+NM2tQOGviRdldnZ6Qz7FU7vSFJDDH1JaoihL0kNMfQlqSEjhX6SPUleTHI6ySOX6fOhJCeTnEjyhb72f9FrO5Xkl5NkXMVLkq7O0KN3kswA+4F7gGXgWJLDVXWyr88u4FHgzqp6Nck7e+1/DbgTeE+v638CfhxYGOeLkKRLWVyc7sMrV8Moh2zuBk5X1RmAJAeB+4CTfX0+CuyvqlcBqurlXnsBfwrYDAR4O/C/xlO6JF3eRrhkwmoYZXrnZuBs3/Jyr63fbcBtSb6a5PkkewCqahH4HeAPe48jVXXq+suWpCvbCJdMWA3jOjlrE7ALmAO2AV9J8peBLcBf7LUBHE1yV1X9bv/GSfYCewG2b98+ppLa5MdZqWvlkgkrI/1pvGTCahgl9M8Bt/Qtb+u19VsGvlZV3wO+neQl/uSPwPNV9TpAkt8GZoE3hX5VHQAOAHQ6nbr6lyHw46zUbyNcMmE1jDK9cwzYlWRnks3APHB4oM9TdAOeJFvoTvecAf478ONJNiV5O90vcZ3eWSV+nJXebHYWHn3UwO83NPSr6g3gQeAI3cB+sqpOJNmX5N5etyPAK0lO0p3D/2RVvQIcAn4P+CbwDeAbVfVbq/A6xMa4AqCk1ZWq9TWb0ul0amlpadJlTC3n9KU2JTleVZ1h/bzK5gYz7VcAlLS6vAyDJDXE0Jekhhj6ktQQQ1+SGmLoS1JDDH1JaoihL0kNMfQlqSGGviQ1xNCXpIYY+pLUEENfkhpi6EtSQwx9SWqIoS9JDTH0Jakhhr4kNcTQlzRxi4vw+OPdZ60ub5coaaIWF+Huu+HCBdi8GZ591lt+riZH+pImamGhG/gXL3afFxYmXdHGZuhLmqi5ue4If2am+zw3N+mKNraRQj/JniQvJjmd5JHL9PlQkpNJTiT5Ql/79iTPJDnVW79jPKVL2ghmZ7tTOo895tTOWhg6p59kBtgP3AMsA8eSHK6qk319dgGPAndW1atJ3tn3K54APl1VR5O8A/j+WF+BpKk3O2vYr5VRRvq7gdNVdaaqLgAHgfsG+nwU2F9VrwJU1csASX4E2FRVR3vtr1fVd8dWvSTpqowS+jcDZ/uWl3tt/W4Dbkvy1STPJ9nT1/5akt9M8vUkv9D75CBJmoBxfZG7CdgFzAH3A59LcmOv/S7gYeDHgB8GHhjcOMneJEtJls6fPz+mkiRJg0YJ/XPALX3L23pt/ZaBw1X1var6NvAS3T8Cy8ALvamhN4CngDsG/4GqOlBVnarqbN269Vpeh1aZJ89IG8MoJ2cdA3Yl2Uk37OeBDw/0eYruCP/Xk2yhO61zBngNuDHJ1qo6D3wAWBpX8VobnjwjbRxDR/q9EfqDwBHgFPBkVZ1Isi/Jvb1uR4BXkpwEfgf4ZFW9UlUX6U7tPJvkm0CAz63GC9Hq8eQZaeMY6TIMVfU08PRA26f6fi7gH/ceg9seBd5zfWVqklZOnlkZ6XvyjDS9vPaOhlo5eWZhoRv4Tu1I08vQ10g8eUbaGLz2jiQ1xNCXpIYY+pLUEENfkhpi6EtSQwx9TYyXdpDWnodsaiK8tIM0GY70NRFe2kGaDENfE+F9UaXJcHpHE+GlHaTJMPQ1MV7aQVp7Tu9IUkMMfUlqiKEvSQ0x9CWpIYa+JDXE0Jekhhj6ktQQQ1+SGmLoS1JDRgr9JHuSvJjkdJJHLtPnQ0lOJjmR5AsD625IspzkV8ZRtCTp2gy9DEOSGWA/cA+wDBxLcriqTvb12QU8CtxZVa8meefAr3kM+Mr4ypYkXYtRRvq7gdNVdaaqLgAHgfsG+nwU2F9VrwJU1csrK5K8F3gX8Mx4SpauzJuzSJc3ygXXbgbO9i0vA+8b6HMbQJKvAjPAz1fVl5O8DfhF4GeAv3H95UpX5s1ZpCsb1xe5m4BdwBxwP/C5JDcCHwOerqrlK22cZG+SpSRL58+fH1NJapE3Z1ldfoqafqOM9M8Bt/Qtb+u19VsGvlZV3wO+neQlun8EZoG7knwMeAewOcnrVfWmL4Or6gBwAKDT6dQ1vRKJP7k5y8pI35uzjI+fojaGUUb6x4BdSXYm2QzMA4cH+jxFd5RPki10p3vOVNVPV9X2qtoBPAw8MRj40jit3JzlsccMpXHzU9TGMHSkX1VvJHkQOEJ3vv7XqupEkn3AUlUd7q37YJKTwEXgk1X1ymoWLl2ON2dZHX6K2hhStb5mUzqdTi0tLU26DEmXsLjoLS7XqyTHq6ozrJ+3S5Q0Mj9FTT8vwyBtQB5lo8txpC9tMB5loytxpC9tMB5loysx9KURTcuUycpRNjMzHmWjt3J6RxrBNE2ZrJyr4FE2uhRDXxrBpaZM1nOYepSNLsfpHWkETploo3CkL41gNadMPOFJa8nQl0a0GlMm0/RdgTYGp3ekCfLwSq01Q1+aIL8r0FpzekeaIA+v1Foz9KUJ8/BKrSWndySpIYa+JDXE0Jekhhj6ktQQQ1+SGmLoS1JD1t2N0ZOcB/7gOn7FFuCPxlTORuT+Gc59dGXun+EmsY9uraqtwzqtu9C/XkmWRrkjfKvcP8O5j67M/TPcet5HTu9IUkMMfUlqyEYM/QOTLmCdc/8M5z66MvfPcOt2H224OX1J0uVtxJG+JOkypjb0k8wk+XqS/3CJdQ8kOZ/khd7j702ixklK8vtJvtl7/UuXWJ8kv5zkdJL/kuSOSdQ5KSPsn7kkf9z3HvrUJOqcpCQ3JjmU5L8lOZVkdmB96++hYftnXb6HpvnSyv8IOAXccJn1v1FVD65hPevRT1TV5Y4V/lvArt7jfcBne88tudL+AfjdqvrJNatm/fkl4MtV9VNJNgM/MLC+9ffQsP0D6/A9NJUj/STbgL8DfH7StUyx+4Anqut54MYkPzTporQ+JPkzwPuBXwWoqgtV9dpAt2bfQyPun3VpKkMf+JfAPwG+f4U+f7f3kfNQklvWqK71pIBnkhxPsvcS628GzvYtL/faWjFs/wDMJvlGkt9O8pfWsrh1YCdwHvj13jTq55P84ECflt9Do+wfWIfvoakL/SQ/
CbxcVcev0O23gB1V9R7gKPBv1qS49eWvV9UddD+CfzzJ+ydd0DozbP/8Z7qntf8V4F8BT611gRO2CbgD+GxV3Q78H+CRyZa0royyf9ble2jqQh+4E7g3ye8DB4EPJPl3/R2q6pWq+n+9xc8D713bEievqs71nl8GvgTsHuhyDuj/BLSt19aEYfunqv53Vb3e+/lp4O1Jtqx5oZOzDCxX1dd6y4fohly/lt9DQ/fPen0PTV3oV9WjVbWtqnYA88BzVfUz/X0G5hXvpfuFbzOS/GCSP73yM/BB4L8OdDsM/GzvCIy/CvxxVf3hGpc6EaPsnyR/Lkl6P++m+3/llbWudVKq6n8CZ5P8hV7T3cDJgW7NvodG2T/r9T00zUfvvEmSfcBSVR0G/mGSe4E3gO8AD0yytgl4F/Cl3vttE/CFqvpykr8PUFX/Gnga+NvAaeC7wM9NqNZJGGX//BTwD5K8AfxfYL7aO5PxIeDf945MOQP8nO+hNxm2f9ble8gzciWpIVM3vSNJunaGviQ1xNCXpIYY+pLUEENfkhpi6EtSQwx9SWqIoS9JDfn/7EajSIEsm8sAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "## Plot the MD pattern as per combinations used in test rank\n", + "\n", + "chosen_metric = rank_metrics[:,:,1,0]\n", + "res = np.zeros([120*14,2])\n", + "\n", + "idx = 0\n", + "for sub_id in range(1, total_sub+1):\n", + " for comb_id in range(0,120):\n", + " prod_cnt = np.sort(pref[sub_id,1:])\n", + "# te_comb = test_comb[comb_id]-1\n", + "# tr_comb = list(set(range(10))-set(te_comb))\n", + "# myprod_pref = prod_cnt[te_comb]\n", + "# res[idx, 0] = np.mean(np.diff(myprod_pref))\n", + " te_comb = test_comb[comb_id]\n", + " tr_comb = list(set(range(1,11))-set(te_comb))\n", + "# print tr_comb, te_comb\n", + " res[idx, 0] = np.mean(tr_comb)\n", + " res[idx, 1] = chosen_metric[sub_id, comb_id]\n", + " idx = idx + 1\n", + "\n", + "resolution = 0.15\n", + "val_min = int((min(res[:,0]) + 0.5)*100)\n", + "val_max = int((max(res[:,0]) - 0.5)*100 + resolution*100)\n", + "interval = 1.0\n", + "\n", + "for idx in range(val_min,val_max,int(resolution*100)):\n", + " range_min = idx/100.0 - 0.5\n", + " range_max = idx/100.0 + 0.5\n", + " xx = (res[:,0]range_min)\n", + " yy = np.mean(res[np.argwhere(xx),1])\n", + " plt.plot(idx/100.0,yy,'b.')" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Best and Worst testing/Training Accuracies\n", + "0.4714285714285715 0.8809523809523808\n", + "0.7304761904761905 0.19918367346938773\n" + ] + } + ], + "source": [ + "max_ind = 10\n", + "\n", + "best_acc, worst_acc = [], []\n", + "\n", + "best_acc_tr, worst_acc_tr = [], []\n", + "\n", + "for sub_idx in range(1, total_sub+1):\n", + " chosen_metric_test = (rank_metrics[sub_idx, :,1,0])\n", + "# chosen_metric_train = (rank_metrics[sub_idx, :,0,2])\n", + " chosen_metric_train = (rank_metrics[sub_idx, :, 0, 0])# - (acc_metric[sub_idx, :, 0])) #abs(rank_metrics[sub_idx, :, 0,2])\n", + "# chosen_metric_train = (rank_metrics[sub_idx, :, 0, 2]) #acc_metric[sub_idx, :, 0]\n", + "\n", + " sort_ind_train = np.argsort(chosen_metric_train)\n", + "\n", + "# print chosen_metric_train[sort_ind_train]\n", + "\n", + " worst_ind_train = sort_ind_train[:max_ind]\n", + " best_ind_train = list(sort_ind_train[-max_ind:])\n", + "\n", + " chosen_metric_test_val = np.array(chosen_metric_test).flatten()\n", + " chosen_metric_train_val = np.array(chosen_metric_train).flatten()\n", + "\n", + " best_train_idx = np.array(best_ind_train).flatten()\n", + " best_acc.append(np.mean(chosen_metric_test_val[best_train_idx]))\n", + " best_acc_tr.append(np.mean(chosen_metric_train_val[best_train_idx]))\n", + "\n", + " worst_train_idx = np.array(worst_ind_train).flatten()\n", + " worst_acc.append(np.mean(chosen_metric_test_val[worst_train_idx]))\n", + " worst_acc_tr.append(np.mean(chosen_metric_train_val[worst_train_idx]))\n", + " \n", + "# print best_acc\n", + "# print worst_acc\n", + "\n", + "print \"Best and Worst testing/Training Accuracies\"\n", + "print np.mean(best_acc), np.mean(worst_acc)\n", + "print np.mean(best_acc_tr), np.mean(worst_acc_tr)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": 
"python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.15" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/data/README.md b/data/README.md new file mode 100644 index 0000000..21a1926 --- /dev/null +++ b/data/README.md @@ -0,0 +1,10 @@ +# Data +Readme for the dataset in the paper Predicting Future Choices + + +## EEG Data +14 .mat files for subjects 1-11,13-15 (pre-processed) including +* sig: 25x768x500 (raw-preprocessed EEG signals) +* lab: 500x1 (product id) + +`outChoices.mat` and `WA2.mat` diff --git a/data/S01_data.mat b/data/S01_data.mat new file mode 100644 index 0000000..06242ca Binary files /dev/null and b/data/S01_data.mat differ diff --git a/data/S02_data.mat b/data/S02_data.mat new file mode 100644 index 0000000..c4f6f96 Binary files /dev/null and b/data/S02_data.mat differ diff --git a/data/S03_data.mat b/data/S03_data.mat new file mode 100644 index 0000000..d32a2da Binary files /dev/null and b/data/S03_data.mat differ diff --git a/data/S04_data.mat b/data/S04_data.mat new file mode 100644 index 0000000..6fd874a Binary files /dev/null and b/data/S04_data.mat differ diff --git a/data/S05_data.mat b/data/S05_data.mat new file mode 100644 index 0000000..e234c68 Binary files /dev/null and b/data/S05_data.mat differ diff --git a/data/S06_data.mat b/data/S06_data.mat new file mode 100644 index 0000000..8577131 Binary files /dev/null and b/data/S06_data.mat differ diff --git a/data/S07_data.mat b/data/S07_data.mat new file mode 100644 index 0000000..7031f3e Binary files /dev/null and b/data/S07_data.mat differ diff --git a/data/S08_data.mat b/data/S08_data.mat new file mode 100644 index 0000000..de0b855 Binary files /dev/null and b/data/S08_data.mat differ diff --git a/data/S09_data.mat b/data/S09_data.mat new file mode 100644 index 0000000..a0b8c16 Binary files /dev/null and b/data/S09_data.mat differ diff --git a/data/S10_data.mat b/data/S10_data.mat new file mode 100644 index 0000000..a0f591f Binary files /dev/null and b/data/S10_data.mat differ diff --git a/data/S11_data.mat b/data/S11_data.mat new file mode 100644 index 0000000..dc12ae4 Binary files /dev/null and b/data/S11_data.mat differ diff --git a/data/S12_data.mat b/data/S12_data.mat new file mode 100644 index 0000000..162a1a8 Binary files /dev/null and b/data/S12_data.mat differ diff --git a/data/S13_data.mat b/data/S13_data.mat new file mode 100644 index 0000000..ab78be1 Binary files /dev/null and b/data/S13_data.mat differ diff --git a/data/S14_data.mat b/data/S14_data.mat new file mode 100644 index 0000000..e634687 Binary files /dev/null and b/data/S14_data.mat differ diff --git a/data/WA2.mat b/data/WA2.mat new file mode 100644 index 0000000..2815067 Binary files /dev/null and b/data/WA2.mat differ diff --git a/data/outChoices.mat b/data/outChoices.mat new file mode 100644 index 0000000..5435794 Binary files /dev/null and b/data/outChoices.mat differ diff --git a/fixrule.ipynb b/fixrule.ipynb new file mode 100644 index 0000000..9b7ce67 --- /dev/null +++ b/fixrule.ipynb @@ -0,0 +1,421 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "#### Written and Copyright by Mohit Agarwal\n", + "#### Georgia Institute of Technology\n", + "#### Email: me.agmohit@gmail.com\n", + "\n", + "## Please cite the following publication if you are using the codes for your study and publication\n", + "# Agarwal, Mohit, and Raghupathy Sivakumar. 
\n", + "# \"Cerebro: A Wearable Solution to Detect and Track User Preferences using Brainwaves.\" \n", + "# The 5th ACM Workshop on Wearable Systems and Applications. ACM, 2019.\n", + "\n", + "### Code to decode Product Preference data [10 products] and rank it\n", + "### Uses fix rule based on N200, Min and ERSP features" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "##Importing Libraries\n", + "\n", + "import numpy as np\n", + "import scipy.io\n", + "import os\n", + "from sklearn.decomposition import FastICA\n", + "import mne\n", + "from mne.time_frequency import psd_multitaper\n", + "import matplotlib.pyplot as plt\n", + "from scipy import stats\n", + "\n", + "mne.set_log_level('ERROR')" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "## Parameters\n", + "\n", + "chan_list = ['Fp1','F3','F7','C3','T7','P3','P7','O1','Pz','Fp2','Fz','F4','F8','Cz','C4','T8','P4','P8','O2']\n", + "\n", + "selected_chan = [1,3,8,10,11,13,14]\n", + "total_chan = len(selected_chan)\n", + "\n", + "total_sub = 14 # Anything from 1-14\n", + "total_prod = 10 \n", + "freq = 256.0\n", + "time_len = 768\n", + "\n", + "time = [(x-freq)*1.0/freq for x in xrange(1,time_len+1)]\n", + "time = np.array(time)\n", + "\n", + "n200_ind = [idx for idx, t_ind in enumerate(time) if (t_ind>=0.2 and t_ind<=0.3)]\n", + "n200_ind = np.array(n200_ind)\n", + "\n", + "erp_ind = [idx for idx, t_ind in enumerate(time) if (t_ind>=0.2 and t_ind<=0.3)]\n", + "erp_ind = np.array(erp_ind)\n", + "erp_len = len(erp_ind)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# Loading data\n", + "data_dict = {}\n", + "data_dir = 'data/'\n", + "list_of_files = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]\n", + "for curr_file in list_of_files:\n", + " if '_data.mat' in curr_file:\n", + " sub_id = int(curr_file[1:3])\n", + " data_dict[sub_id] = scipy.io.loadmat(os.path.join(data_dir,curr_file))\n", + " if curr_file == 'WA2.mat':\n", + " WA2 = scipy.io.loadmat(os.path.join(data_dir,curr_file))['WA2']\n", + " WA2 = np.delete(WA2,11,0) # to remove 12th subject\n", + " if curr_file == 'outChoices.mat':\n", + " outChoices = scipy.io.loadmat(os.path.join(data_dir,curr_file))['out']\n", + "# WA2 contains 14(sub) x 10 (prod) : value represents # of times the product was chosen total\n", + "# outChoices : column 0: if prod_i was chosen? 
column 7: sub_id*100+prod_1 , col 8: sub_id*100+prod_2\n", + "choices = np.zeros([total_sub+1, total_prod+1, total_prod+1])\n", + "for idx in range(outChoices.shape[0]):\n", + " sub_id = int(outChoices[idx, 7]//100)\n", + " sub_id_2 = int(outChoices[idx, 8]//100)\n", + " if sub_id == 12:\n", + " continue\n", + " if sub_id > 12:\n", + " sub_id = sub_id - 1\n", + " sub_id_2 = sub_id_2 - 1\n", + " assert sub_id>0 and sub_id <= (total_sub+1) and sub_id == sub_id_2, \"Error 1: error decoding\"+str(sub_id)\n", + " prod_1 = int(outChoices[idx, 7]%100)\n", + " prod_2 = int(outChoices[idx, 8]%100)\n", + " assert prod_1 > 0 and prod_1 <= total_prod and prod_2 > 0 and prod_2 <= total_prod, \"Error 2: error decoding \"+str(prod_2)\n", + " if prod_1 > prod_2 or prod_1==prod_2:\n", + " print \"check it baby\", prod_1, prod_2\n", + " if outChoices[idx, 0] == 0:\n", + " choices[sub_id, prod_1, prod_2] = choices[sub_id, prod_1, prod_2] + 1\n", + " elif outChoices[idx, 0] == 1:\n", + " choices[sub_id, prod_2, prod_1] = choices[sub_id, prod_2, prod_1] + 1\n", + " \n", + " \n", + "pref = np.zeros([total_sub+1, total_prod+1])\n", + "pref[1:,1:] = WA2\n", + "#data_dict[sub_id]['sig'], ['lab'] contains original eeg signals (25x768x500) and labels (1x500)\n", + "# What are channels?" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "ncomp = 1\n", + "n200_val = np.zeros([total_sub+1, total_prod+1])\n", + "ersp_val = np.zeros([total_sub+1, total_prod+1])\n", + "feat_val = np.zeros([total_sub+1, total_prod+1])\n", + "for sub_id in range(1,total_sub+1):\n", + " sig = data_dict[sub_id]['sig']\n", + " lab = data_dict[sub_id]['lab'].flatten()\n", + " for prod_id in range(1,total_prod+1):\n", + " sig_prod = sig[selected_chan, :, np.argwhere(lab==prod_id)] \n", + " avg_sig_chan = np.transpose(np.mean(sig_prod, axis=0))\n", + " # compute ICA\n", + " ica_sig = FastICA(n_components=ncomp)\n", + " S_ = ica_sig.fit_transform(avg_sig_chan) # Get the estimated sources\n", + " A_sig = ica_sig.mixing_ # Get estimated mixing matrix\n", + " A_sig_norm = np.linalg.norm(A_sig,axis=0)\n", + " S_ = S_*A_sig_norm\n", + " \n", + " if sum(A_sig)<0:\n", + " S_ = -1*S_\n", + " \n", + " n200val = np.mean(S_[n200_ind])\n", + " featval = min(S_[erp_ind])\n", + " \n", + " info = mne.create_info(ch_names=['ica'], sfreq=freq, ch_types=['eeg'])\n", + " raw = mne.io.RawArray(np.transpose(S_[256:]), info)\n", + " psds, freqs = psd_multitaper(raw, low_bias=True, tmin=0.1, tmax=0.5, fmin=13, fmax=26, proj=True, n_jobs=1)\n", + " raw2 = mne.io.RawArray(np.transpose(S_), info)\n", + " psds2, freqs2 = psd_multitaper(raw2, low_bias=True, tmin=0.5, tmax=1.0, fmin=13, fmax=26, proj=True, n_jobs=1)\n", + " erspval = np.mean(10*np.log10(psds)) - np.mean(10*np.log10(psds2))\n", + "\n", + " n200_val[sub_id, prod_id] = -1*abs(n200val)\n", + " ersp_val[sub_id, prod_id] = -1*erspval\n", + " feat_val[sub_id, prod_id] = featval\n", + "\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "N200 and ERSP correlation: (0.0025271910663613034, 0.9763588794947393)\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAY4AAAEXCAYAAAC6baP3AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzs3XdUk9cbB/BvGGGHISiCIgKCDKFoFUFFq4B1g6OlQ1DcCHXXUW0dHHHWggxx1dWf1jpQobXFUbEVFyhQBVRGVUC2hB0g+f1BSY1JIKEQhs/nHM6R+9773vui5uHOl8Hj8XgghBBCJCTX3g0ghBDSuVDgIIQQIhUKHIQQQqRCgYMQQohUKHAQQgiRCgUOQgghUqHAQQghRCoUOAghhEiFAgchhBCpUOAghBAiFQochBBCpKLQ3g0ghJDOxpYxUeK8SbyoNmxJ++gygUOav0givSReFHycQ9q7GV3a4Vg/VFVVt3czujQVFeX2bkKX0GUCByGEyIqc3Ls9yk+BgxBCpMRgyLd3E9oVBQ5CCJGSvLxiezehXVHgIIQQKdFQFSGEEKkwGBQ4CCGESEFOjuY4CCGESIF6HIQQQqRCk+OEEEKkQpPjhBBCpEL7OAghhEiFehyEEEKkQpPjhBBCpELLcQkhhEiFVlURQgiRyrs+x/FuPz0hhLQAgyEn8Zc0srKyMGfOHNjb22Po0KHYsmULqqqqmi1XWVmJXbt2wcXFBXZ2dnBzc0NISAg4HE5LH7FJ1OMghBAptcUcB5vNhpeXFwwMDBAUFITi4mIEBgaiuLgYe/bsabLsxo0bceXKFSxbtgz9+vVDUlISgoODwWazsW7dulZvKwUOQgiRUlusqjp16hTYbDYiIyOho6MDAJCXl8fKlSvh6+uLfv36iSxXV1eHy5cvY+7cuZg5cyYAYOjQocjJyUFUVFSbBA4aqiKEECnJyytK/CWp2NhYDB06lB80AGDs2LFgMpmIjY0VW47H46G+vh4aGhoC6SwWCzweT/qHkwD1OAghRErSTI6z2Wyw2WyhdBaLBRaLxf8+PT0d06ZNE8jDZDJhZGSEjIwMsfdXVFTElClTcPz4cQwcOBBmZmZITk7G6dOn8fnnn0vcTmlQ4CCEEClJc+TI0aNHERISIpTu5+cHf39//vdsNlsgkDRisVgoLS1tso7Nmzfjm2++wUcffcRPmzVrFvz8/CRupzQocBBCiJSk6XF4e3vDw8NDKF1UkGip3bt348aNGwgICICxsTEePnyI0NBQ6OrqYt68ea1WT6N2CRzl5eV49eoVampqhK5ZW1u3Q4sIIURycvIMifO+PSTVVD5RQ1psNhsmJiZiyz158gSHDx9GWFgYxowZAwAYPHgw6urqEBwcjE8++QTq6uoSt1cSMg0cr169wrp16xAXFyd0jcfjgcFgICUlRZZNIoQQqTHkW39dkampKdLT0wXSOBwOnj9/jqlTp4ot9+zZMwCApaWlQLqVlRU4HA7y8vI6d+DYsGED0tLSsGbNGpiZmUFR8d3etk8I6ZwUmK0fOJydnREeHo6SkhJoa2sDAGJiYsDhcDBy5Eix5QwNDQEAjx49goGBAT/9r7/+AoPBEEhrLTINHAkJCdi8eTMmTJggy2oJIaRVtUWPw9PTEydOnICvry98fX1RVFSEbdu2Yfz48TAzM+PnW7duHSIjI/H48WMAgI2NDWxtbfHNN9+gqKgIffr0QVJSEvbv349p06ZBRUWl1dsq08ChqakJNTU1WVZJCCGtTpo5DkmxWCwcPXoUAQEB8Pf3h5KSEiZMmIBVq1YJ5ONyuaivr+d/Ly8vj3379iEoKAj79+9HYWEhevbsCR8fHyxYsKDV2wkADF4zO0TS09MRGxuLjIwM/pIwTU1NmJiYwNnZGaamphJXdurUKfz222+IiIho9WEqW8bEVr1fa1NRU8bsVVNhPdgc1oP7QUdPE9+tOYLD28+0d9MkksSLgo+z8JLCtqSgKAd3Hwc4ullAjaWMlxlFOH/wDh7de95kuS+DPNDf3lDs9eVTv8frwgoAgLvPENg49IFeTxaUVBRRnF+OpLgsRB+/j7LS6tZ8nGYdjvVDVdV/r5PD4SA8PAxRUVFgs9kwMzPD4sWL4eQ0rNmybDYbQUHf4dq1a6iqqoK1tQ2WL18uctHKw4cPERT0HVJSUqCqqgoXF1csXboUqqqq/DyZmZmIjIxEXFwcXr58AVVVVfTvb4lFixYJ3TM8PBwREfuE6mEymbh7914LfhLCVFSUW+U+0vxfOBzbNkti25PYHkd1dTW++uor/Pzzz1BUVISRkRF/ZUBGRgYuXLiAHTt2YPz48di6dSuUlJSarezp06fIysqCq6srBg8eDE1NTaE869ev/w+P03Fp67Kw8JtP8epFAVIfpMPJbWB7N6nDm7PWBYNGmeLKmSTkvXgNpw/7Y+n2idi5LBJPEnPElos6fh+xUY8E0uTk5TBr1Qd49eI1P2gAgHH/7shKzcOdK09QXVmLnn204TzRCnZOxtg450fUVNW22fO1la+/3oArV67g008/hZFRH1y6dAn+/v6IiNiP999/X2w5LpcLf39/PHmSBi8vb+jo6OD06R8xb95c/PDD/9C3b19+3tTUVCxYMB/GxsZYvnwF8vPzcfz4Mfz999+IiIjg5zt//hzOnz8PFxcXfPTRRygvL8fZs2fg5TUTISGhcHR0FGrHmjVroaHx72RuR3z3RVv0ODoTsYFj165d+PPPP7Fz5064ubmByWQKXOdwOIiJiUFAQAB27twp0Qf+9evXATSsgY6Pjxe6zmAwumzgKMgtxhgDLxTkFsOgT3dczjrc3k3q0PpadoeDizl+2ncLv/wvAQDw56+p2HLkE3zkOwwBC34SW/bx/RdCaQMcjKCgKI/bMWkC6d99GSWUN/3RKyzeMg72w/vidsyT//gkspWcnIzLly9jyZIlmD3bBwAwadIkTJ8+DXv2fIsffvif2LIxMTFITHyI7du3Y+zYDwEArq6umDJlMsLCwrBz505+3r1790JdXR0HDx7iH3VhYGCAzZs34ebNmxgxYgQA4MMPx2HhwkUCvRB3d3d4eLgjPDxMZOBwcXGBrq7uf/9htCEGBQ7RoqOjsXbtWkycKHoIiMlkYsKECaitrcX27dsl+sC/du1ay1vaydVy6lCQW9zezeg03h9pBm49Fzcu/ttzqOPU42Z0CqYvcEQ3fQ0UvSqT+H5D3SzA5fIQJ0EgaLyvqnrzveiO5sqVK5CTk8O0adP5aUpKSnB398DevcHIzs7mr8IRVVZbWxuurm78NB0dHbi5uSEqKgrV1dVQVlZGeXk57ty5DU9PT4HzkSZNmoRdu3bit99+4wcOKysroXq0tLQwcOBA3LlzR2Q7eDweysvLoaqq2mHfe6HA7Hi9IFkS+7dSXV0tUdTX1dVFdbVsx4JJ12fUTxf5OWxUlgtuEs1MyQMA9OmnJ/G9mMoKsB/WF08Sc1CSXy50ncEA1DWVodlNFeZ2Bvh0iTPq67hITXj53x6iHaSmpqJXr1
5CG85sbGwAAGlpqU2WtbCwEPqwtrGxQU1NDbKyMgE0DDnX1dXBykpwjkJRUREWFhZN1tGosLAIWlpaIq9NmTIZw4cPg5OTI9asWYOCgoJm7ydrcvJyEn91RWJ7HAMHDkRoaChsbGxEzkUAQGlpKcLCwpocNxUlJSUFmZmZIl8y4u7uLtW9SNek2U0NpUUVQumNaVq6kq/OGzjCBMqqTMT9JvoDTc9AE9tOzuR/X5xfhv1bfkPO3yVStrr9FRYWQE9POKg2/hKYny/+Q7iwsAB2dnYiyurxy/bvb4nCwkIAgJ6e8C+Wurp6yMrKarKNCQkJSEpKhI/PHIF0FosFT09P2NragclkIiEhAadP/4i//krGyZOnhE5/bU80VCXG119/jZkzZ2LUqFFwdHSEmZkZ/y+urKwM6enpiIuL4y8hk0RpaSnmzZuHpKQkMBgM/pG/DMa/fwkUOAgAMJUU8Lq2Xii9ltOQpqgk+UpyRzcLcGrqcP/3dJHXSwrKsWtZJBSVFNDHXA+DnE2hpNI5N6fW1NSIXLHYuHhF1DE/b5Z9ey6zoSxToGxNTcMIg6Ki6LxNjUAUFxdh7do1MDQ0xJw5goHjs88+E/jexcUFNjY2WLduLU6dOol58+aLva+s0eS4GH369EF0dDROnjyJmzdv4syZM/xzVFgsFkxNTbFo0SKhcc6m7NixA9XV1YiMjIS7uzsOHToELS0tXLx4EdevX0dwcHDrPBXp9Dg1dVBUFB5HVvxnbLm2pk6i+7C0VWA1qDcSbmagqkL0azRrOfV4HN8wLJV4Kwsp8S+xNnQaykqqkBiX1aL2txclJSXU1gqvBGv80G9q9aOSkpLIUYCaGo5AWSWlhiWttbWi8yori17yWlVVCX9/f1RUVOLIke8FJszFGT9+PL79djdu377TwQJH1xyCklSTv7ZpaGhg/vz5mD+/df7C4uLisGLFCv6brDQ0NGBtbQ1ra2swmUyEhISIPH6YvHtKiyrQTV/4YDjNbg1DVG8uqW2Kg4s55BXkEPfWaqqmPE3OxevCCgx1Ne90gUNXVw+5ucJLlRuHl7p3Fz83pKurh8JC4aGsxrTGso3DXgUFhSLzihoqq62txfLly/H06VOEhYXDzEz02+xE6dGjB9jspo8VlzV5mhyXneLiYvTo0QPy8vJQVVXF69ev+decnJxEHn5I3k3PnxWiuwFLaGWTiVWPf65LNmE61MUcZa+rkBz3t1T1KzDloaIuPBTT0VlYWODly5dCp6wmJyfzrzdVNi0tDVwu962yf0FJSQnGxg37OMzMzKCgoIDHjwX3ytTW1iItLQ3m5oJ1cLlcrF//Fe7evYvAwECp5kR5PB5ycnL4Zzd1FHLyDIm/uqImA0dNTQ1OnjyJhQsXYuzYsXBwcICDgwPGjh2LhQsX4tSpU02Omb6tZ8+eKC5uWJJqbGyMK1eu8K/Fx8e3yZkqpHOK/z0dcvJyGDn535U7CopyGD7eElmp+SjMbVgyq9lNFfpGWpAXMXSg31sLfS174N71Z6iv5wpdV1ZVhIKicLn3R5lCnaWMrNT8Vnwi2XB1dQGXy8XZs/+eSMDhcHDhwgVYWVnB0LAXAKCgoACZmZkCw1quri4oKSlBTMxv/LTG70eMGMEfgtLQ0ICDgwN++eUXlJf/u0otKuoSKisr4ebmKtCmbdu24ddff8XateswZoyL2LY3fja86fTp0ygpKZFo17ss0aoqMbKzszF79mzk5OTA3t4ew4YN4y/xY7PZePbsGQICAvD999/j8OHDYteGv2nYsGG4desW3Nzc4OXlhTVr1iA5ORlMJhNJSUnw8fFpvSfrgDwXT4SGlhpYWg3DLYM/sIW8QkOX9+TeSyhnV7Zn8zqUjJQ83Lv2FB5zHaCuqYz8l6VwGmsBXX0Wdq+4wM83bb4jho+zxKqPjgrt63B0a/jNV9wwVR/z7lj4jRvuXn+GvJevwQADfft3xxCXfijIZSPmTFLbPWAbGTDAFq6ubggNDUVJyWsYGRkhKuoScnKysW/fv8d5BAcH49Kli4iO/pn/f9fFxRW2tj9g48aNyMzMgra2Nk6f/hF1dXXw9V0sUI+fnz+8vb0wZ44Ppk+fjvz8fBw7dgxDhgzBiBHO/HwnTpzA6dM/wtbWDioqyoiOFtxwOXr0aKioNMx1jB8/Dm5ubujXrx+YTCU8ePAAv/56Gebm5vj444/QkdCqKjG2bNkCdXV1xMTEoGfPniLz5Obmws/PDwEBAQgPD2+2slWrVvFXXLi7u0NNTQ2XL19GTU0NNmzYAE9PzxY+RufgvdIDhsY9+N8PGzsQw8Y2HD0SfeI6BY63HNh6BR6v/jmrSkMJ2ZnFCF4bhdQH2RKVd3AxR352KdL/eiXyen72ayTGZWHAECM4T7CCnLwcivLKcPVsMqJP3EcFu3PuTwoICEBYWBh+/jkapaWlMDMzQ1BQMAYPHtJkOXl5eYSEhGLPnj04deokqqurYW1tjY0bNwm9SMjS0hIREREICgrCrl27oKqqismTp2DJkiUCqyTT0hqCdlJSIpKSEoXqbAhcDYFj3LjxSEx8iKtXr6KmpgY9exrAy8sb8+bN4weXjqKrDkFJSuwhh/b29vjuu++aPAceAG7cuIGlS5fiwYMHbdJASXX0Qw47u/Y45PBd01qHHBLxWuuQw/WLf5Q4b0Dox61SZ0citsehpKSEiormV65UVFSIXPvdlIYNQEnIzc3F7Nmzoa+vj6SkJPTq1Qs6OjpS3YsQQmTtXV9VJTZwuLm5Ydu2bdDQ0OCfO/O2P/74A9u3b8eHH34oUWVlZWVYtmwZ/vjjD6irq6OiogKTJ0+Gvr4+jh49Cm1t7S57yCEhpOt414eqxAaO1atXIy8vD/PmzQOLxYKJiYnAzvHMzEyw2WyMHDkSq1evlqiywMBApKen4/Tp07CysuKfnwMAw4cPx8GDB//j4xBCSNuTk6PAIZKamhoiIiKQmJiI2NhYpKen89eG6+vrY9iwYRg5ciRsbW0lruzq1av46quvYGtrK/AGK6DhSOacHPHvWCCEkI6io57aKyvNHvhjZ2cn8uCzlqipqRG7kaeiouKd/8sghHQO7/pQlUw/qfv374/Lly+LvPb7779L1XshhJD2IicnJ/FXV9RkjyMvLw9nzpxBfn4+zMzM4O7uLnSgYXp6OjZt2oRjx441W5mvry8WLlyI6upqjBs3DgwGA0lJSbh06RLOnTuHQ4cO/benIYQQGVBQoFVVIj1//hwzZsxAZWUl9PX1cebMGYSHhyMwMFBgb0d5eTnu3ZPsRfLOzs4IDg5GYGAgoqOjAQCbNm1Cz549sWfPHjg4OPzHxyGEkLZHk+Ni7N69G4aGhjh48CB0dHSQm5uLgIAA+Pr6/qdd3i4uLnBxcUFWVhaKi4uhqakJU1PTFj8AIYTIWlc9g0pSYgNHQkICvv76a/6GvJ49eyI0NBQRERHYtGkT8vLysGTJkhZXbGxsDGNj4xaXJ4SQ9kI9DjEqKiqE3lsMAAsWLED37t2xYcMGFBYWwsPDQ6oKCwsLce3aNeTm5gqdr
MtgMLBq1Sqp7kcIIbLWVSe9JSU2cBgZGSExMVHkvIOHhwdYLBaWL1+Ohw8fSlzZlStXsGLFCtTU1EBTU1PkKy4pcBBCOjqaHBfDyckJZ86cwdy5c0VG1zFjxuDAgQPw9fWVuLKdO3di+PDh2LJlC51JRQjptOhYdTFmz56NIUOGoLKyEurq6iLzDBkyBKdPn0ZiovBxyaLk5+dj48aNFDQIIZ0aDVWJoaenh1GjRjV7AxMTE6Gz+sVxdHREamoqHB0dJW4gIYR0NDQ5LkObNm2Cv78/eDwenJycRE6+GxgYyLJJhBAiNepxiGFvby/wJq+mMBgMxMfHN5tPTk4OLBYLO3bsELo3j8cDg8FASkqKRHUSQkh7oR6HGD4+Ps0Gjvj4eMTFxUkcYL788ks8fvwYfn5+MDY2FrmqihBCOjoFxbZZVZWVlYUtW7YgISEBSkpKmDBhAlauXAkVFRWJ7xETEwM/Pz/069cPUVFRzRdoAbGBw9/fX2yh+/fvIyQkBLdv34aVlZXEK6vi4+MREBCAiRPpNa+EkM6rLYaq2Gw2vLy8YGBggKCgIBQXFyMwMBDFxcXYs2ePRPeoqqrC1q1boaur2+rte5NUcxx3795FaGgo7t69C0tLS4SFhWH06NESl9fX15cqchJCSEfUFkNVp06dApvNRmRkJH/lqby8PFauXAlfX1/069ev2XuEhYWhV69eMDQ0xF9//dXqbWwkUdi8ffs2Zs6cCS8vL5SXlyMsLAznzp2TKmgAwBdffIGIiAiUlJS0qLGEENIRtMWx6rGxsRg6dKjAdoWxY8eCyWQiNja22fLp6ek4fvw4NmzY0KJnkkaTPY64uDiEhIQgPj4eAwYMQEREhMDJuNK6ePEiXr16hdGjR8PS0lJoVRWDwUB4eHiL708IIbLQFi9ySk9Px7Rp0wTSmEwmjIyMkJGR0Wz5zZs3Y/r06TA3N2/1tr1NbOD45JNP8PDhQ9jZ2eHAgQMYMWLEf66soqICffr0EfieEEI6G2l6Emw2m//a7TexWCyBX57ZbLbILQosFgulpaVN1hEdHY0nT55g7969ErfrvxAbOB48eAAASEtLa/YUXEmX4x4/flzK5hFCSMejoCB54Dh69ChCQkKE0v38/JpchCSp8vJybNu2DcuXLxcZeNqC2MDh5+cnkwYQQkhnw5Bictzb21vkKeJvf8izWCyRPRM2m93k6Rz79u2DlpYWXF1d+eVra2vB5XLBZrOhrKwMJpMpcXslQYGDEEKkJM2qqreHpMQxNTVFenq6QBqHw8Hz588xdepUseUyMjLw5MkTkSeZDx48GGvXrsWsWbMkbq8kZHrkCCGEdAWSbnqWhrOzM8LDw1FSUgJtbW0ADZv5OBxOk4uSli5dCm9vb4G0/fv3IzMzE4GBgQLzyq2FAgchhEipLfZxeHp64sSJE/D19YWvry+Kioqwbds2jB8/HmZmZvx869atQ2RkJB4/fgwAIldRnT9/Hnl5eSJ7Ia2BAgchhEhJXorJcUmxWCwcPXoUAQEB8Pf35x858vbL7bhcLurr61u9fmkweDwer11bQAghncy1355InHe0W9vvq5A16nEQQoiU6HTcLsLHWXidNGk9h2P9YMugwynbUhIvCrynp9u7GV0ao99HrXMfChyEEEKkIdcGq6o6EwochBAiJepxEEIIkUpbrKrqTChwEEKIlGhynBBCiFRoqIoQQohUaHKcEEKIVKjHQQghRCo0OU4IIUQqNDlOCCFEKjRURQghRCo0OU4IIUQq1OMghBAiFZrjIIQQIhU5eQochBBCpNAW7xzvTChwEEKIlN7xkSoKHIQQIi2aHCeEECKVd3ykigIHIYRIi+Y4CCGESEWehqoIIYRI4x3vcFDgIIQQadFQVTtISUlBZmYmOByO0DV3d/d2aBEhhEiO8W6fqi7bwFFaWop58+YhKSkJDAYDPB4PgGD0psBBCOno3vVDDmUaN3fs2IHq6mpERkaCx+Ph0KFDOHv2LLy8vNC7d2+cP39els0hhJAWkZNjSPzVFcm0xxEXF4cVK1agX79+AAANDQ1YW1vD2toaTCYTISEhCAkJkWWTCCFEau94h0O2PY7i4mL06NED8vLyUFVVxevXr/nXnJycEBcXJ8vmEEJIizAYDIm/uqJmexzp6emIjY1FRkYGSktLAQCampowMTGBs7MzTE1NJa6sZ8+eKC4uBgAYGxvjypUrcHZ2BgDEx8dDRUWlJc8gcwqKcnD3cYCjmwXUWMp4mVGE8wfv4NG9502W+zLIA/3tDcVeXz71e7wurAAAuPsMgY1DH+j1ZEFJRRHF+eVIistC9PH7KCutbs3H6VJU1JQxe9VUWA82h/XgftDR08R3a47g8PYz7d20Do/L5eLwuT9x6pd7yC8uQ5+eOpg3YwQmf/Bes2WTn7zE+asPcTc5A9l5r6HFUoWdRS8smemCvoa6AnmPX7qNyzf/QmZ2IcoqqtG9mwYcBpjA95NR6NVDu60er1V10REoiYkNHNXV1fjqq6/w888/Q1FREUZGRmCxWACAjIwMXLhwATt27MD48eOxdetWKCkpNVvZsGHDcOvWLbi5ucHLywtr1qxBcnIymEwmkpKS4OPj03pP1obmrHXBoFGmuHImCXkvXsPpw/5Yun0idi6LxJPEHLHloo7fR2zUI4E0OXk5zFr1AV69eM0PGgBg3L87slLzcOfKE1RX1qJnH204T7SCnZMxNs75ETVVtW32fJ2Zti4LC7/5FK9eFCD1QTqc3Aa2d5M6jT3HruDAmZuY4TYIA8x74eqdFHy5+ywYYGDSB3ZNlj1w5iYepDzH2OE2sDDugcKScvwQdQfTloTj5K55sDDW5+d9nJ4DY8NuGDPUEprqyniZV4Kffo3HtbupuBC8GD10WW39qP9ZF+1ISExs4Ni1axf+/PNP7Ny5E25ubmAymQLXORwOYmJiEBAQgJ07d2L9+vXNVrZq1SpUVzf8tuzu7g41NTVcvnwZNTU12LBhAzw9Pf/j47S9vpbd4eBijp/23cIv/0sAAPz5ayq2HPkEH/kOQ8CCn8SWfXz/hVDaAAcjKCjK43ZMmkD6d19GCeVNf/QKi7eMg/3wvrgd8+Q/PknXVJBbjDEGXijILYZBn+64nHW4vZvUKeQVsnEk8hY8xw3GxsWTAQAzxg7CzDWHsOP7XzHO2QYK8vJiy8/yGIZdq2aAqfjvR8q4EQMw2S8EEadj8e2XH/HTA5dOFSo/xtES05fuw/mrCVj48ajWe7A20laHHGZlZWHLli1ISEiAkpISJkyYgJUrV0o0GhMZGYl9+/YhOzsbRkZGWLx4McaPH98m7RQbOKKjo7F27VpMnDhR5HUmk4kJEyagtrYW27dvlyhwKCkpCfRMXF1d4erq2oJmt5/3R5qBW8/FjYv/9hzqOPW4GZ2C6Qsc0U1fA0WvyiS+31A3C3C5PMRJEAga76uq3nzv7l1Vy6lDQW5xezej07l6JwW1dfX4ZPwQfhqDwYDn+CFYufMnxD96DgfbvmLLD7Q0EkozNuwGM6PuSH+e32z9hnpaAAB2
RecYhm2LI0fYbDa8vLxgYGCAoKAgFBcXIzAwEMXFxdizZ0+TZS9fvozVq1dj/vz5GDZsGK5cuYLly5dDTU0NI0eObPW2NjlUpaurK+4yn66uLr8XIY36+nqRGwA7+jyHUT9d5OewUVleI5CemZIHAOjTT0/iwMFUVoD9sL54kpiDkvxyoesMBqDGUoa8ghx69NLC9AVOqK/jIjXh5X9/EELekJKeC6aiAsyNewik25r3ariekdtk4BCFx+Oh6HW50BxHo5LSCtRzecjJf43QU9cBAE7vmbWg9bLXFkNVp06dApvNRmRkJHR0dAAA8vLyWLlyJXx9ffmrUUUJCgrChx9+iBUrVgAAhg4dioyMDOzdu1e2gWPgwIEIDQ2FjY0NNDU1ReYpLS1FWFgY3n//fYkqq6iowHfffYfLly+jqKiIvwHwTSkpKRI2vX1odlNDaVGFUHorD7RlAAAgAElEQVRjmpaumsT3GjjCBMqqTMT9liryup6BJradnMn/vji/DPu3/Iacv0ukbDUhTcsvKYOulprQKiA9bfWG68Vsqe956fdE5BWxsfiTD4Su1dXXw/GzbfzvtViq+GrBBAwf2FkCR+tHjtjYWAwdOpQfNABg7NixWLduHWJjY8UGjhcvXiAjIwPLli0TSJ84cSLWrl2L4uJigXu2BrGB4+uvv8bMmTMxatQoODo6wszMDBoaGgCAsrIypKenIy4uDiwWC0ePHpWosvXr1+PGjRvw8PCAsbExFBUVW+cpZIippIDXtfVC6bWchjRFJcm3xji6WYBTU4f7v6eLvF5SUI5dyyKhqKSAPuZ6GORsCiWVzvczIx1fTU0dFBWF/+0qMRvSqmvqpLpfxosCbA6Pgp1Fb0xzFV6gIC8nh8MBs1BbW49nL/Jx6XoiqqqFRyA6KmniBpvNBpstHHhZLBZ/wRHQsIJ12rRpAnmYTCaMjIyQkZEh9v6N195e4WpmZsa/LrPA0adPH0RHR+PkyZO4efMmzpw5w394FosFU1NTLFq0CJ6envyA0pybN29i/fr1mDpVeHKss+DU1EFRUXiSUJHZkFYr4X8wlrYKrAb1RsLNDFRViP4PU8upx+P4hmGpxFtZSIl/ibWh01BWUoXEuKwWtZ+82+rruShmC/aYNdVVoKSkgNpa4X+7NZyGNGUpfiEqKCnDgk3HoaGmjOB1npCXF94uxmAw4PRewwfdyMHmGONgiSn+IVBVZuLzSUOleaR2IU2P4+jRoyI3Nvv5+cHf35//PZvNFggkjVgsFn8rhCiN194u2zhS1FTZlmryX4OGhgbmz5+P+fPnt0plWlpa6NatW6vcq72UFlWgm77wX65mt4YhqjeX1DbFwcUc8gpyiHtrNVVTnibn4nVhBYa6mlPgIC2SW1gKlznfCqQd3eqD7toaiHuYAS6XCzm5fz/oC0oa5t6660i2RLasohrzvzkGdkU1ftg+Fz26SVbO2LAbLE164tLvSZ0kcAgPs4vj7e0NDw8PoXRRQaKzkOmRIwsWLMDhw4cxdOhQifZ9dETPnxXCcmAvqKorCUyQm1j1+Od6gUT3GepijrLXVUiO+1uq+hWY8lBRZzafkRAR9LTVcThglkBafxN9PHuej59+i8fTv/Nh0fffPReJaS/4eZpTw6nFos0nkJVdhMMBs2Bm1F2qttVwasERMQzcETG4ku+jentIqql8ooa02Gw2TExMxJZr7Fmw2Wzo6enx09/csN3amgwcNTU1OHfuHG7cuIHMzEz+ESFaWlro27cvRo0aBQ8PD4mDwIwZM/D8+XOMHj0a9vb2QkNcDAYDW7dubeGjyEb87+kY98lAjJxszd/HoaAoh+HjLZGVmo/C3IYVVZrdVKGixkRBNhv19VyBe+j31kJfyx64dj5Z6BoAKKsqoq62HnW1gtfeH2UKdZYyslKbX95IiChKTEX+ENGbxgztj20Hf8HJn+/y93HweDz8+Ms96GmrY5BVH37ektIKlLAr0VNPEyrKDb/E1NdzsWz7aTxMfYHQ9Z/BXsTyXKAhONTWcaGuKviZ8SDlOZ5k5WPiyAGt9ahti9f6Ac7U1BTp6YLznRwOB8+fP29yeL8xqGRkZAjMczTeq6mg01JiA0d2djZmz56NnJwc2NvbY9iwYfyoyWaz8ezZMwQEBOD777/H4cOHYWgo/iiNRj/99BMOHDgAFRUVPH36VGhTYWeQkZKHe9eewmOuA9Q1lZH/shROYy2gq8/C7hUX+PmmzXfE8HGWWPXRUaHluY5uFgAgdpiqj3l3LPzGDXevP0Pey9dggIG+/btjiEs/FOSyEXMmqe0esAvwXDwRGlpqYGk1DB8O/sAW8goNc1An915CObuyPZvXIenrasJrsiMOnfsD9VwebM0NcfV2Ku4/+hvblk2FosK/83onou4g9OR1HN3qw1+iu/3QZVy7k4oPhligtLwSF68/FLh/47ElBSXl8PgiDONG2MC0tx6YigpIy3yFyGsPoa6mhEWewiuwOiSe8C98/5WzszPCw8NRUlICbe2Go1diYmLA4XCaXFLbu3dvmJiY4OeffxbYFxcVFYUBAwa0+sQ40ETg2LJlC9TV1RETE4OePXuKzJObmws/Pz8EBAQgPDy82cqCg4MxZcoUbNq0CcrKyi1vdTs7sPUKPF79c1aVhhKyM4sRvDYKqQ+yJSrv4GKO/OxSpP/1SuT1/OzXSIzLwoAhRnCeYAU5eTkU5ZXh6tlkRJ+4jwp259gk1V68V3rA8I39CMPGDsSwsQ0re6JPXKfAIcaKWa7Q1FDBj5fvIfLqA/Qx0MG2ZVPhPsa+2bIpGbkAgOt303D9rvAvRI2BQ0tDFZNG2eFeciaibySDU1uH7t00MGmkLRZ6joJhd63Wfag2wmiDHoenpydOnDgBX19f+Pr6oqioCNu2bcP48eP5K6QAYN26dYiMjMTjx4/5aV988QWWLVsGIyMjODk54erVq/jzzz8RERHR6u0EAAZP1GYKAPb29vjuu++a3Txy48YNLF26FA8ePGi2ssa9IY6Oji1rbRN8nOk49rZ0ONYPtgzRpwiQ1pHEiwLv6en2bkaXxuj3UfOZJMBh50qcl8kS/Yu3KJmZmQgICEB8fDz/yJFVq1YJbIxes2YNzp8/j7Q0wQB9/vx5oSNHJkyYIHHd0hDb41BSUkJFRfMrhCoqKiQecho1ahQSEhLaJHAQQojMSDE5Lo2+ffvi0KFDTebZtm0btm3bJpTu4eEhcvVWWxAbONzc3LBt2zZoaGhgxIgRIvP88ccf2L59Oz788EOJKps6dSo2bdqEqqoqgTmTN1lbW0vYdEIIaR+MNpjj6EzEBo7Vq1cjLy8P8+bNA4vFgomJicDO8czMTLDZbIwcORKrV6+WqLK5c+cCAA4ePIiDBw8KbKLh8XhgMBgd/sgRQggBOsey4bYiNnCoqakhIiICiYmJiI2NRXp6On+Nsb6+PoYNG4aRI0fC1tZW4sqOHTv231tMCCHtjXocTbOzs4OdXdMvcZHUkCFDms9ECCEdXRusqupMZLpznBBCugKa42hCXl4ezpw5g/z8fJi
ZmcHd3V1ot3d6ejo2bdokdhhq4MCBOHbsGGxsbGBvb9/s4WAJCQlSPgIhhMgYt/Oc5NsWxAaO58+fY8aMGaisrIS+vj7OnDmD8PBwBAYGCuztKC8vx71798RW4OPjwz8/xcfHp03OsSeEEFmiHocYu3fvhqGhIQ4ePAgdHR3k5uYiICAAvr6+Ur0f3M/Pj//nN48QJoSQTusdn+MQPij/HwkJCVi0aBH/nJOePXsiNDQUX3zxBTZt2oSgoCCZNZIQQjoUHlfyry5IbI+joqJC5Aa9BQsWoHv37tiwYQMKCwul3qn44MEDXL58Ga9evUJNjeB7uxkMhkRnXhFCSLt6x3scYgOHkZEREhMT4eDgIHTNw8MDLBYLy5cvx8OHD0WUFu2HH37Ali1boK2tjT59+nTKV8cSQggDXbMnISmxgcPJyQlnzpzB3LlzBd4I1mjMmDE4cOAAfH19Ja7syJEjmDp1KjZv3gwFBVoJTAjppNrorKrOQuyn9+zZszFkyBBUVlZCXV1dZJ4hQ4bg9OnTSExMlKiywsJCTJo0iYIGIaRzo6Eq0fT09DBq1Khmb2BiYiLxG6YcHByQkpJCp+MSQjo1Wo4rQ0uXLsXKlSuhrKyM4cOHi5x819LqHC9yIYS8w6jHIZoku7wbMRgMxMfHN5vP3d0dALB582ax96bTcQkhHR6XAodIkuzyjo+PR1xcnMQBZuvWrbRznBDS+dXXNJ+nCxMbOJra5X3//n2EhITg9u3bsLKyknhl1dSpU6VvISGEdDQ0VCW5u3fvIjQ0FHfv3oWlpSXCwsIwevTotmobIYR0TDRU1bzbt28jNDQU9+7dg7W1NcLCwvDBBx9IVMGkSZOwe/dumJubY9KkSU3mZTAYuHjxokT3JYSQdsOra+8WtKsmA0dcXBxCQkIQHx+PAQMGICIiQuBkXEnY2NhARUUFQMP7xGmOgxDS6VGPQ7RPPvkEDx8+hJ2dHQ4cOIARI0a0qILAwED+n7dt29aiexBCSIdCgUO0Bw8eAADS0tKwZMmSJm8i6XJcQgjpEmhVlWhvvkejNRUWFuLatWvIzc0VeTruqlWr2qReQghpNbSqSrS2CBxXrlzBihUrUFNTA01NTZGn41LgIIR0eDRUJTs7d+7E8OHDsWXLFv4LogghpLPhSdHj6IrLgcS+AbAt5Ofn4/PPP6egQQjp3Lh1kn91QTINHI6OjkhNTZVllYQQ0vq49ZJ/dUEMHo/Hk1VlBQUF8Pf3h5ubG5ycnESejmtgYCCr5hBCSItwb22UOK+ck+R5OwuZznHIycmBxWJhx44dQhsBeTweGAxGi0/Hraqqbo0mEjFUVJTBe3q6vZvRpTH6fQRbxsT2bkaXlsSLap0b0c5x2fnyyy/x+PFj+Pn5wdjYmN45TgjpnDrQEFRSUhICAwPx6NEjaGpqYsaMGVi8eDHk5eUlvseRI0cQGBiIUaNGISIiotn8Mg0c8fHxCAgIwMSJ9FsVIaQT6yD7OF68eIFZs2ZhyJAhiIiIQEZGBnbs2AEOh4OVK1dKdI+8vDyEhISgW7duEtcr08Chr6/PP7eKEEI6rQ7S4zh48CBYLBaCg4PBZDLh6OiIsrIyhIaGYu7cuRK9UXX79u1wdXXFy5cvJa5XpquqvvjiC0RERKCkpESW1RJCSOuqr5H8qw3FxsbCxcUFTCaTnzZx4kRwOBzcvn272fJxcXGIjY3FihUrpKpXpj2Oixcv4tWrVxg9ejQsLS2FVlUxGAyEh4fLskmEECK9DtDjqKysRE5ODkxNTQXSe/XqBRUVFWRkZDRZvra2Fps3b4afnx90dXWlqlumgaOiogJ9+vQR+J4QQjodKeY42Gw22Gy2UDqLxRK5JUFSZWVl/PuIundpaWmT5b///nswGAx8/vnnUtct08Bx/PhxWVZHCCFtgifFjvCjR48iJCREKN3Pz0/oFd1lZWXIz89v9p7/db9bTk4OwsPDERoaCgUF6cOATAMHIYR0BTwphqq8vb3h4eEhlC6qpxATE4O1a9c2e89jx45hwIABACCyN8Nms6GpqSm2/M6dOzFo0CDY2Njwy9fV1aGurg5sNhsqKipNbpegwEEIIVKSJnBIMyQ1depUTJ06VeJ7GxgYID09XSAtOzsbVVVVMDExEVsuIyMDqampGDx4sNC1wYMHIzQ0FC4uLmLLU+AghBApcWs57d0EAICzszOuXr2KL7/8kr+yKjo6mr80V5yAgABUVlYKpG3duhXKyspYvnw5+vXr12S9FDgIIURK0vQ42tLcuXNx6dIlLF26FDNnzkRGRgbCwsLg7e0tMFTl7e2NnJwcxMTEAAB/mOtNLBYLqqqqcHBwaLZeChyEECKljhI4evfujSNHjmDr1q2YP38+NDU1MXv2bKEX8XG5XNTXt16bZXo6bluiQw7bFh1y2PbokMO211qHHFb8OEnivGofX2qVOjsS6nEQQoiUOkqPo71Q4CCEEClx69r2KJGOjgIHIYRIiXochBBCpEKBgxBCiFR4rbhCqTOiwEEIIVKiHgchhBCpcClwEEIIkQa3rmMcOdJeKHAQQoiUaKiKEEKIVChwEEIIkYo0L3LqiihwEEKIlGg5LiGEEKnQqipCCCFSoVVVhI/D4SA8PAxRUVFgs9kwMzPD4sWL4eQ0rNmybDYbQUHf4dq1a6iqqoK1tQ2WL18Oa2trobwPHz5EUNB3SElJgaqqKlxcXLF06VKoqqry82RmZiIyMhJxcXF4+fIFVFVV0b+/JRYtWiR0z/DwcERE7BOqh8lk4u7dey34SXQsXC4Xh8/9iVO/3EN+cRn69NTBvBkjMPmD95otm/zkJc5ffYi7yRnIznsNLZYq7Cx6YclMF/Q11BXIe/zSbVy++RcyswtRVlGN7t004DDABL6fjEKvHtpt9XidjoqaMmavmgrrweawHtwPOnqa+G7NERzefqa9myYzNDlO+L7+egOuXLmCTz/9FEZGfXDp0iX4+/sjImI/3n//fbHluFwu/P398eRJGry8vKGjo4PTp3/EvHlz8cMP/0Pfvn35eVNTU7FgwXwYGxtj+fIVyM/Px/Hjx/D3338jIiKCn+/8+XM4f/48XFxc8NFHH6G8vBxnz56Bl9dMhISEinwt5Jo1a6Ghoc7/Xk5OvpV+Mu1rz7ErOHDmJma4DcIA8164eicFX+4+CwYYmPSBXZNlD5y5iQcpzzF2uA0sjHugsKQcP0TdwbQl4Ti5ax4sjPX5eR+n58DYsBvGDLWEproyXuaV4Kdf43HtbiouBC9GD13J3hvd1WnrsrDwm0/x6kUBUh+kw8ltYHs3Sea43C7xGqMWo8Dxj+TkZFy+fBlLlizB7Nk+AIBJkyZh+vRp2LPnW/zww//Elo2JiUFi4kNs374dY8d+CABwdXXFlCmTERYWhp07d/Lz7t27F+rq6jh48BA0NDQANLxwfvPmTbh58yZGjBgBAPjww3FYuHCRQC/E3d0dHh7uCA8PExk4XFxcoKurK5TemeUVsnEk8hY8xw3GxsWTAQAzxg7CzDWHsOP7XzHO2QYK8uID5CyPYdi1ag
aYiv/+Ux83YgAm+4Ug4nQsvv3yI3564NKpQuXHOFpi+tJ9OH81AQs/HtV6D9aJFeQWY4yBFwpyi2HQpzsuZx1u7ybJ3DseNyDX3g3oKK5cuQI5OTlMmzadn6akpAR3dw88evQI2dnZTZbV1taGq6sbP01HRwdubm6Ijb2B6uqGtxOWl5fjzp3bGDduHD9oAA0BSlVVFb/99hs/zcrKSiBoAICWlhYGDhyIjIwMke3g8XgoLy8Hl8uV7uE7sKt3UlBbV49Pxg/hpzEYDHiOH4KC4jLEP3reZPmBlkYCQQMAjA27wcyoO9Kf5zdbv6GeFgCAXUFvmGxUy6lDQW5xezejXfG4PIm/uiIKHP9ITU1Fr169wGIJDkfY2NgAANLSUpssa2FhATk5wR+njY0NampqkJWVCQB4+vQp6urqYGUlOEehqKgICwuLJutoVFhYBC0tLZHXpkyZjOHDh8HJyRFr1qxBQUFBs/fr6FLSc8FUVIC5cQ+BdFvzXg3XM3KlviePx0PR63Jos9REXi8prUBhSTmS0l5izXfnAABO75lJXQ/puurqeRJ/dUU0VPWPwsIC6OnpCaU3Dv3k54v/EC4sLICdnfBYu66uHr9s//6WKCwsBADo6QkPJ+nq6iErK6vJNiYkJCApKRE+PnME0lksFjw9PWFrawcmk4mEhAScPv0j/vorGSdPnhLo3XQ2+SVl0NVSA4PBEEjX026Yy8kvZkt9z0u/JyKviI3Fn3wgdK2uvh6On23jf6/FUsVXCyZg+EAKHORfXahT3yIyCxx//vknjh8/juzsbHTv3h0ffvghZsyYIavqm1VTUwNFRUWhdCUlJf71psoymUwRZZkCZWtqGoY7FBVF520c0hKluLgIa9eugaGhIebMEQwcn332mcD3Li4usLGxwbp1a3Hq1EnMmzdf7H07upqaOigqCv8zVWI2pFXXSLeDN+NFATaHR8HOojemuQpP6srLyeFwwCzU1tbj2Yt8XLqeiKrqd3vpJRHWVYegJCWTwHHt2jX4+vpCQ0MDffv2xZMnT3Dr1i28evUK/v7+smhCs5SUlFBbWyuU3vih3xhAxJXlcIQ/XGpqOAJllZSUAQC1taLzKisri7x/VVUl/P39UVFRiSNHvhea+xBl/Pjx+Pbb3bh9+06nCBz19VwUsysE0jTVVaCkpIDaWuHgUMNpSFNWkvyfcEFJGRZsOg4NNWUEr/OEvLzwSC2DwYDTe6YAgJGDzTHGwRJT/EOgqszE55OGSvNIpAt7x+OGbALH/v374eDggLCwMKipqYHL5SIgIACHDx/G4sWLheYG2oOurh5yc3OE0huHl7p3Fx7GerNsYaHwUFZjWmPZxmGvgoJCkXlFDZXV1tZi+fLlePr0KcLCwmFm1k+Cp2nQo0cPsNmlEudvT7mFpXCZ861A2tGtPuiurYG4hxngcrkC/04KSsoBAN11JFsiW1ZRjfnfHAO7oho/bJ+LHt0kK2ds2A2WJj1x6fckChyE711fjiuTT+yMjAz4+PhATa1hMlJOTg6LFi1CVVVVk6uVZMnCwgIvX74Emy04Zp6cnMy/3lTZtLQ0odVMycl/QUlJCcbGDfs4zMzMoKCggMePHwnkq62tRVpaGszNBevgcrlYv/4r3L17F4GBgU3uJXkbj8dDTk4OtLU7x8Y1PW11HA6YJfDV30Qf/U16glNbh6d/C66ASkx7AQDob6Iv6nYCaji1WLT5BLKyi7Dv689hZtRdqrbVcGpRXkmrqsi/uDzJv7oimQQONpst9AGmo6PDv9YRuLq6gMvl4uzZf3e/cjgcXLhwAVZWVjA0bFjFU1BQgMzMTIFhLVdXF5SUlCAm5t/ltI3fjxgxgj8EpaGhAQcHB/zyyy8oLy/n542KuoTKykq4ubkKtGnbtm349ddfsXbtOowZ4yK27cXFwksjT58+jZKSEol2vXcESkxFOL1nKvClqa6CMUP7Q1FBHid/vsvPy+Px8OMv96CnrY5BVn346SWlFch4USAwJ1Ffz8Wy7afxMPUFvlvjCXtLI5H1NwQH4XmsBynP8SQrHzZmBq34tKSzq6/jSvzVFclscry8vByvX7/mf1//z+mSb6cDELvctC0NGGALV1c3hIaGoqTkNYyMjBAVdQk5OdnYt+/f4zyCg4Nx6dJFREf/DENDQwCAi4srbG1/wMaNG5GZmQVtbW2cPv0j6urq4Ou7WKAePz9/eHt7Yc4cH0yfPh35+fk4duwYhgwZghEjnPn5Tpw4gdOnf4StrR1UVJQRHR0lcJ/Ro0dDRaVhrmP8+HFwc3NDv379wGQq4cGDB/j118swNzfHxx9/hM5MX1cTXpMdcejcH6jn8mBrboirt1Nx/9Hf2LZsKhQV/t38dyLqDkJPXsfRrT5wsG3o5W0/dBnX7qTigyEWKC2vxMXrDwXu33hsSUFJOTy+CMO4ETYw7a0HpqIC0jJfIfLaQ6irKWGRp/AKrHeZ5+KJ0NBSA0urYRRh8Ae2kP/n7+Lk3ksoZ1e2Z/PaXFftSUhKZoHj7ZVAjWbNmiWUlpKS0satES0gIABhYWH4+edolJaWwszMDEFBwRg8eEiT5eTl5RESEoo9e/bg1KmTqK6uhrW1NTZu3AQTExOBvJaWloiIiEBQUBB27doFVVVVTJ48BUuWLBFYcpqWlgYASEpKRFJSolCdDYGrIXCMGzceiYkPcfXqVdTU1KBnTwN4eXlj3rx5/ODSma2Y5QpNDRX8ePkeIq8+QB8DHWxbNhXuY+ybLdu4z+P63TRcv5smdL0xcGhpqGLSKDvcS85E9I1kcGrr0L2bBiaNtMVCz1Ew7C77X2Y6Mu+VHjB8Y2/NsLEDMWxswyq16BPXu37geMcjB4PH47X5T+D8+fNS5ffw8JC6jqoqGoNuSyoqyuA9Pd3ezejSGP0+gi1jYns3o0tL4kU1n0kCcb6S741yDCtrlTo7Epn0OFoSCAghpKPitv3v2x1au+8cZ7PZyMrKgp6eHnr27NnezSGEkGbRznEZiI2Nxb1797BixQqB9L179yIiIoI/Ue7q6opdu3aJ3IVNCCEdRVc9g0pSMlmO+7///Q8vX74USLt27RpCQ0Nhbm6Or776Cp9++imuXLmCkydPyqJJhBDSYh3pdNykpCR88sknsLW1xYgRIxAcHMz/ZbwpxcXF+PrrrzFq1Ci89957mDBhAo4fPy5RnTLpcaSkpGDZsmUCaWfPnoWKigoOHTrE3+OhoKCAyMhIeHt7y6JZhBDSIh1lVdWLFy8wa9YsDBkyBBEREcjIyMCOHTvA4XCwcuXKJsv6+/vj77//xrJly2BgYIBbt24hICAAXC632c9gmQSOkpISGBj8u4GKx+Ph9u3bcHBwENgYOHz4cKlXYBFCiKx1kLiBgwcPgsViITg4GEwmE46OjigrK0NoaCjmzp0rdk/cq1evcP/+fWzduhXTpk0DADg6OiI1NRVRUVHNBg6ZDFVpa2vzz3wCGvYoVFRUYNCgQQL5lJSUJOpiEUJIe+JyeRJ/taXY2Fi4uLgIzAtPnDgRHA4Ht2/fFluu8eSLt1+5w
GKxIMkODZn0OOzs7HDixAn+A/7www9gMBgYPXq0QL6nT5+iR48eYu5CCCEdQ70Uk+NsNlvk0UosFkvoxXHSqKysRE5ODkxNTQXSe/XqBRUVFbFvCgWA3r17Y+jQodi3bx+MjY1hYGCAuLg4xMTEYPPmzc3WLZPAsWTJEsyYMQOOjo5QV1dHXl4eJk2aJPTA0dHRQr0QQgjpaKRZjnv06FGEhIQIpfv5+f2n10qUlTVsLBQVfFgsFkpLmz4ZOzw8HMuWLcOkSZMANBw+u3r1ari7uzdbt0wCh6mpKS5cuIAzZ86grKwM1tbWQpsCi4qKYGlpicmTJ8uiSYQQ0mLSjEB5e3uL3AQt6gO/rKwM+fn5Qulve3POuCV4PB7Wrl2LrKws7N69G3p6erh16xZ27doFXV1dTJzY9AkGMtsA2Lt3b6GVVW/q1q0bNmzYIKvmEEJIi0kTOKQZkoqJicHatWubzXfs2DEMGDAAgOgTxtlsNjQ1NcWW//3333H58mVcuHAB/fv3BwA4ODigqKgI27dv7ziBQxLp6enYv38/tm/f3t5NIYQQsdpqznvq1KmYOnWqxPkNDAyQnp4ukJadnY2qqiqhA1bf9OzZM8jLywu9Z8jS0hI//fQTqqqqoKKiIra8zF69V19fj8TERPzyyy949EjwRUbJyclYvHgxJk2ahGvXrsmqSYQQ0iId5UVOzs7OuHr1qsCrq6Ojo/lLc8UxNFgQbWYAAA8GSURBVDREfX290Enkjx49Qrdu3ZoMGoCMehyvXr3CggUL8OTJE/B4PP6Kql27duGbb77BpUuXoKamhvnz52P27NmyaBIhhLRYHZfRfCYZmDt3Li5duoSlS5di5syZyMjIQFhYGLy9vQWGqry9vZGTk4OYmBgAwMiRI2FoaIglS5Zg8eLF6NGjB/744w+cP39eogl7mQSOoKAgvHz5EsuWLUP//v2Rk5OD/fv3Y/r06UhPT8fnn38Of3//JsfkCCGko+gohxz27t0bR44cwdatWzF//nxoampi9uzZ8PPzE8jH5XIF9sipqanh6NGj2LNnD7799luw2Wz06tULa9asweeff95svTJ5H8fo0aMxa9YseHl58dPi4+Px2WefYeHChVi6dOl/roPex9G26H0cbY/ex9H2Wut9HMenKEucd+aFrvfZJJMeR15eHmxsbATSGlcEODs7iypCCCEdVkc5cqS9yCRw1NfXQ0FBsCp5+Yb3EysrSx65CSGkI6DAISOHDx+Grq4u//vGEbKDBw9CR0dHIO/69etl1SxCCJEaBQ4ZMDAwQFJSksj0hw8fCqQxGAwKHISQDq22g0yOtxeZBA7am0EI6Ure9R6HTDYAfvvtt8jLyxNIu3HjBsrLywXSnj9/LvR6WUII6Wi4XMm/uiKZBI4DBw4IBI76+nosXLgQf//9t0C+kpIS/Pzzz7JoEiGEtFhH2TneXmQyVCVqq4gMto8QQkib6KoBQVId6pBDQgjpDDrKkSPthQIHIYRIiXoc7YjBeLejNiGkc6LAISPe3t5CgeKzzz4TSKN5D0JIZ9BVV0tJSiaB4+2TGgkhpDN713scMjkdlxBCupLNI5p+0dGbvr5Z1YYtaR8UOAghhEhFZq+OJYQQ0jVQ4CCEECIVChyEEEKkQoGDEEKIVChwEEIIkQoFDkIIIVKhwEEIIUQqFDgIIYRIhQIHIYQQqVDgaMbevXthYWEBT09Pkdfs7e353+fn52PHjh2YMmUK7O3tMXz4cHzxxRdCbzoEgPLycnz99ddwcHCAvb09Fi5ciJcvXwrly8rKwpw5c2Bvb4+hQ4diy5YtqKrqWkcY0M9Y9hp/5qK+du3aBQAYPXo0P83KygqjR4/GmjVrkJubK3S/Z8+e4YsvvoCzszMGDBgAZ2dnLFiwADdu3ODnOXfunEA9gwcPxscff4yrV6/K7LlJ66D3cUjowYMH+PPPPzFs2DCxeR49eoTffvsN06ZNw3vvvQc2m42IiAjMmDEDFy9ehL6+Pj/vihUr8OjRI2zYsAHq6uoIDg7GrFmzcOnSJaioNJyDw2az4eXlBQMDAwQFBaG4uBiBgYEoLi7Gnj172vyZZY1+xrKlrKyMo0ePCqX36NGD/+exY8fCx8cHdXV1+OuvvxAcHIxHjx7h3LlzUFRUBAA8f/4cM2bMgIWFBVavXo1u3bohJycHN27cwN27dzFy5EiB+x88eBAaGhooKSnB4cOH4evri4MHD2LEiBFt+8Ck9fBIk4KDg3nvvfceb8aMGTxPT0+R1xqVlpbyamtrBfIUFRXxrK2teXv37uWnPXz4kGdubs77/fff+WnZ2dk8Kysr3okTJ/hpERERPDs7O15RURE/7eLFizxzc3PekydPWu0Z2xv9jGXv7Z+rKB988AFv06ZNAmkRERE8c3NzXkJCAj/t22+/5b333nu8yspKoXvU19fz/3z27Fmeubm5wM+6rKyMN2jQIN78+fNb+iikHdBQlYQWL16MhIQExMXFic3DYrGgoCDYidPR0YG+vj7y8/P5aTdu3ICGhobAb1gGBgYYOHAgYmNj+WmxsbEYOnQodHR0+Gljx44Fk8kUyNdV0M+447OwsAAAgeEqNpsNdXV1fi/uTXJyTX/EqKuro2/fviKHEEnHRYFDQiNHjsSAAQMQEhIiVbnc3Fzk5OTAxMSEn5aeng4TExOh/1RmZmbIyMgQyGdmZiaQh8lkwsjISCBfV0E/Y9mrq6sT+uI1cWB2Y8Do3bs3P83a2hr5+flYv349UlJSwJXiLUf19fXIzc1F9+7dW/4QROYocEjBz88P9+/fx+3btyUuExAQABaLBQ8PD34am82GhoaGUF4Wi4XS0lKBfCwWq9l8XQn9jGWnsrIS1tbWQl+///47Pw+Px0NdXR1qamoQHx+P/f9v7/5jqfr/OIA/q92o7pDamqwyclnSTSST5F4VYYV+WSkrllyabiU/uipiWc2KprjrB/Or9XuztbZSW/qh9EsZf5SSVYhLXdG9lznfP5qzLjefjm9u0uvxl/M67/f1Pu/3dl68z/uet1wOkUgEe3t7tkxAQAD8/Pxw4cIF+Pv7w8nJCRKJ5KcPvXt6etDd3Y3m5makpqaiubkZ3t7eQ3255Deih+MceHh4wM7ODllZWXBxcfnP8jk5Obh16xaysrJgbGyshxb+/aiP9cfQ0BAFBQX94hYWFuzPRUVFKCoqYo8tLS2Rnp6uVX7MmDFIT0/Htm3bcPv2bTx+/Bj37t1DaWkpJBIJoqOjtcr/uPjB0NAQERERWLt27W+6KqIPlDg4ioqKQkREBCoqKgYsd+XKFRw9ehSJiYkQi8Va54yMjHQuaVQqlVo3PyMjIyiVSp3lfpyWGWmoj/Vj9OjRWv856LJ8+XKEhoZCrVajrKwM2dnZ2L9/P7tk90fW1tawtrbG1q1b0draitDQUMjlcoSEhMDExIQtl5ubCz6fD2NjY0ydOrXfMysy/NFUFUdisRh2dnYDzsOXlpZCJpMhPDwcGzZs6HfeysoKb9++7TeX/Pr1a62blZWVFWpra7XKaDQa1NfXj+ib
GvXx8GFqagp7e3s4OTlBKpVi48aNKCkpwYsXL/6zXmBgILq7u/t9x8bGxgb29vaYPn06JY2/FCWOQYiMjER5eTmePHnS79yjR48glUqxcuVKSKVSnfUXL14MpVKJsrIyNtbQ0ICnT5/C3d2djbm7u6O8vBxtbW1s7MaNG9BoNP3Wxo801MfDU1RUFPh8Pk6ePMnGWlpadJatq6sDAEyePFkfTSN6ROl+EDw9PTFr1iw8ePAA48ePZ+O1tbWQSCSYNm0aVq1ahefPn7Pn+Hw+u3pHKBTCw8MDe/fuRVxcHPh8PjIyMmBmZobAwEC2TlBQEAoKCiCRSCCRSKBQKJCWlgYfH59+K4FGGurjodfT06PVf70mTpyIGTNm6KxjYmKC4OBg5OTk4NWrV7C2tsaJEydQU1MDPz8/zJw5E2q1Gvfu3UNRURGWLFkCc3Pzob4UomeUOAYpMjISkZGRWrHKykq0t7ejvb0d69ev1zrn7OyM/Px89jg9PR2HDx9GUlISNBoNFixYgIyMDK218EZGRsjLy0NKSgq2b98OAwMD+Pr6IiYmZmgvbpigPh5aKpUK69at6xf38fEZ8FvzmzdvRkFBAeRyOY4cOYIVK1ZArVYjPz8fTU1NGDNmDMzNzbFnz55+Y0RGhlHMQIu2CSGEkD7oGQchhBBOKHEQQgjhhBIHIYQQTihxEEII4YQSByGEEE4ocZC/TlxcHPz8/DjX67ubICFkcGg5Lvnr1NfXo7OzE7a2tpzqNTY24tOnT5gzZ84QtYyQfwMlDkIIIZzQVBUHGzduRHh4OK5duwYvLy8IhUJs2bIFzc3N+PTpEyIiIuDg4AAvLy/cuHGjX/07d+4gKCgIQqEQzs7OiI+P13ozq0qlwsGDB+Ht7Q2hUAiRSISEhAR8/vxZ63PEYjGSk5NRXFwMsViMefPmISwsTOfbYIer/6cv+05VXb58GTY2NqipqUF4eDjmzp2LJUuWoLCwUKte36mqhw8fwsbGBmVlZZBKpXBwcICbmxvOnz8PALhw4QI8PT3h6OiIHTt24OvXr2zdXx0rQkYieuUIR9XV1VAoFIiJiUFnZydSU1ORkJCA9vZ2LF26FMHBwSgsLMTOnTtRWlrK7mx28+ZNbN++Hf7+/oiIiEBbWxuOHTsGqVSK06dPA/h+M+rq6kJ0dDQmT56MxsZGyOVyhIWF4eLFi1rtuH37Nt68eQOZTIaOjg4cOnQI8fHxyM3N1XeXDNpg+/Jndu3ahcDAQISEhKCkpATJycmwtbWFo6PjgPUOHDiAgIAArF69GlevXkViYiLq6+tRWVkJmUwGhUKB1NRUHDt2DDKZDAC3sSJkxPlTm53/jYKDgxmhUMi0tLSwsczMTEYgEDByuZyNKRQKRiAQMMXFxQzDMExPTw8jEomY6Ohorc979uwZIxAImIqKCp2/r6uri6murmYEAgFTVVXFxkUiEbNo0SJGpVKxsbNnzzICgYD58uXLb7nWoTbYvmQYhomNjWV8fX3Z40uXLjECgYDJz89nYxqNhlmwYAGTkpKi9flz585lj8vLyxmBQMCkpaWxsc7OTkYoFDIuLi5MR0cHG09KSmIWLlz40+v52VgRMhLRfxwc2draYtKkSexx725pbm5ubMzU1BTGxsbs1FFdXR0+fPiA+Ph4dHd3s+Vmz54NPp+Ply9fwsnJCQBw9epV5OXloa6uDp2dnWzZuro62NnZscfz58+HgYEBe9z7JtfGxkadW6EOR4Ppy4H8WI/H48HCwgJNTU2c6o0bNw5TpkyBlZWV1lt5LSws0NLSgq6uLvB4PAC/PlaEjDSUODjquz1p702k7/7WPB4PGo0GANDa2grg+14Gunz8+BHA930gYmNjsWbNGuzYsQMmJiZQKpUICwuDWq3+pXb0LTecDaYvB9I3YfJ4vF/qD131dMUYhoFGowGPx+M0VoSMNJQ49KB328x9+/bpXAra+1f39evXYWtri5SUFPZcVVWVfhpJOKGxIv8yShx6YGlpCTMzM7x7907nNqe9VCoVxo4dqxUrKSkZ6uaRQaCxIv8yShx6MGrUKCQkJEAqleLbt2/w8PDAhAkT0NDQgLt372LTpk0QCoVwdXVFcnIyjh8/DkdHR9y/fx+3bt36080nOtBYkX8ZJQ49WbZsGU6dOoXs7Gzs3r0bDMPAzMwMrq6u7NaaQUFBeP/+Pc6dO4czZ87AxcUFmZmZ8Pf3/8OtJ33RWJF/GX1znBBCCCf0zXFCCCGcUOIghBDCCSUOQgghnFDiIIQQwgklDkIIIZxQ4iCEEMIJJQ5CCCGcUOIghBDCyf8AmwccRLuMKYUAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Computing Correlation for n200/ersp/feat\n", + "\n", + "data_n200 = n200_val[1:,1:].flatten()\n", + "data_ersp = ersp_val[1:,1:].flatten()\n", + "data_feat = feat_val[1:,1:].flatten()\n", + "\n", + "print \"N200 and ERSP correlation:\", scipy.stats.pearsonr(data_n200, data_ersp)\n", + "\n", + "import seaborn as sns; sns.set()\n", + "np.corrcoef(data_ersp, data_feat)\n", + "\n", + "sns.set(font_scale=1.4)\n", + "map_data = np.array([[1.0, 0.7349, 0.0025],[0.7349, 1.0, -0.229],[0.0025, -0.229, 1.0]])\n", + "labels = ['N200 \\n mean', 'N200 \\n minima', 'ERSP']\n", + "ax = sns.heatmap(map_data, vmin=-1, vmax=1.0, center=0, annot=True, linewidths=.5, cmap='PuOr', xticklabels=labels, yticklabels=labels)\n", + "# ax.tick_params(direction='out', length=6, width=2, colors='r', grid_color='r', grid_alpha=0.5)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[1m Fixed Rule based Pairwise Evaluation \u001b[0m\n", + "Mean Accuracy:\n", + "N200 mean 0.7857142857142857\n", + "N200 minima 0.7142857142857143\n", + "ERSP 0.619047619047619\n", + "Decision 0.7857142857142857\n", + "Standard Deviation:\n", + "N200 mean 0.410325903324145\n", + "N200 minima 0.45175395145262565\n", + "ERSP 0.46899322865695736\n", + "Decision 0.410325903324145\n" + ] + } + ], + "source": [ + "# Pairwise Evaluation\n", + "\n", + "total = 0\n", + "# score_n200, score_feat, score_ersp, score_decn = 0, 0, 0, 0\n", + "dist_pref = 47\n", + "score_metric = np.zeros([4,total_sub+1])\n", + "\n", + "for sub_id in range(1,total_sub+1):\n", + " total = 0\n", + " score_n200, score_feat, score_ersp, score_decn = 0, 0, 0, 0\n", + " for prod_i in range(1,total_prod+1):\n", + " for prod_j in range(prod_i+1,total_prod+1):\n", + " if abs(pref[sub_id, prod_i] - pref[sub_id, prod_j]) > dist_pref:\n", + " \n", + " total = total + 1\n", + " ground_truth = (pref[sub_id, prod_i] > pref[sub_id, prod_j]) # True means \"prod_j\" is preferred\n", + " \n", + " count = 0\n", + " pred_n200 = n200_val[sub_id, prod_i] > n200_val[sub_id, prod_j]\n", + " if (pred_n200 == ground_truth):\n", + " score_n200 = score_n200 + 1 \n", + " count = count + 1\n", + " \n", + " pred_feat = feat_val[sub_id, prod_i] > feat_val[sub_id, prod_j]\n", + " if (pred_feat == ground_truth):\n", + " score_feat = score_feat + 1 \n", + " count = count + 1\n", + " \n", + " pred_ersp = ersp_val[sub_id, prod_i] > ersp_val[sub_id, prod_j]\n", + " if (pred_ersp == ground_truth):\n", + " score_ersp = score_ersp + 1 \n", + " count = count + 1\n", + " \n", + " if count > 1:\n", + " score_decn = score_decn + 1\n", + " \n", + " score_metric[0, sub_id] = score_n200*1.0/total\n", + " score_metric[1, sub_id] = score_feat*1.0/total\n", + " score_metric[2, sub_id] = score_ersp*1.0/total\n", + " score_metric[3, sub_id] = score_decn*1.0/total\n", + " \n", + "acc_mean = np.mean(score_metric[:,1:], axis=1)\n", + "acc_std = np.std(score_metric[:,1:], axis=1)\n", + "# print \"N200 Accuracy: \", \"Feat Accuracy: \", \"ERSP Accuracy: \", \"Decision Accuracy: \"\n", + "# print \"Mean: \", np.mean(score_metric[:,1:], axis=1)\n", + "# print \"Std: \", np.std(score_metric[:,1:], axis=1)\n", + "\n", + "method = ['N200 mean', 'N200 minima', 'ERSP', 'Decision']\n", + "\n", + "print \"\\033[1m Fixed Rule based Pairwise Evaluation \\033[0m\"\n", + "print \"Mean Accuracy:\"\n", + "for idx,item in enumerate(acc_mean):\n", 
+ " print method[idx], item\n", + "print \"Standard Deviation:\" \n", + "for idx,item in enumerate(acc_std):\n", + " print method[idx], item" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[1m Fixed Rule based Ranking \u001b[0m\n", + "Metrics: MHD, Tau, NDCG\n", + "N200 Mean [0.63888889 0.33333333 0.92837468]\n", + "ERSP [ 0.98888889 -0.15555556 0.83609209]\n", + "N200 minima [0.79444444 0.11111111 0.8795795 ]\n", + "Decision [0.78888889 0.15 0.89153368]\n", + "Random [ 0.99444444 -0.14444444 0.85200403]\n" + ] + } + ], + "source": [ + "## Ranking 3 prooducts\n", + "\n", + "rank_metrics = np.zeros([5,3]) # rows: N200, feat, ersp, decn1, 4 random col: avg dist, tau, NDCG\n", + "total = 0\n", + "\n", + "def tau_score(t_rank, p_rank):\n", + " tau, _ = stats.kendalltau(p_rank, t_rank)\n", + " return tau\n", + "\n", + "# Function to calculate NDCG score\n", + "def ndcg_score(rel, t_rank, p_rank):\n", + " rel = np.array(rel)\n", + " dcg = sum((rel)*1.0/np.log2(p_rank + 1))\n", + " idcg = sum((rel)*1.0/np.log2(t_rank + 1))\n", + " return dcg*1.0/idcg\n", + "\n", + "\n", + "for sub_id in range(3,4):#1,total_sub+1):\n", + " for prod_i in range(1,total_prod+1):\n", + " for prod_j in range(prod_i+1,total_prod+1):\n", + " for prod_k in range(prod_j+1,total_prod+1):\n", + "# if not ((pref[sub_id, prod_i] == pref[sub_id, prod_j]) or (pref[sub_id, prod_i] == pref[sub_id, prod_k]) or (pref[sub_id, prod_j] == pref[sub_id, prod_k])):\n", + " total = total + 1\n", + " ground_pref = [pref[sub_id, prod_i], pref[sub_id, prod_j], pref[sub_id, prod_k]] # [rank_i, rank_j, rank_k]\n", + " ground_ordr = np.argsort(ground_pref)\n", + " ground_rank = 3 - np.argsort(ground_ordr) # rank 1 means top, rank 3 means bottom\n", + " true_rel = ground_pref\n", + " \n", + " \n", + " pred_n200 = [n200_val[sub_id, prod_i], n200_val[sub_id, prod_j], n200_val[sub_id, prod_k]]\n", + " ordr_n200 = np.argsort(pred_n200)\n", + " rank_n200 = 3 - np.argsort(ordr_n200)\n", + "\n", + " pred_feat = [feat_val[sub_id, prod_i], feat_val[sub_id, prod_j], feat_val[sub_id, prod_k]]\n", + " ordr_feat = np.argsort(pred_feat)\n", + " rank_feat = 3 - np.argsort(ordr_feat)\n", + "\n", + " pred_ersp = [ersp_val[sub_id, prod_i], ersp_val[sub_id, prod_j], ersp_val[sub_id, prod_k]]\n", + " ordr_ersp = np.argsort(pred_ersp)\n", + " rank_ersp = 3 - np.argsort(ordr_ersp)\n", + " \n", + " # Computing for decision classifier 1\n", + " \n", + " if np.mean(rank_n200==rank_ersp)==1:\n", + " rank_decn = rank_n200\n", + " elif np.mean(rank_feat==rank_ersp)==1:\n", + " rank_decn = rank_feat\n", + " elif np.mean(rank_n200==rank_feat)==1:\n", + " rank_decn = rank_n200\n", + " else:\n", + " count = [0, 0, 0]\n", + " count[rank_n200[0]-1] = count[rank_n200[0]-1] + 2\n", + " count[rank_n200[1]-1] = count[rank_n200[1]-1] + 1\n", + " count[rank_feat[0]-1] = count[rank_feat[0]-1] + 2\n", + " count[rank_feat[1]-1] = count[rank_feat[1]-1] + 1\n", + " count[rank_ersp[0]-1] = count[rank_ersp[0]-1] + 2\n", + " count[rank_ersp[1]-1] = count[rank_ersp[1]-1] + 1\n", + " ordr_decn = np.argsort(count)\n", + " rank_decn = 3 - np.argsort(ordr_decn)\n", + " \n", + " # Computing for Random\n", + " pred_rand = np.random.uniform(0,1,[1,3])\n", + " ordr_rand = np.argsort(pred_rand)\n", + " rank_rand = 3 - np.argsort(ordr_rand)\n", + " rank_rand = rank_rand.flatten()\n", + " \n", + " all_ranks = [rank_n200, rank_ersp, rank_feat, rank_decn, rank_rand]\n", + " for idx_r, 
pred_rank in enumerate(all_ranks):\n", + " rank_metrics[idx_r,0] = rank_metrics[idx_r,0] + np.mean(abs(ground_rank - pred_rank))\n", + " rank_metrics[idx_r,1] = rank_metrics[idx_r,1] + tau_score(ground_rank, pred_rank)\n", + " rank_metrics[idx_r,2] = rank_metrics[idx_r,2] + ndcg_score(true_rel, ground_rank, pred_rank)\n", + " \n", + " \n", + " \n", + " \n", + " \n", + "print \"\\033[1m Fixed Rule based Ranking \\033[0m\"\n", + "print \"Metrics: MHD, Tau, NDCG\"\n", + "methods = ['N200 Mean ', 'ERSP ', 'N200 minima', 'Decision ', 'Random ']\n", + "for idx, item in enumerate(rank_metrics):\n", + " print methods[idx], item/total\n", + " \n", + "# print rank_metrics/total\n", + "# print total\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.15" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/riemann/__init__.py b/riemann/__init__.py new file mode 100644 index 0000000..713c826 --- /dev/null +++ b/riemann/__init__.py @@ -0,0 +1 @@ +from utils import * \ No newline at end of file diff --git a/riemann/utils.py b/riemann/utils.py new file mode 100644 index 0000000..8008be7 --- /dev/null +++ b/riemann/utils.py @@ -0,0 +1,188 @@ +import numpy +from numpy import matrix, sqrt, diag, log, exp, mean, eye, triu_indices_from, zeros, cov, concatenate, triu +from numpy.linalg import norm, inv, eigvals +from numpy.linalg import eigh as eig +from scipy.linalg import eigvalsh + +def sqrtm(Ci): + D,V = eig(Ci) + D = matrix(diag(sqrt(D))) + V = matrix(V) + Out = matrix(V*D*V.T) + return Out + +def logm(Ci): + D,V = eig(Ci) + Out = numpy.dot(numpy.multiply(V,log(D)),V.T) + return Out + +def expm(Ci): + D,V = eig(Ci) + D = matrix(diag(exp(D))) + V = matrix(V) + Out = matrix(V*D*V.T) + return Out + +def invsqrtm(Ci): + D,V = eig(Ci) + D = matrix(diag(1.0/sqrt(D))) + V = matrix(V) + Out = matrix(V*D*V.T) + return Out + +def powm(Ci,alpha): + D,V = eig(Ci) + D = matrix(diag(D**alpha)) + V = matrix(V) + Out = matrix(V*D*V.T) + return Out + + +def distance(C0,C1): + return sqrt((log(eigvalsh(C0,C1))**2).sum()) + +def geodesic(C0,C1,alpha): + A = matrix(sqrtm(C0)) + B = matrix(invsqrtm(C0)) + C = B*C1*B + D = matrix(powm(C,alpha)) + E = matrix(A*D*A) + return E + +def tangent_space(covmats,ref): + Nt,Ne,Ne = covmats.shape + Cm12 = invsqrtm(ref) + idx = triu_indices_from(ref) + T = zeros((Nt,Ne*(Ne+1)/2)) + coeffs = (sqrt(2)*triu(numpy.ones((Ne,Ne)),1) + numpy.eye(Ne))[idx] + for index in range(Nt): + tmp = numpy.dot(numpy.dot(Cm12,covmats[index,:,:]),Cm12) + tmp = logm(tmp) + T[index,:] = numpy.multiply(coeffs,tmp[idx]) + return T + +#def untangent_space(T,ref): +# Nt,Nd = T.shape +# Ne = (sqrt(1+8*Nd)-1)/2 +# Cm12 = invsqrtm(ref) +# idx = triu_indices_from(ref) +# covmats = zeros((Nt,Ne,Ne)) +# for index in range(Nt): +# tmp = logm(matrix(Cm12*covmats[index,:,:]*Cm12)) +# #fixme : not very efficient +# tmp = sqrt(2)*triu(tmp,1) + diag(diag(tmp)) +# T[index,:] = tmp[idx] +# return T + +def riemann_mean(covmats,tol=10e-9,maxiter=50): + #init + Nt,Ne,Ne = covmats.shape + C = numpy.mean(covmats,axis=0) + k=0 + J = eye(2) + nu = 1.0 + tau = 10e19 + crit = norm(J,ord='fro') + # stop 
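The helpers defined so far in riemann/utils.py (sqrtm, logm, expm, invsqrtm, powm, distance, geodesic, tangent_space) all act on symmetric positive-definite covariance matrices through their eigendecomposition. A small usage sketch on two random covariance matrices; the module as shipped is Python 2 (print statement, implicit relative import in riemann/__init__.py), so this assumes a Python 2 interpreter:

import numpy as np
from riemann.utils import distance, geodesic

rng = np.random.RandomState(0)
C0 = np.cov(rng.randn(7, 50))   # 7x7 SPD covariance of random data
C1 = np.cov(rng.randn(7, 50))

d = distance(C0, C1)                        # affine-invariant Riemannian distance
half = distance(C0, geodesic(C0, C1, 0.5))  # distance to the geodesic midpoint
print(d)
print(half)                                 # about d / 2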
when J<10^-9 or max iteration = 50 + while (crit>tol) and (ktol): + k=k+1 + C12 = sqrtm(C) + Cm12 = invsqrtm(C) + T = zeros((Ne,Ne)) + + for index in range(Nt): + tmp = numpy.dot(numpy.dot(Cm12,covmats[index,:,:]),Cm12) + T += logm(matrix(tmp)) + + #J = mean(T,axis=0) + J = T/Nt + crit = norm(J,ord='fro') + h = nu*crit + if h < tau: + C = matrix(C12*expm(nu*J)*C12) + nu = 0.95*nu + tau = h + else: + print "bad" + nu = 0.5*nu + + return C + +def logeuclid_mean(covmats): + Nt,Ne,Ne = covmats.shape + T = zeros((Ne,Ne)) + for index in range(Nt): + T+= logm(matrix(covmats[index,:,:])) + C = expm(T/Nt) + + return C + +def logdet_mean(covmats,tol=10e-5,maxiter=50): + #init + Nt,Ne,Ne = covmats.shape + C = mean(covmats,axis=0) + k=0 + J = eye(2) + crit = norm(J,ord='fro') + # stop when J<10^-9 or max iteration = 50 + while (crit>tol) and (k
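In riemann_mean above (and in the cut-off guard of logdet_mean), the loop condition `while (crit>tol) and (ktol):` has lost text and presumably reads `while (crit > tol) and (k < maxiter) and (nu > tol):`, consistent with the "stop when J<10^-9 or max iteration = 50" comment. A compact sketch of the same fixed-point (Karcher mean) iteration, written with scipy.linalg instead of the module's eig-based helpers and without the nu/tau step-size adaptation:

import numpy as np
from numpy.linalg import norm, inv
from scipy.linalg import sqrtm, expm, logm

def karcher_mean(covmats, tol=1e-8, maxiter=50):
    # covmats: (Nt, Ne, Ne) stack of SPD matrices; start from the arithmetic mean
    C = np.mean(covmats, axis=0)
    for _ in range(maxiter):
        C12 = sqrtm(C)
        Cm12 = inv(C12)
        # tangent-space mean: average the matrix logs of the whitened covariances
        J = np.mean([logm(Cm12 @ Ci @ Cm12) for Ci in covmats], axis=0)
        if norm(J, ord='fro') < tol:
            return C
        C = C12 @ expm(J) @ C12   # map the averaged tangent vector back to the manifold
    return C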