|
| 1 | +{ |
| 2 | + "cells": [ |
| 3 | + { |
| 4 | + "cell_type": "markdown", |
| 5 | + "metadata": {}, |
| 6 | + "source": [ |
| 7 | + "Sascha Spors,\n", |
| 8 | + "Professorship Signal Theory and Digital Signal Processing,\n", |
| 9 | + "Institute of Communications Engineering (INT),\n", |
| 10 | + "Faculty of Computer Science and Electrical Engineering (IEF),\n", |
| 11 | + "University of Rostock,\n", |
| 12 | + "Germany\n", |
| 13 | + "\n", |
| 14 | + "# Data Driven Audio Signal Processing - A Tutorial with Computational Examples\n", |
| 15 | + "\n", |
| 16 | + "Master Course #24512\n", |
| 17 | + "\n", |
| 18 | + "- lecture: https://github.com/spatialaudio/data-driven-audio-signal-processing-lecture\n", |
| 19 | + "- tutorial: https://github.com/spatialaudio/data-driven-audio-signal-processing-exercise\n", |
| 20 | + "\n", |
| 21 | + "Feel free to contact lecturer [email protected]\n", |
| 22 | + "\n", |
| 23 | + "# PCA on Achieved Points of Written Examination " |
| 24 | + ] |
| 25 | + }, |
| 26 | + { |
| 27 | + "cell_type": "code", |
| 28 | + "execution_count": null, |
| 29 | + "metadata": {}, |
| 30 | + "outputs": [], |
| 31 | + "source": [ |
| 32 | + "import numpy as np\n", |
| 33 | + "import scipy\n", |
| 34 | + "from scipy.linalg import svd, diagsvd\n", |
| 35 | + "import matplotlib as mpl\n", |
| 36 | + "import matplotlib.pyplot as plt\n", |
| 37 | + "\n", |
| 38 | + "np.set_printoptions(precision=3, sign=' ', suppress=True)\n", |
| 39 | + "\n", |
| 40 | + "print(np.__version__) # tested with 1.26.4\n", |
| 41 | + "print(scipy.__version__) # tested with 1.13.1\n", |
| 42 | + "print(mpl.__version__) # tested with 3.9.2" |
| 43 | + ] |
| 44 | + }, |
| 45 | + { |
| 46 | + "cell_type": "code", |
| 47 | + "execution_count": null, |
| 48 | + "metadata": {}, |
| 49 | + "outputs": [], |
| 50 | + "source": [ |
| 51 | + "X = np.loadtxt(open(\"exam_points_meanfree_unitvar.csv\", \"rb\"), delimiter=\";\", skiprows=0)\n", |
| 52 | + "N, F = X.shape\n", |
| 53 | + "print(N, F) # 34 students, 5 tasks for exam on signals & systems, a typical course in electrical engineering bachelor studies\n", |
 | 54 | + "# columns correspond to these tasks\n", 
| 55 | + "task_label = ['Task 1: Convolution', 'Task 2: Fourier', 'Task 3: Sampling', 'Task 4: Laplace Domain', 'Task 5: z-Domain']" |
| 56 | + ] |
| 57 | + }, |
| 58 | + { |
| 59 | + "cell_type": "code", |
| 60 | + "execution_count": null, |
| 61 | + "metadata": {}, |
| 62 | + "outputs": [], |
| 63 | + "source": [ |
| 64 | + "# data in exam_points_meanfree_unitvar.csv is already mean-free and columns have var=1\n", |
| 65 | + "# so the numbers in X do not represent points or percentage,\n", |
| 66 | + "# but rather encode the performance of the students per task in a normalised way\n", |
| 67 | + "# X is however sorted: first row belongs to best grade, last row to worst grade\n", |
| 68 | + "np.mean(X, axis=0), np.std(X, axis=0, ddof=1), np.var(X, axis=0, ddof=1)" |
| 69 | + ] |
| 70 | + }, |
| 71 | + { |
| 72 | + "cell_type": "code", |
| 73 | + "execution_count": null, |
| 74 | + "metadata": {}, |
| 75 | + "outputs": [], |
| 76 | + "source": [ |
| 77 | + "# for completeness of PCA algorithm ->\n", |
 | 78 | + "# make X zscore (although it is already)\n", 
| 79 | + "mu = np.mean(X, axis=0)\n", |
| 80 | + "X = X - mu # de-mean\n", |
| 81 | + "sigma = np.sqrt(np.sum(X**2, axis=0) / (N-1))\n", |
| 82 | + "X = X / sigma # normalise to std=1\n", |
| 83 | + "np.mean(X, axis=0), np.std(X, axis=0, ddof=1), np.var(X, axis=0, ddof=1) # check" |
| 84 | + ] |
| 85 | + }, |
| 86 | + { |
| 87 | + "cell_type": "code", |
| 88 | + "execution_count": null, |
| 89 | + "metadata": {}, |
| 90 | + "outputs": [], |
| 91 | + "source": [ |
| 92 | + "X # print mean=0 / var=1 data matrix" |
| 93 | + ] |
| 94 | + }, |
| 95 | + { |
| 96 | + "cell_type": "code", |
| 97 | + "execution_count": null, |
| 98 | + "metadata": {}, |
| 99 | + "outputs": [], |
| 100 | + "source": [ |
| 101 | + "# get SVD / CovMatrix stuff\n", |
| 102 | + "[U, s, Vh] = svd(X)\n", |
| 103 | + "V = Vh.T # we don't use Vh later on!\n", |
| 104 | + "S = diagsvd(s, N, F) # sing vals matrix\n", |
| 105 | + "D, _ = np.linalg.eig(X.T @ X / (N-1)) # eig vals\n", |
| 106 | + "D = -np.sort(-D) # sort them, then ==\n", |
| 107 | + "d = s**2 / (N-1)\n", |
| 108 | + "print(np.allclose(d, D)) # so we go for d later on\n", |
| 109 | + "\n", |
 | 110 | + "# switch polarities for nicer interpretation of the\n", 
| 111 | + "# exam data\n", |
| 112 | + "V[:,0] *= -1\n", |
| 113 | + "U[:,0] *= -1\n", |
| 114 | + "\n", |
| 115 | + "V[:,2] *= -1\n", |
| 116 | + "U[:,2] *= -1\n", |
| 117 | + "\n", |
| 118 | + "V[:,3] *= -1\n", |
| 119 | + "U[:,3] *= -1" |
| 120 | + ] |
| 121 | + }, |
| 122 | + { |
| 123 | + "cell_type": "code", |
| 124 | + "execution_count": null, |
| 125 | + "metadata": {}, |
| 126 | + "outputs": [], |
| 127 | + "source": [ |
| 128 | + "# PCA\n", |
| 129 | + "US = U @ S\n", |
| 130 | + "PC_Features = US @ diagsvd(1 / np.sqrt(d), F, F) # normalised such that columns have var 1, aka (normalised) PC scores\n", |
| 131 | + "print(np.var(PC_Features, axis=0, ddof=1))\n", |
| 132 | + "#PC_Loadings = (diagsvd(np.sqrt(d), F, F) @ V.T).T # ==\n", |
| 133 | + "PC_Loadings = V @ diagsvd(np.sqrt(d), F, F) # aka PC coeff, not unit-length anymore, but normalised such that it shows correlation between PC_Features and X" |
| 134 | + ] |
| 135 | + }, |
| 136 | + { |
| 137 | + "cell_type": "code", |
| 138 | + "execution_count": null, |
| 139 | + "metadata": {}, |
| 140 | + "outputs": [], |
| 141 | + "source": [ |
| 142 | + "np.allclose(X, PC_Features @ PC_Loadings.T) # check correct matrix factorisation" |
| 143 | + ] |
| 144 | + }, |
| 145 | + { |
| 146 | + "cell_type": "code", |
| 147 | + "execution_count": null, |
| 148 | + "metadata": {}, |
| 149 | + "outputs": [], |
| 150 | + "source": [ |
| 151 | + "# project an x column vector to a pc feature column -> do this for all options -> get all weights for linear comb of pc features\n", |
| 152 | + "# correlation uses unit-length vectors\n", |
| 153 | + "PC_Loadings_manual = np.zeros((F, F))\n", |
| 154 | + "for row in range(F):\n", |
| 155 | + " tmp_x = X[:, row] / np.linalg.norm(X[:, row])\n", |
| 156 | + " for column in range(F):\n", |
| 157 | + " tmp_pc = PC_Features[:, column] / np.linalg.norm(PC_Features[:, column])\n", |
| 158 | + " PC_Loadings_manual[row, column] = np.inner(tmp_pc, tmp_x)\n", |
| 159 | + "np.allclose(PC_Loadings_manual, PC_Loadings) # we get the PC_Loadings matrix" |
| 160 | + ] |
| 161 | + }, |
| 162 | + { |
| 163 | + "cell_type": "code", |
| 164 | + "execution_count": null, |
| 165 | + "metadata": {}, |
| 166 | + "outputs": [], |
| 167 | + "source": [ |
| 168 | + "# explained variance\n", |
| 169 | + "d, np.var(US, axis=0, ddof=1)" |
| 170 | + ] |
| 171 | + }, |
| 172 | + { |
| 173 | + "cell_type": "code", |
| 174 | + "execution_count": null, |
| 175 | + "metadata": {}, |
| 176 | + "outputs": [], |
| 177 | + "source": [ |
| 178 | + "# explained cum variance in %\n", |
| 179 | + "cum_var = np.cumsum(d) / np.sum(d) * 100\n", |
| 180 | + "cum_var" |
| 181 | + ] |
| 182 | + }, |
| 183 | + { |
| 184 | + "cell_type": "markdown", |
| 185 | + "metadata": {}, |
| 186 | + "source": [ |
| 187 | + "# Check via Plots" |
| 188 | + ] |
| 189 | + }, |
| 190 | + { |
| 191 | + "cell_type": "code", |
| 192 | + "execution_count": null, |
| 193 | + "metadata": {}, |
| 194 | + "outputs": [], |
| 195 | + "source": [ |
| 196 | + "plt.figure(figsize=(12,8))\n", |
| 197 | + "\n", |
| 198 | + "plt.subplot(2,1,1)\n", |
| 199 | + "for f in range(F):\n", |
| 200 | + " plt.plot(X[:, f], 'o-', color='C'+str(f), label='Task '+str(f+1), ms=3)\n", |
| 201 | + "plt.legend(loc='lower left')\n", |
| 202 | + "plt.xticks([0, N-1], labels=['best grade', 'worst grade'])\n", |
| 203 | + "plt.ylabel('normalised points (mean-free, var=1)')\n", |
| 204 | + "plt.grid(True)\n", |
| 205 | + "plt.title(task_label)\n", |
| 206 | + "\n", |
| 207 | + "plt.subplot(2,1,2)\n", |
| 208 | + "for f in range(F):\n", |
| 209 | + " plt.plot(US[:, f], 'o-', color='C'+str(f), label='PCA v '+str(f+1), lw=(F-f)*2/3, ms=(F-f)*3/2)\n", |
| 210 | + "plt.legend(loc='lower left')\n", |
| 211 | + "plt.xticks([0, N-1], labels=['best grade', 'worst grade'])\n", |
| 212 | + "plt.ylabel('PC features (mean-free, sorted var)')\n", |
| 213 | + "plt.xlabel('student index (sorted grade)')\n", |
| 214 | + "plt.grid(True)\n", |
| 215 | + "plt.title(['cum var in %:', cum_var])\n", |
| 216 | + "plt.tight_layout()" |
| 217 | + ] |
| 218 | + }, |
| 219 | + { |
| 220 | + "cell_type": "code", |
| 221 | + "execution_count": null, |
| 222 | + "metadata": {}, |
| 223 | + "outputs": [], |
| 224 | + "source": [ |
| 225 | + "# correlation between task and pc\n", |
| 226 | + "pc_label = ['PC 1', 'PC 2', 'PC 3', 'PC 4', 'PC 5']\n", |
| 227 | + "cmap = plt.get_cmap('Spectral_r', 8)\n", |
| 228 | + "fig = plt.figure(figsize=(6,4))\n", |
| 229 | + "ax = fig.add_subplot(111)\n", |
| 230 | + "cax = ax.matshow(PC_Loadings, cmap=cmap, vmin=-1, vmax=+1)\n", |
| 231 | + "fig.colorbar(cax)\n", |
| 232 | + "ax.set_xticks(np.arange(len(pc_label)))\n", |
| 233 | + "ax.set_yticks(np.arange(len(task_label)))\n", |
| 234 | + "ax.set_xticklabels(pc_label)\n", |
| 235 | + "ax.set_yticklabels(task_label)\n", |
| 236 | + "ax.set_title('Loading Matrix = PC x contributes to Task y')\n", |
| 237 | + "plt.tight_layout()\n", |
| 238 | + "\n", |
| 239 | + "# a rank 3 approximation of the data,\n", |
| 240 | + "# i.e. using only PC1, PC2 and PC3 in the linear combination to reconstruct X\n", |
| 241 | + "# would only change one grade by a 1/3 grade step\n", |
| 242 | + "# so 85.899 % explained variance would be enough to figure the actual grading\n", |
| 243 | + "\n", |
| 244 | + "# PC1 and PC2 might allow an intuitive interpretation:\n", |
 | 245 | + "# students are very well prepared for convolution, Laplace and z-Domain tasks\n", 
 | 246 | + "# as these tasks are always very similar and definitely will be queried in the exam\n", 
 | 247 | + "# so, PC1 indicates the performance on 'fulfilled' expectations and is\n", 
| 248 | + "# highly correlated with the achieved grade\n", |
| 249 | + "# the Fourier task and the sampling task were chosen out of a wide range of options\n", |
| 250 | + "# here students have rather 'unknown' expectations, which is why we need PC 2 to cover this\n", |
| 251 | + "#\n", |
| 252 | + "# PC 3 to 5 show positive vs. negative correlations, i.e. mostly one good task vs. one bad task performance\n", |
| 253 | + "# some of these results are intuitive: we know that students sometimes have preferences for Laplace vs. z-Domain " |
| 254 | + ] |
| 255 | + }, |
| 256 | + { |
| 257 | + "cell_type": "code", |
| 258 | + "execution_count": null, |
| 259 | + "metadata": {}, |
| 260 | + "outputs": [], |
| 261 | + "source": [ |
| 262 | + "np.sum(PC_Loadings**2, axis=0) # that's again the explained variance of the PCs" |
| 263 | + ] |
| 264 | + }, |
| 265 | + { |
| 266 | + "cell_type": "code", |
| 267 | + "execution_count": null, |
| 268 | + "metadata": {}, |
| 269 | + "outputs": [], |
| 270 | + "source": [ |
| 271 | + "np.sum(PC_Loadings**2, axis=1) # communalities, must sum to 1 in our normalised handling" |
| 272 | + ] |
| 273 | + }, |
| 274 | + { |
| 275 | + "cell_type": "markdown", |
| 276 | + "metadata": {}, |
| 277 | + "source": [ |
| 278 | + "## Copyright\n", |
| 279 | + "\n", |
| 280 | + "- the notebooks are provided as [Open Educational Resources](https://en.wikipedia.org/wiki/Open_educational_resources)\n", |
| 281 | + "- feel free to use the notebooks for your own purposes\n", |
| 282 | + "- the text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/)\n", |
| 283 | + "- the code of the IPython examples is licensed under the [MIT license](https://opensource.org/licenses/MIT)\n", |
| 284 | + "- please attribute the work as follows: *Frank Schultz, Data Driven Audio Signal Processing - A Tutorial Featuring Computational Examples, University of Rostock* ideally with relevant file(s), github URL https://github.com/spatialaudio/data-driven-audio-signal-processing-exercise, commit number and/or version tag, year." |
| 285 | + ] |
| 286 | + } |
| 287 | + ], |
| 288 | + "metadata": { |
| 289 | + "kernelspec": { |
| 290 | + "display_name": "myddasp", |
| 291 | + "language": "python", |
| 292 | + "name": "python3" |
| 293 | + }, |
| 294 | + "language_info": { |
| 295 | + "codemirror_mode": { |
| 296 | + "name": "ipython", |
| 297 | + "version": 3 |
| 298 | + }, |
| 299 | + "file_extension": ".py", |
| 300 | + "mimetype": "text/x-python", |
| 301 | + "name": "python", |
| 302 | + "nbconvert_exporter": "python", |
| 303 | + "pygments_lexer": "ipython3", |
| 304 | + "version": "3.12.3" |
| 305 | + } |
| 306 | + }, |
| 307 | + "nbformat": 4, |
| 308 | + "nbformat_minor": 2 |
| 309 | +} |
0 commit comments