Skip to content

Commit 6e92061

Browse files
committed
pca on exam grades
1 parent 87dd563 commit 6e92061

File tree

3 files changed

+344
-0
lines changed

3 files changed

+344
-0
lines changed

exam_points_meanfree_unitvar.csv

+34
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
1.408367168897572741e+00;1.757964546987063059e+00;1.412795836484065815e+00;1.539532365180545703e+00;4.848876374569938141e-01
2+
1.054976144967554541e+00;1.757964546987063059e+00;4.597192801257678485e-01;1.539532365180545703e+00;1.390721685453574885e+00
3+
1.231671656932563641e+00;6.645963531292558013e-01;4.597192801257678485e-01;1.699118037180968210e+00;1.571888495052891077e+00
4+
3.481940971075180302e-01;1.029052417748524961e+00;2.556487704114023707e+00;1.858703709181390717e+00;-4.209464105395876454e-01
5+
1.761758192827591163e+00;-4.287718407285517341e-01;1.031565213940746961e+00;5.820183331780111047e-01;8.472212566556260871e-01
6+
3.481940971075180302e-01;1.029052417748524961e+00;2.691039688541081443e-01;-3.754956988245231608e-01;1.209554875854258693e+00
7+
-5.352834627175273585e-01;-6.431577610928257416e-02;1.222180525212406499e+00;7.416040051784336118e-01;1.028388066254942279e+00
8+
3.481940971075180302e-01;6.645963531292558013e-01;4.597192801257678485e-01;-5.632435482367830620e-02;8.472212566556260871e-01
9+
7.015851210375363411e-01;-1.157683969967089999e+00;7.848865758244849555e-02;1.539532365180545703e+00;8.472212566556260871e-01
10+
7.015851210375363411e-01;8.468243854388903813e-01;4.597192801257678485e-01;-3.754956988245231608e-01;4.848876374569938141e-01
11+
-1.818924387875090753e-01;-6.431577610928257416e-02;6.503345913974275527e-01;5.820183331780111047e-01;6.660544470563101171e-01
12+
1.054976144967554541e+00;-1.157683969967089999e+00;-1.121266536892111809e-01;9.011896771788560079e-01;8.472212566556260871e-01
13+
7.015851210375363411e-01;-6.431577610928257416e-02;6.503345913974275527e-01;-5.632435482367830620e-02;3.037208278576775111e-01
14+
-5.196926822499936365e-03;1.393508482367794121e+00;8.409499026690872014e-01;-3.754956988245231608e-01;-4.209464105395876454e-01
15+
-1.818924387875090753e-01;-1.157683969967089999e+00;8.409499026690872014e-01;4.224326611775887086e-01;6.660544470563101171e-01
16+
1.408367168897572741e+00;6.645963531292558013e-01;-1.446433832590828805e+00;4.224326611775887086e-01;-6.021132201389038929e-01
17+
1.054976144967554541e+00;6.645963531292558013e-01;-8.745878987758498591e-01;-5.350813708249456679e-01;1.225540182583612220e-01
18+
1.714985851425088748e-01;-1.157683969967089999e+00;6.503345913974275527e-01;1.032613171767441246e-01;4.848876374569938141e-01
19+
-1.818924387875090753e-01;-1.157683969967089999e+00;-4.933572762325305061e-01;7.416040051784336118e-01;1.209554875854258693e+00
20+
-7.119789746825364585e-01;6.645963531292558013e-01;1.222180525212406499e+00;-6.946670428253680640e-01;-7.832800297382201959e-01
21+
-1.818924387875090753e-01;-1.157683969967089999e+00;7.848865758244849555e-02;-3.754956988245231608e-01;1.209554875854258693e+00
22+
-3.585879507525182031e-01;6.645963531292558013e-01;-6.839725875041902103e-01;-5.632435482367830620e-02;-2.397796009402713424e-01
23+
1.714985851425088748e-01;-7.932279053478209496e-01;-4.933572762325305061e-01;5.820183331780111047e-01;-7.832800297382201959e-01
24+
3.481940971075180302e-01;-1.157683969967089999e+00;-1.255818521319169268e+00;5.820183331780111047e-01;-5.861279134095506022e-02
25+
7.015851210375363411e-01;-1.157683969967089999e+00;1.031565213940746961e+00;-1.492595402827480155e+00;-7.832800297382201959e-01
26+
-1.818924387875090753e-01;-6.431577610928257416e-02;-6.839725875041902103e-01;-1.333009730827057870e+00;3.037208278576775111e-01
27+
-1.948847558437600380e+00;8.468243854388903813e-01;-6.839725875041902103e-01;-3.754956988245231608e-01;-6.021132201389038929e-01
28+
-3.585879507525182031e-01;-1.157683969967089999e+00;-1.446433832590828805e+00;-3.754956988245231608e-01;-6.021132201389038929e-01
29+
-1.418761022542573080e+00;-1.157683969967089999e+00;-3.027419649608708574e-01;-5.350813708249456679e-01;-7.832800297382201959e-01
30+
-2.125543070402609480e+00;1.393508482367794121e+00;-1.065203210047509508e+00;-1.173424058826635363e+00;-1.326780458536169105e+00
31+
-1.242065510577563980e+00;6.645963531292558013e-01;-1.446433832590828805e+00;-1.333009730827057870e+00;-1.870280887334117903e+00
32+
-8.886744866475456694e-01;-1.157683969967089999e+00;-8.745878987758498591e-01;-8.542527148257904601e-01;-1.507947268135485297e+00
33+
-1.772152046472591280e+00;3.001402885099865858e-01;-1.065203210047509508e+00;-1.971352418828747455e+00;-1.870280887334117903e+00
34+
-1.242065510577563980e+00;-7.932279053478209496e-01;-1.446433832590828805e+00;-1.492595402827480155e+00;-1.870280887334117903e+00

index.ipynb

+1
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,7 @@
7070
"- [pca_2D.ipynb](pca_2D.ipynb)\n",
7171
"- [pca_3D.ipynb](pca_3D.ipynb)\n",
7272
"- [pca_audio_features.ipynb](pca_audio_features.ipynb)\n",
73+
"- [pca_of_exam_grades.ipynb](pca_of_exam_grades.ipynb)\n",
7374
"\n",
7475
"## Exercise: Bias Variance Trade-Off vs. Model Complexity\n",
7576
"- [Bias-Variance Trade-Off vs. Model Complexity](bias_variance_linear_regression.ipynb)\n",

pca_of_exam_grades.ipynb

+309
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,309 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "markdown",
5+
"metadata": {},
6+
"source": [
7+
"Sascha Spors,\n",
8+
"Professorship Signal Theory and Digital Signal Processing,\n",
9+
"Institute of Communications Engineering (INT),\n",
10+
"Faculty of Computer Science and Electrical Engineering (IEF),\n",
11+
"University of Rostock,\n",
12+
"Germany\n",
13+
"\n",
14+
"# Data Driven Audio Signal Processing - A Tutorial with Computational Examples\n",
15+
"\n",
16+
"Master Course #24512\n",
17+
"\n",
18+
"- lecture: https://github.com/spatialaudio/data-driven-audio-signal-processing-lecture\n",
19+
"- tutorial: https://github.com/spatialaudio/data-driven-audio-signal-processing-exercise\n",
20+
"\n",
21+
"Feel free to contact lecturer [email protected]\n",
22+
"\n",
23+
"# PCA on Achieved Points of Written Examination "
24+
]
25+
},
26+
{
27+
"cell_type": "code",
28+
"execution_count": null,
29+
"metadata": {},
30+
"outputs": [],
31+
"source": [
32+
"import numpy as np\n",
33+
"import scipy\n",
34+
"from scipy.linalg import svd, diagsvd\n",
35+
"import matplotlib as mpl\n",
36+
"import matplotlib.pyplot as plt\n",
37+
"\n",
38+
"np.set_printoptions(precision=3, sign=' ', suppress=True)\n",
39+
"\n",
40+
"print(np.__version__) # tested with 1.26.4\n",
41+
"print(scipy.__version__) # tested with 1.13.1\n",
42+
"print(mpl.__version__) # tested with 3.9.2"
43+
]
44+
},
45+
{
46+
"cell_type": "code",
47+
"execution_count": null,
48+
"metadata": {},
49+
"outputs": [],
50+
"source": [
51+
"# np.loadtxt accepts a file name directly; the former open(..., \"rb\") handle was never closed (resource leak)\n",
"X = np.loadtxt(\"exam_points_meanfree_unitvar.csv\", delimiter=\";\", skiprows=0)\n",
"N, F = X.shape\n",
"print(N, F)  # 34 students, 5 tasks for exam on signals & systems, a typical course in electrical engineering bachelor studies\n",
"# columns correspond to these tasks\n",
"task_label = ['Task 1: Convolution', 'Task 2: Fourier', 'Task 3: Sampling', 'Task 4: Laplace Domain', 'Task 5: z-Domain']"
56+
]
57+
},
58+
{
59+
"cell_type": "code",
60+
"execution_count": null,
61+
"metadata": {},
62+
"outputs": [],
63+
"source": [
64+
"# the data in exam_points_meanfree_unitvar.csv is already mean-free and each column has var=1,\n",
"# hence the numbers in X do not represent points or percentages,\n",
"# but rather encode the students' per-task performance in a normalised way\n",
"# note that X is sorted: the first row belongs to the best grade, the last row to the worst grade\n",
"np.mean(X, axis=0), np.std(X, axis=0, ddof=1), np.var(X, axis=0, ddof=1)"
69+
]
70+
},
71+
{
72+
"cell_type": "code",
73+
"execution_count": null,
74+
"metadata": {},
75+
"outputs": [],
76+
"source": [
77+
"# for completeness of the PCA algorithm ->\n",
"# z-score X (although it is already standardised)\n",
"mu = np.mean(X, axis=0)\n",
"X = X - mu  # de-mean\n",
"# idiomatic unbiased std instead of hand-rolled sqrt(sum(X**2) / (N-1));\n",
"# identical on de-meaned data\n",
"sigma = np.std(X, axis=0, ddof=1)\n",
"X = X / sigma  # normalise to std=1\n",
"np.mean(X, axis=0), np.std(X, axis=0, ddof=1), np.var(X, axis=0, ddof=1)  # check"
84+
]
85+
},
86+
{
87+
"cell_type": "code",
88+
"execution_count": null,
89+
"metadata": {},
90+
"outputs": [],
91+
"source": [
92+
"X  # display the standardised data matrix (mean=0 / var=1)"
93+
]
94+
},
95+
{
96+
"cell_type": "code",
97+
"execution_count": null,
98+
"metadata": {},
99+
"outputs": [],
100+
"source": [
101+
"# get SVD / covariance-matrix stuff\n",
"U, s, Vh = svd(X)\n",
"V = Vh.T  # we don't use Vh later on!\n",
"S = diagsvd(s, N, F)  # singular values matrix\n",
"# X.T @ X / (N-1) is symmetric, so eigvalsh is the right tool:\n",
"# it guarantees real eigenvalues in ascending order, whereas np.linalg.eig\n",
"# may return complex values with numerical noise and unspecified order\n",
"D = np.linalg.eigvalsh(X.T @ X / (N-1))[::-1]  # eig vals, sorted descending, then ==\n",
"d = s**2 / (N-1)\n",
"print(np.allclose(d, D))  # so we go for d later on\n",
"\n",
"# switch polarities for nicer interpretation of the\n",
"# exam data\n",
"V[:, 0] *= -1\n",
"U[:, 0] *= -1\n",
"\n",
"V[:, 2] *= -1\n",
"U[:, 2] *= -1\n",
"\n",
"V[:, 3] *= -1\n",
"U[:, 3] *= -1"
120+
]
121+
},
122+
{
123+
"cell_type": "code",
124+
"execution_count": null,
125+
"metadata": {},
126+
"outputs": [],
127+
"source": [
128+
"# PCA\n",
"US = U @ S\n",
"# normalise such that each PC feature column has variance 1 (aka normalised PC scores)\n",
"PC_Features = US @ diagsvd(1 / np.sqrt(d), F, F)\n",
"print(np.var(PC_Features, axis=0, ddof=1))\n",
"# aka PC coeff; columns are no longer unit-length, but scaled such that the\n",
"# entries give the correlation between PC_Features and X\n",
"PC_Loadings = V @ diagsvd(np.sqrt(d), F, F)"
134+
]
135+
},
136+
{
137+
"cell_type": "code",
138+
"execution_count": null,
139+
"metadata": {},
140+
"outputs": [],
141+
"source": [
142+
"np.allclose(X, PC_Features @ PC_Loadings.T)  # verify the matrix factorisation reconstructs X"
143+
]
144+
},
145+
{
146+
"cell_type": "code",
147+
"execution_count": null,
148+
"metadata": {},
149+
"outputs": [],
150+
"source": [
151+
"# project each data column onto each PC feature column -> the resulting inner\n",
"# products are the weights for the linear combination of PC features\n",
"# correlation uses unit-length vectors\n",
"PC_Loadings_manual = np.zeros((F, F))\n",
"for task in range(F):\n",
"    x_unit = X[:, task] / np.linalg.norm(X[:, task])\n",
"    for pc in range(F):\n",
"        pc_unit = PC_Features[:, pc] / np.linalg.norm(PC_Features[:, pc])\n",
"        PC_Loadings_manual[task, pc] = np.inner(pc_unit, x_unit)\n",
"np.allclose(PC_Loadings_manual, PC_Loadings)  # we recover the PC_Loadings matrix"
160+
]
161+
},
162+
{
163+
"cell_type": "code",
164+
"execution_count": null,
165+
"metadata": {},
166+
"outputs": [],
167+
"source": [
168+
"# explained variance per PC (eigenvalues == variances of the US columns)\n",
"d, np.var(US, axis=0, ddof=1)"
170+
]
171+
},
172+
{
173+
"cell_type": "code",
174+
"execution_count": null,
175+
"metadata": {},
176+
"outputs": [],
177+
"source": [
178+
"# cumulative explained variance in %\n",
"cum_var = np.cumsum(d) / np.sum(d) * 100\n",
"cum_var"
181+
]
182+
},
183+
{
184+
"cell_type": "markdown",
185+
"metadata": {},
186+
"source": [
187+
"# Check via Plots"
188+
]
189+
},
190+
{
191+
"cell_type": "code",
192+
"execution_count": null,
193+
"metadata": {},
194+
"outputs": [],
195+
"source": [
196+
"# explicit fig/ax interface instead of the pyplot state machine\n",
"fig, (ax_top, ax_bottom) = plt.subplots(2, 1, figsize=(12, 8))\n",
"\n",
"for f in range(F):\n",
"    ax_top.plot(X[:, f], 'o-', color='C' + str(f), label='Task ' + str(f + 1), ms=3)\n",
"ax_top.legend(loc='lower left')\n",
"ax_top.set_xticks([0, N - 1], labels=['best grade', 'worst grade'])\n",
"ax_top.set_ylabel('normalised points (mean-free, var=1)')\n",
"ax_top.grid(True)\n",
"ax_top.set_title(task_label)\n",
"\n",
"for f in range(F):\n",
"    ax_bottom.plot(US[:, f], 'o-', color='C' + str(f), label='PCA v ' + str(f + 1), lw=(F - f) * 2 / 3, ms=(F - f) * 3 / 2)\n",
"ax_bottom.legend(loc='lower left')\n",
"ax_bottom.set_xticks([0, N - 1], labels=['best grade', 'worst grade'])\n",
"ax_bottom.set_ylabel('PC features (mean-free, sorted var)')\n",
"ax_bottom.set_xlabel('student index (sorted grade)')\n",
"ax_bottom.grid(True)\n",
"ax_bottom.set_title(['cum var in %:', cum_var])\n",
"fig.tight_layout()"
217+
]
218+
},
219+
{
220+
"cell_type": "code",
221+
"execution_count": null,
222+
"metadata": {},
223+
"outputs": [],
224+
"source": [
225+
"# correlation between each task and each PC\n",
"pc_label = ['PC 1', 'PC 2', 'PC 3', 'PC 4', 'PC 5']\n",
"cmap = plt.get_cmap('Spectral_r', 8)\n",
"fig = plt.figure(figsize=(6,4))\n",
"ax = fig.add_subplot(111)\n",
"cax = ax.matshow(PC_Loadings, cmap=cmap, vmin=-1, vmax=+1)\n",
"fig.colorbar(cax)\n",
"ax.set_xticks(np.arange(len(pc_label)))\n",
"ax.set_yticks(np.arange(len(task_label)))\n",
"ax.set_xticklabels(pc_label)\n",
"ax.set_yticklabels(task_label)\n",
"ax.set_title('Loading Matrix = PC x contributes to Task y')\n",
"plt.tight_layout()\n",
"\n",
"# a rank-3 approximation of the data,\n",
"# i.e. using only PC1, PC2 and PC3 in the linear combination to reconstruct X,\n",
"# would only change one grade by a 1/3 grade step,\n",
"# so 85.899 % explained variance would be enough to figure out the actual grading\n",
"\n",
"# PC1 and PC2 might allow an intuitive interpretation:\n",
"# students are very well prepared for the convolution, Laplace and z-domain tasks,\n",
"# as these tasks are always very similar and will definitely be queried in the exam;\n",
"# so PC1 indicates the performance on 'fulfilled' expectations and is\n",
"# highly correlated with the achieved grade\n",
"# the Fourier task and the sampling task were chosen out of a wide range of options;\n",
"# here students have rather 'unknown' expectations, which is why we need PC2 to cover this\n",
"#\n",
"# PC3 to PC5 show positive vs. negative correlations, i.e. mostly one good task vs. one bad task performance;\n",
"# some of these results are intuitive: we know that students sometimes have preferences for Laplace vs. z-domain "
254+
]
255+
},
256+
{
257+
"cell_type": "code",
258+
"execution_count": null,
259+
"metadata": {},
260+
"outputs": [],
261+
"source": [
262+
"np.sum(PC_Loadings**2, axis=0)  # again the explained variance of each PC"
263+
]
264+
},
265+
{
266+
"cell_type": "code",
267+
"execution_count": null,
268+
"metadata": {},
269+
"outputs": [],
270+
"source": [
271+
"np.sum(PC_Loadings**2, axis=1)  # communalities; each must be 1 in our normalised handling"
272+
]
273+
},
274+
{
275+
"cell_type": "markdown",
276+
"metadata": {},
277+
"source": [
278+
"## Copyright\n",
279+
"\n",
280+
"- the notebooks are provided as [Open Educational Resources](https://en.wikipedia.org/wiki/Open_educational_resources)\n",
281+
"- feel free to use the notebooks for your own purposes\n",
282+
"- the text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/)\n",
283+
"- the code of the IPython examples is licensed under the [MIT license](https://opensource.org/licenses/MIT)\n",
284+
"- please attribute the work as follows: *Frank Schultz, Data Driven Audio Signal Processing - A Tutorial Featuring Computational Examples, University of Rostock* ideally with relevant file(s), github URL https://github.com/spatialaudio/data-driven-audio-signal-processing-exercise, commit number and/or version tag, year."
285+
]
286+
}
287+
],
288+
"metadata": {
289+
"kernelspec": {
290+
"display_name": "myddasp",
291+
"language": "python",
292+
"name": "python3"
293+
},
294+
"language_info": {
295+
"codemirror_mode": {
296+
"name": "ipython",
297+
"version": 3
298+
},
299+
"file_extension": ".py",
300+
"mimetype": "text/x-python",
301+
"name": "python",
302+
"nbconvert_exporter": "python",
303+
"pygments_lexer": "ipython3",
304+
"version": "3.12.3"
305+
}
306+
},
307+
"nbformat": 4,
308+
"nbformat_minor": 2
309+
}

0 commit comments

Comments
 (0)