# -*- coding: utf-8 -*-
"""analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1LEpB2ZVaYWKc5xgENwpl8WYVqct_QlWI
"""
import itertools
import numpy as np
import os
import re
import matplotlib.pyplot as plt
import seaborn as sns
import sys
sys.path.append("../src")
# from utils import *
from collections import Counter
import gc
import random
sns.set(rc={'figure.figsize': (15, 12)})
from gensim.models import KeyedVectors, Word2Vec
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve, confusion_matrix, f1_score, plot_confusion_matrix
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import pandas as pd
from itertools import chain
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from textblob import Word
from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
from itertools import groupby
import io
def get_sentese_tag_lists(filename):
    lines = []
    with open(filename) as fp:
        for line in fp:
            lines.append(line)
    sentences = (list(g) for k, g in groupby(lines, key=lambda x: x != '\n') if k)
    # for sentence in sentences:
    #     yield sentence
    list_of_lists_of_tags = []
    list_of_lists_of_tokens = []
    for sentence in sentences:
        tags_in_sentence = []
        tokens_in_sentence = []
        for tag_and_token in sentence:
            tag, token = tag_and_token.split('\t')
            if '\n' not in token:
                raise ValueError("expected a trailing newline in line: " + repr(tag_and_token))
            token = token.replace('\n', '')
            tags_in_sentence.append(tag)
            tokens_in_sentence.append(token)
        list_of_lists_of_tags.append(tags_in_sentence)
        list_of_lists_of_tokens.append(tokens_in_sentence)
    tags_tokens_tuple_list = []
    for tags_in_sentence, tokens_in_sentence in zip(list_of_lists_of_tags, list_of_lists_of_tokens):
        tags_tokens_tuple_list.append(list(zip(tags_in_sentence, tokens_in_sentence)))
    return tags_tokens_tuple_list
def get_token_tags(sentences):
    list_of_tokens_lists = []
    list_of_tags_lists = []
    for sentence in sentences:
        tokens_list, tags_list = [], []
        for tag, token in sentence:
            tags_list.append(tag)
            tokens_list.append(token)
        list_of_tokens_lists.append(tokens_list)
        list_of_tags_lists.append(tags_list)
    return list_of_tags_lists, list_of_tokens_lists
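# A minimal, self-contained illustration of the assumed input format (toy data, not taken
# from the real trivia10k13 files): each line is "TAG<TAB>TOKEN" and sentences are separated
# by blank lines. The temporary file name below is hypothetical.
_toy_bio = "O\twho\nB-Director\tspielberg\n\nO\tname\nB-Genre\tcomedy\n"
with open("_toy.bio.txt", "w") as _fp:
    _fp.write(_toy_bio)
print(get_sentese_tag_lists("_toy.bio.txt"))
# -> [[('O', 'who'), ('B-Director', 'spielberg')], [('O', 'name'), ('B-Genre', 'comedy')]]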
"""# 1.0 Exploratory Analysis"""
path = 'data/'
train_filepath = os.path.join(path, 'trivia10k13test.bio.txt')
test_filepath = os.path.join(path, 'trivia10k13train.bio.txt')
train_data_generator = list(get_sentese_tag_lists(train_filepath))
test_data_generator = list(get_sentese_tag_lists(test_filepath))
train_tags, train_tokens = get_token_tags(train_data_generator)
test_tags, test_tokens = get_token_tags(test_data_generator)
# Sentence length distribution
lengths = list(map(lambda x: len(x), train_data_generator))
sns.distplot(lengths)
plt.xlabel("Number of tokens in a sentence")
plt.ylabel("Proportion")
plt.show()
print("Median: ", np.median(lengths))
print("Average: ", round(np.mean(lengths), 2))
def summary(item_list, limit=30):  # @todo: restore limit = None
    flat_list = list(itertools.chain.from_iterable(item_list))
    # count_dict = dict(Counter(flat_list))#changed
    count_dict = dict(Counter(flat_list))
    count_items = sorted(count_dict.items(), key=lambda x: x[1], reverse=True)
    print("Number of unique items: ", len(count_items))
    print("Average count: ", round(len(flat_list) / len(count_items)), "\n")
    total_items = len(flat_list)
    proportion_list = []
    xlabels = []
    for i, (key, value) in enumerate(count_items):
        if limit:
            if i > limit:
                break
        proportion = round(value * 100 / total_items, 2)
        proportion_list.append(proportion)
        xlabels.append(key)
        # print(key, " ---> ", proportion, "%")
    sns.set(style="whitegrid")
    chart = sns.barplot(xlabels, proportion_list, orient="v")
    plt.xlabel("Tokens/Tag")
    plt.ylabel("Percentage of total Tokens/Tags")
    chart.set_xticklabels(chart.get_xticklabels(), rotation=45)
    plt.show()
summary(train_tags)
# Number of beginnings vs Intermediate Tags
flat_list = list(itertools.chain.from_iterable(train_tags))
# tags_counts = dict(Counter(train_tags))#changed
tags_counts = dict(Counter(flat_list))
beginning = 0
intermediate = 0
none = 0
for key, value in tags_counts.items():
    if key.startswith("B"):
        beginning += value
    elif key.startswith("I"):
        intermediate += value
    else:
        none += value
total = beginning + intermediate + none
print("Beginning tags proportion (B-) -> ", round(beginning * 100 / total, 2), "%")
print("Intermediate (I-) tags proportion -> ", round(intermediate * 100 / total, 2), "%")
print("None (O) tags proportion -> ", round(none * 100 / total, 2), "%")
# B- = beginning of an entity
# I- = intermediate/end of the entity
def average_entity_length(tags, start, end):
    '''
    Average entity length = (#Beginnings + #Intermediate) / #Beginnings
    '''
    flat_list = list(itertools.chain.from_iterable(tags))
    tags_counter = dict(Counter(flat_list))
    total = tags_counter[start] + tags_counter[end]
    average = round(total / tags_counter[start], 2)
    print("Average length of " + start[2:], " is: ", average)
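# Worked example (illustration): for the tag sequence [B-Actor, I-Actor, I-Actor] there is
# 1 beginning and 2 intermediates, so the average Actor length is (1 + 2) / 1 = 3 tokens.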
# train_datasize = len(train_data_generator)#changed
train_datasize = len(list(train_data_generator))
flat_train_tokens_list = list(itertools.chain.from_iterable(train_tokens))
print("Average length of a sentence is: ", round(len(flat_train_tokens_list) / train_datasize, 2))
# average_entity_length(train_tags, "B-Plot", "I-Plot")
# average_entity_length(train_tags, "B-Actor", "I-Actor")
# average_entity_length(train_tags, "B-Origin", "I-Origin")
# average_entity_length(train_tags, "B-Director", "I-Director")
# average_entity_length(train_tags, "B-Relationship", "I-Relationship")
# average_entity_length(train_tags, "B-Character_Name", "I-Character_Name")
# average_entity_length(train_tags, "B-Quote", "I-Quote")
# average_entity_length(train_tags, "B-Opinion", "I-Opinion")
# average_entity_length(train_tags, "B-Soundtrack", "I-Soundtrack")
def tag_presence(token_tag_tuples, tag_to_find):
    for tag, token in token_tag_tuples:
        if tag == tag_to_find:
            return True
    return False
def average_starts(generator, tags, dataset_size):
    flat_list = list(itertools.chain.from_iterable(tags))
    tags_counter = dict(Counter(flat_list))
    tags_counter = sorted(tags_counter.items(), key=lambda x: x[1], reverse=True)
    for key, value in tags_counter:
        if key.startswith("B"):
            sentence_with_tag = len(
                list(filter(lambda token_tag_tuples: tag_presence(token_tag_tuples, key), generator)))
            print("Percentage of sentences having " + key[2:], " are: ",
                  round(sentence_with_tag * 100 / dataset_size, 2), "%")
average_starts(train_data_generator, train_tags, train_datasize)
# Analyzing tokens
summary(train_tokens, 40)
def words_count(tokens, limit):
    flat_train_tokens_list = list(itertools.chain.from_iterable(tokens))
    tokens_dict = dict(Counter(flat_train_tokens_list))
    tokens_count = tokens_dict.items()
    prop_list = []
    print("Vocabulary Size: ", len(tokens_count))
    for i in range(limit):
        tokens_filtered = len(list(filter(lambda x: x[1] <= i, tokens_count)))
        prop_list.append(round(tokens_filtered * 100 / len(tokens_count), 2))
    plt.plot(prop_list)
    plt.xlabel("Counts")
    plt.ylabel("Proportion of Vocabulary (%)")
    plt.show()
    # print("Proportion of unique words less than", limit, ": ", round(tokens_filtered*100/len(tokens_dict), 2), "%")
words_count(train_tokens, 21)
# idea: replace raw numbers with a placeholder token such as <YEAR> / NUM
"""## 1.1 What do we know?
### 1.11 Entities
1. Our dataset is comprised of short sentences, mostly questions describing movies.
2. The average length of a sentence is 20 tokens.
3. The raw data does not contain punctuation.
4. There are 25 classes of entities, including None (O).
5. Structurally, tags are either B tags (Beginning of an entity), I tags (Intermediate of an entity), or the None tag (O).
6. For every I tag there is exactly one B tag; however, several I tags can share a common B tag.
7. In our case, the proportions of B, I, O are about 15%, 50%, 35% respectively. Our data is therefore rich in entities, and most entities span multiple words.
8. Nearly 45% of entities relate to Plot (B-Plot or I-Plot), so there is a major class imbalance. This is followed by the None tag (~35%). The remaining 22 entity classes make up only about 20% of the tags. Getting these minority classes right is a challenge.
9. Plot, Origin, Relationship, Soundtrack, Quote and Opinion are lengthy entities and almost always appear as phrases. These entities and the None tag share common words like is, to, the, of, etc. The rest mostly consist of proper nouns (names).
### 1.12 Vocabulary
1. The vocabulary size is close to 11000. This is large!
2. 60% of the vocabulary has a frequency of 2 or less. We obviously cannot drop vocabulary directly; we need some other way to normalize words.
3. Idea - numbers can be replaced by a common <NUM> token. That should reduce our vocabulary substantially.
4. Since this is not 'conversational' or 'chat' text, there should be no slang spellings like 'Wowwww' and 'Woww'. We don't expect such variations.
5. There are a lot of standalone 's' tokens. Since punctuation is removed, "'s" survives as a single 's'. We can replace it with 'is'.
6. Similarly, "can't" survives as three tokens: ca, n, t. We replace 'ca' with 'can', remove the single 't', and replace the single 'n' with 'not'.
7. 'll' can be replaced by 'will'.
# 2.0 Pre-Processing
"""
def replace(sentence, to_replace, replace_by):
    # sentences are lists of (tag, token) tuples; swap the token while keeping its tag
    for i, (tag, token) in enumerate(sentence):
        if token == to_replace:
            sentence[i] = (tag, replace_by)
    return sentence
def replace_num(sentence, replace_by="NUM"):
    for i, (tag, token) in enumerate(sentence):
        if token.isnumeric():
            sentence[i] = (tag, replace_by)
    return sentence
def replace_nt(sentence):
    # "can't" is tokenized as ca / n / t: drop the stray "t", map "n" -> "not" and "ca" -> "can"
    sentence = list(filter(lambda x: x[1] != "t", sentence))
    for i in range(len(sentence)):
        tag1 = sentence[i][0]
        if sentence[i][1] == "n":
            sentence[i] = (tag1, "not")
        elif sentence[i][1] == "ca":
            sentence[i] = (tag1, "can")
    return sentence
def normalize_word(word):
    stemmer = PorterStemmer()
    return stemmer.stem(word)
def normalize_sentence(sentence):
    return list(map(lambda x: (x[0], normalize_word(x[1])), sentence))
def apply_preproc(data_generator):
    data_generator = list(map(lambda x: replace(x, "ca", "can"), data_generator))
    data_generator = list(map(lambda x: replace(x, "s", "is"), data_generator))
    data_generator = list(map(lambda x: replace(x, "ll", "will"), data_generator))
    data_generator = list(map(lambda x: replace_num(x, "NUM"), data_generator))
    data_generator = list(map(lambda x: replace_nt(x), data_generator))
    data_generator = list(map(lambda x: normalize_sentence(x), data_generator))
    return data_generator
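# Quick sanity check on a made-up (tag, token) sentence (illustration only; the exact
# surface forms also depend on the NLTK Porter stemmer):
_toy_sentence = [("O", "it"), ("O", "s"), ("B-Year", "1994"), ("O", "movies")]
print(apply_preproc([_toy_sentence]))
# expected, roughly: "s" -> "is", "1994" -> the numeric placeholder, "movies" stemmed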
train_data_generator = apply_preproc(train_data_generator)
test_data_generator = apply_preproc(test_data_generator)
train_tags, train_tokens = get_token_tags(train_data_generator)
test_tags, test_tokens = get_token_tags(test_data_generator)
gc.collect()
# checks
def checks(tokens):
    # flatten the list of token lists so the membership tests actually inspect tokens
    flat_tokens = set(itertools.chain.from_iterable(tokens))
    assert "s" not in flat_tokens, "'s' is still present"
    assert "t" not in flat_tokens, "'t' is still present"
    assert "n" not in flat_tokens, "'n' is still present"
    assert "ca" not in flat_tokens, "'ca' is still present"
    assert "ll" not in flat_tokens, "'ll' is still present"
checks(train_tokens)
checks(test_tokens)
"""# 3.0 Getting the data ready for Machine Learning
## 3.1 Approach
### Step 1: Setting up a baseline
The pipeline is:
Sentence-level embeddings > Classifier > Metrics and reports
We try different embeddings such as TF-IDF and Word2Vec, and use Logistic Regression.
#### Approach
For a sentence with N words, consider the k-th word. We use the vectors of the words at positions k-3, k-2, k-1, k+1, k+2, k+3 as its context. Where a neighbouring word doesn't exist, we use a vector of zeros.
"""
def stack(sentence, window=3):
    y = [x[0] for x in sentence]
    tokens = [x[1] for x in sentence]
    X = []
    for i in range(len(tokens)):
        left_pad = max(window - i, 0)
        right_pad = max(window - len(tokens) + i + 1, 0)
        left_idx = window - left_pad
        right_idx = window - right_pad
        row = left_pad * ["PAD"] + tokens[i - left_idx:i] + tokens[i + 1:i + 1 + right_idx] + right_pad * ["PAD"]
        X.append(row)
        assert len(row) == 2 * window, "Length:{0}, Row:{1}, i:{2}".format(len(tokens), row, i)
    return X, y
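# Minimal illustration of the context-window rows (toy sentence, window=2 for brevity):
_rows, _labels = stack([("O", "a"), ("B-Genre", "comedy"), ("O", "movie")], window=2)
# each row holds the 2 left and 2 right neighbours of a token, padded with "PAD":
# _rows[1] == ["PAD", "a", "movie", "PAD"] and _labels == ["O", "B-Genre", "O"]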
train_row_data = list(map(lambda x: stack(x, 3), train_data_generator))
train_rows = [item[0] for item in train_row_data]
train_labels = [item[1] for item in train_row_data]
test_row_data = list(map(lambda x: stack(x, 3), test_data_generator))
test_rows = [item[0] for item in test_row_data]
test_labels = [item[1] for item in test_row_data]
def rows_to_2d(rows, labels):
    X = []
    y = []
    for row, label in zip(rows, labels):
        for item, tag in zip(row, label):
            X.append(item)
            y.append(tag)
    return X, y
train_X, train_y = rows_to_2d(train_rows, train_labels)
test_X, test_y = rows_to_2d(test_rows, test_labels)
assert len(train_X) == len(train_y)
assert len(test_X) == len(test_y)
gc.collect()
word2vec = KeyedVectors.load_word2vec_format('data/GoogleNews-vectors-negative300.bin.gz', limit=10000, binary=True)
#
word2vec_vocab = list(word2vec.wv.vocab.keys())
common = set(word2vec_vocab).intersection(set(flat_train_tokens_list))
vocab = set(flat_train_tokens_list)
percent_overlap = round(len(common) * 100 / len(vocab), 2)
print("Common vocab (word2vec, our vocabulary): ", percent_overlap, "%")
token_share = list(filter(lambda x: x in common, flat_train_tokens_list))
common_share = round(len(token_share) * 100 / len(flat_train_tokens_list), 2)
print("The common vocabulary covers:", common_share, "% of total tokens")
"""#### Observations so far
1. The pre-trained word2vec does not cover our vocabulary well. This is expected because our vocabulary contains many names, and we can't expect a pre-trained model to contain every name in the universe.
2. We can move forward with one of two options - 1) make our own embeddings, or 2) find a workaround for the words that don't exist in the word2vec vocabulary.
3. Let's brainstorm approach 2.
    1. One option is to replace OOV words with an UNK (Unknown) token and then build embeddings.
    2. Another possible workaround is to use character embeddings to build word embeddings for OOV (Out of Vocabulary) words.
4. For approach 1
    1. Make our own embeddings.
5. Pros and cons of approach 1.
    1. Pro - It is easy to build our own embeddings, because we can cover every word in the vocabulary.
    2. Pro - We get embeddings very specific to our movie data. Pre-trained embeddings are trained on a large corpus that doesn't necessarily relate to our data.
    3. Con - It is hard to evaluate the quality of our embeddings, because they are unsupervised vectors.
    4. Con - Building our own embeddings is time consuming.
    5. Con - The biggest issue: what if the test data contains new names that don't exist in our training vocabulary? This is the main motivation for using character-level embeddings.
6. Pros and cons of approach 2.
    1. Pro - Replacing with an UNK token is very easy.
    2. Con - We might lose a lot of information, because the model will treat nearly 35% of training tokens as identical.
    3. Pro - Character-level embeddings still preserve some information, compared to a single UNK token.
#### Next Steps
Try one of the options:
> Use pretrained word embeddings + custom character embeddings
# 4.0 Baseline 1 - pretrained word embeddings + custom character embeddings
"""
# to train character embeddings, each vocabulary word is treated as a 'sentence' of characters,
# so Word2Vec learns one vector per character (min_count=1 keeps rare characters)
char2vec = Word2Vec(vocab, size=300, sg=1, seed=42, min_count=1)
def get_wordvector(token, word_model=word2vec, char_model=char2vec):
    if token in word_model.wv.vocab:
        return word_model.wv[token]
    else:
        vector = np.zeros(word_model.vector_size, )
        if token == "PAD":
            return vector
        for char in token.lower():
            if char in char_model.wv:  # skip characters the character model has never seen
                vector = np.add(vector, char_model.wv[char])
        return vector
def row2vec(row, word_model=word2vec, char_model=char2vec):
    rowvec = [get_wordvector(token, word_model, char_model) for token in row]
    length = word_model.vector_size * len(row)
    return np.array(rowvec).reshape(length)
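# Small check (illustration only): every token maps to a 300-d vector, whether it comes from
# the pretrained word2vec vocabulary, from summed character vectors, or the all-zero PAD fallback.
print(get_wordvector("movie").shape, get_wordvector("PAD").shape)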
train_X = list(map(lambda x: row2vec(x), train_X))
test_X = list(map(lambda x: row2vec(x), test_X))
train_X = np.array(train_X).reshape(len(train_X), -1)
test_X = np.array(test_X).reshape(len(test_X), -1)
print(train_X.shape)
print(test_X.shape)
gc.collect()
"""The array is obviously very big. We need to reduce the dimensions using some technique. We use PCA."""
pca = PCA(n_components=100)
train_X = pca.fit_transform(train_X)
test_X = pca.transform(test_X)
# let's delete things we don't need
label2class = dict(enumerate(set(train_y)))
class2label = {value: key for key, value in label2class.items()}
train_y = [class2label[tag] for tag in train_y]
test_y = [class2label[tag] for tag in test_y]
# X_train, X_test, y_train, y_test = train_test_split(X_reduced, y_labels, test_size=0.2, random_state=42)
classifier = RandomForestClassifier()
param_grid = {"n_estimators": [10, 20, 50], "max_depth": [20, 30], "min_samples_leaf": [3]}
cv = GridSearchCV(classifier, param_grid=param_grid, n_jobs=-1, scoring="f1_macro", cv=4)
cv.fit(train_X, train_y)
classifier = cv.best_estimator_
print(cv.best_params_)
print(cv.best_score_)
train_predictions = classifier.predict(train_X)
test_predictions = classifier.predict(test_X)
print("Train Accuracy: ", round(100 * accuracy_score(train_y, train_predictions), 2))
print("Test Accuracy: ", round(100 * accuracy_score(test_y, test_predictions), 2))
print("Train F1: ", round(100 * f1_score(train_y, train_predictions, average="macro"), 2))
print("Test F1: ", round(100 * f1_score(test_y, test_predictions, average="macro"), 2))
def confusion_matrix_plot(true, model, X, label2class=label2class, class2label=class2label):
    true_classes = [label2class[value] for value in true]
    labels = list(label2class.values())
    plot_confusion_matrix(model, X, true, display_labels=labels, normalize='all', xticks_rotation="vertical")
    plt.xlabel("Predicted")
    plt.ylabel("True")
cm = confusion_matrix_plot(train_y, classifier, train_X)
confusion_matrix_plot(test_y, classifier, test_X)
# free up some space
del train_X, test_X
del train_y, test_y
del word2vec
del classifier
"""# 4.1 Analysis of Baseline
1. The overall accuracy and the confusion matrix look about the same for both the train and validation results.
2. Although the accuracy is 70%, it is not the most representative measure of model performance (because of the class imbalance).
3. As expected, the algorithm only learns to predict the 'I-Plot' and the 'O' tags. It barely learns to predict anything else.
4. We could try more tuning and different models, but the purpose of the baseline is simply to see how much better a complex model does than a simple one.
# 5.0 NER Using Deep Learning
We build an LSTM to predict tags. The goal is not to build a State-Of-The-Art (SOTA) model, but rather to understand what works and what doesn't.
## 5.1 Data for Deep Learning
For N sentences of a uniform length M, our data is an array of shape N*M. Since each token has a label, our labels form an array of the same shape.
### Problem
1. Out of Vocabulary (OOV) words - how do we make embeddings for words that don't occur in the training vocabulary?
### Approach
1. To solve the OOV problem -
    1. Replace such words with an <UNK> token and initialize the <UNK> embedding randomly
    2. Use a third-party embedding such as FastText (see the sketch after the data extraction below)
    3. Train your own subword embeddings and use them for such words
"""
def get_data(data_generator):
    X = []
    y = []
    for pair in data_generator:
        X.append([x[1] for x in pair])
        y.append([x[0] for x in pair])
    return X, y
X_train, y_train = get_data(train_data_generator)
X_test, y_test = get_data(test_data_generator)
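# Hedged sketch of option 2 from the list above (FastText subword embeddings), which is not
# used in the rest of this script; it assumes the same gensim 3.x argument names used elsewhere
# in this file. FastText builds vectors from character n-grams, so even unseen words get a vector.
from gensim.models import FastText
ft_sketch = FastText(X_train, size=100, min_count=1, seed=42)
print(ft_sketch.wv["unseenmoviename"].shape)  # character n-grams still yield a vector for an OOV word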
# create a vocab to unique idx mapping and vice-versa
PADDING_TOKEN = "PAD"
UNKNOWN_TOKEN = "UNK"
labels = list(chain.from_iterable(y_train))
idx2label = dict(enumerate(set(labels)))
label2idx = {value: key for key, value in idx2label.items()}
# calculating weight for class weights. These weights are used in the cross-entropy loss
count_label = dict(Counter(chain.from_iterable(y_train)))
inverse_counts = {key: 1. / value for key, value in count_label.items()}
sum_inverse = np.sum([count for _, count in inverse_counts.items()])
inverse_normalized = {key: value / sum_inverse for key, value in inverse_counts.items()}
weights = np.array([0.3 + inverse_normalized[idx2label[i]] for i in range(len(idx2label))])
weights /= np.sum(weights)
# checks
def check2(X, y):
    for _x, _y in zip(X, y):
        assert len(_x) == len(_y)
check2(X_train, y_train)
check2(X_test, y_test)
# from our earlier analysis we cap sentences at MAX_LEN = 40 tokens: longer sentences are trimmed
# here, and shorter ones are padded later
MAX_LEN = 40
def trim(X, y, max_len=MAX_LEN):
    sequence = []
    for i in range(len(X)):
        if len(X[i]) >= max_len:
            X[i] = X[i][:max_len]
            y[i] = y[i][:max_len]
            sequence.append(max_len)
        else:
            sequence.append(len(X[i]))
    return X, y, sequence
X_train, y_train, train_seq = trim(X_train, y_train, MAX_LEN)
X_test, y_test, test_seq = trim(X_test, y_test, MAX_LEN)
# checks
def check3(X, y, sequence):
    assert len(X) == len(y)
    assert len(X) == len(sequence)
    for seq, tags in zip(X, y):
        assert len(tags) <= MAX_LEN
        assert len(seq) == len(tags)
check3(X_train, y_train, train_seq)
check3(X_test, y_test, test_seq)
inverse_normalized[PADDING_TOKEN] = 0
def replace_by_counts(tokens, max_count, replace_by):
    '''
    Replaces tokens with count <= max_count by the token 'replace_by'
    '''
    counts = dict(Counter(tokens)).items()
    vocab = [token if count > max_count else replace_by for token, count in counts]
    return list(set(vocab))
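# Tiny illustration (made-up counts): tokens seen at most twice collapse into the UNK placeholder.
print(sorted(replace_by_counts(["a", "a", "a", "b", "b", "c"], 2, UNKNOWN_TOKEN)))  # -> ['UNK', 'a']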
#
# create a vocab to unique idx mapping and vice-versa
tokens = list(chain.from_iterable(X_train))
vocab = replace_by_counts(tokens, 4, UNKNOWN_TOKEN)
vocab.append(PADDING_TOKEN)
idx2word = dict(enumerate(set(vocab)))
word2idx = {value: key for key, value in idx2word.items()}
labels = list(chain.from_iterable(y_train))
labels.append(PADDING_TOKEN)
idx2label = dict(enumerate(set(labels)))
label2idx = {value: key for key, value in idx2label.items()}
weights = np.array([0.3 + inverse_normalized[idx2label[i]] for i in range(len(idx2label))])
weights /= np.sum(weights)
# replace tokens by their indices from the dictionary. Same for labels
def unk_map(x, token2idx, unk):
    '''
    Replace tokens by the unk token idx if they are not in the vocabulary
    '''
    idx = []
    for word in x:
        if word not in token2idx.keys():
            idx.append(unk)
        else:
            idx.append(token2idx[word])
    return idx
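# Tiny illustration with a hypothetical two-word vocabulary: anything outside it maps to the UNK index.
_toy_vocab2idx = {"good": 0, "UNK": 1}
print(unk_map(["good", "unseenword"], _toy_vocab2idx, _toy_vocab2idx["UNK"]))  # -> [0, 1]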
unk_idx = word2idx["UNK"]
X_train = list(map(lambda x: unk_map(x, word2idx, unk_idx), X_train))
y_train = list(map(lambda x: [label2idx[word] for word in x], y_train))
X_test = list(map(lambda x: unk_map(x, word2idx, unk_idx), X_test))
y_test = list(map(lambda x: [label2idx[word] for word in x], y_test))
# creating pytorch dataset for iteration and generating batches
class NERDataset(Dataset):
    def __init__(self, X, y, sequence):
        self.X = X
        self.y = y
        self.seq = sequence
    def __len__(self):
        return len(self.X)
    def __getitem__(self, idx):
        return self.X[idx], self.y[idx], self.seq[idx]
def pad(X, Y, seq, pad_idx, pad_label, max_len):
    '''
    Append pad_idx to the token sequences and pad_label to the label sequences
    for the corresponding PAD positions, up to max_len
    '''
    X_padd = []
    y_padd = []
    for x, y in zip(X, Y):
        x_len = max_len - len(x)
        X_padd.append(x + [pad_idx] * x_len)
        y_padd.append(y + [pad_label] * x_len)
    X_padd = torch.LongTensor(X_padd)
    y_padd = torch.LongTensor(y_padd)
    seq = torch.LongTensor(seq)
    return X_padd, y_padd, seq
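# Illustration with toy indices: shorter sequences are right-padded up to max_len.
_xp, _yp, _sp = pad([[3, 4]], [[1, 2]], [2], pad_idx=0, pad_label=9, max_len=4)
# _xp -> tensor([[3, 4, 0, 0]]), _yp -> tensor([[1, 2, 9, 9]]), _sp -> tensor([2])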
X_train, y_train, train_seq = pad(X_train, y_train, train_seq, word2idx["PAD"], label2idx["PAD"], MAX_LEN)
X_test, y_test, test_seq = pad(X_test, y_test, test_seq, word2idx["PAD"], label2idx["PAD"], MAX_LEN)
train_dataset = NERDataset(X_train, y_train, train_seq)
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_dataset = NERDataset(X_test, y_test, test_seq)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=True)
gc.collect()
"""Now our data is ready for deep learning
## 5.2 Model Building - LSTM
"""
class NERLstm(nn.Module):
    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size, padding_idx, max_len, num_layers):
        super(NERLstm, self).__init__()
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=num_layers, batch_first=True, bidirectional=True)
        self.linear = nn.Linear(2 * hidden_dim, tagset_size)
        self.padding_idx = padding_idx
        self.max_len = max_len
        self.tagset_size = tagset_size
    def forward(self, X, seq):
        embeddings = self.word_embeddings(X)
        # sequence lengths must live on the CPU for pack_padded_sequence
        packed_input = pack_padded_sequence(embeddings, seq.cpu(), batch_first=True, enforce_sorted=False)
        packed_output, (ht, ct) = self.lstm(packed_input)
        output, _ = pad_packed_sequence(packed_output, batch_first=True)
        output = self.linear(output)
        return output
config = {
    'embedding_dim': 128,
    'hidden_dim': 1024,
    'vocab_size': 100,
    'tagset_size': len(idx2label),
    'padding_idx': 0,
    'max_len': 3,
    "num_layers": 2
}
# test
def test_model():
    '''
    Testing the model on a tiny batch of fake indices
    '''
    model_under_test = NERLstm(**config)
    test_X = torch.LongTensor([[1, 5, 3], [4, 0, 0]])
    test_y = torch.LongTensor([[1, 2, 3], [4, 5, 5]])
    test_sequence = torch.LongTensor([3, 1])
    test_output = model_under_test(test_X, test_sequence)
    print(test_output.shape)
test_model()
def loss(true, pred, pad_idx, target_size, max_len, weights=None, device="cpu"):
    '''
    Calculate the cross-entropy loss without taking the PAD positions into account
    '''
    batch_size = pred.shape[0]
    max_batch = pred.shape[1]
    weights = torch.Tensor(weights)
    weights = weights.to(device)
    loss_fn = nn.CrossEntropyLoss(reduction='none', weight=weights)
    pred = pred[:, :max_batch, :]
    true = true[:, :max_batch].contiguous()
    true = true.view(-1)
    pred = pred.view(-1, target_size)
    loss = loss_fn(pred, true)
    mask = true != pad_idx
    mask = mask.view(-1).type(torch.FloatTensor).to(device)
    mask /= mask.shape[0]
    return torch.dot(mask, loss) / torch.sum(mask)
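# Masked-loss sanity check on random logits (illustration only; label index 0 stands in for PAD here):
_toy_logits = torch.randn(2, 3, len(idx2label))
_toy_targets = torch.LongTensor([[1, 2, 0], [2, 1, 0]])
print(loss(_toy_targets, _toy_logits, pad_idx=0, target_size=len(idx2label), max_len=3,
           weights=np.ones(len(idx2label)), device="cpu"))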
def generate_predictions(model, X, seq, device="cuda"):
    X = X.to(device)
    seq = seq.to(device)
    pred = model(X, seq)
    pred_labels = torch.argmax(pred, 2)
    pred_labels = pred_labels.view(-1)
    return pred_labels
def accuracy(model, X, seq, y_true, pad_idx, device):
    y_true = y_true.view(-1)
    y_pred = generate_predictions(model, X, seq, device)
    mask = y_true != pad_idx
    mask = mask.type(torch.FloatTensor)
    matches = y_pred == y_true
    matches = matches.type(torch.FloatTensor)
    correct = torch.dot(matches, mask)
    total = torch.sum(mask)  # count only the non-PAD positions in the denominator
    accuracy = correct.item() / total.item()
    return accuracy
# testing masked loss
# loss(test_y, test_output, word2idx["PAD"], config["tagset_size"], config["max_len"])
"""## 5.3 Training Loop"""
EMBEDDING_DIM = 300
HIDDEN_DIM = 512
VOCAB_SIZE = len(word2idx)
TAGSET_SIZE = len(idx2label)
BATCH_SIZE = 128
LEARNING_RATE = 10e-3
EPOCHS = 22
NUM_LAYERS = 3
config = {
    'embedding_dim': EMBEDDING_DIM,
    'hidden_dim': HIDDEN_DIM,
    'vocab_size': VOCAB_SIZE,
    'tagset_size': TAGSET_SIZE,
    'padding_idx': word2idx["PAD"],
    'max_len': MAX_LEN,
    'num_layers': NUM_LAYERS
}
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
model = NERLstm(**config)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
train_loss = []
test_loss = []
test_accuracy = []
train_accuracy = []
for i in range(EPOCHS):
    epoch_loss = 0
    model = model.train()
    LEARNING_RATE *= 0.8
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
    for j, (X, y, sequence) in enumerate(train_loader):
        optimizer.zero_grad()
        X = X.to(device)
        y = y.to(device)
        # print(X.shape, y.shape, sequence.shape)
        sequence = sequence.to(device)
        pred = model(X, sequence)
        loss_value = loss(y, pred, config['padding_idx'], config['tagset_size'], config["max_len"], weights=weights,
                          device=device)
        loss_value.backward()
        # print(j, ": ", round(loss_value.item(), 2))
        epoch_loss += loss_value.item()
        optimizer.step()
        del X, y, sequence
        torch.cuda.empty_cache()
    model = model.eval()
    train_loss.append(round(epoch_loss, 3))
    X_test = X_test.to(device)
    y_test = y_test.to(device)
    test_seq = test_seq.to(device)
    y_pred = model(X_test, test_seq)
    test_epoch_loss = loss(y_test, y_pred, config['padding_idx'], config['tagset_size'], config["max_len"], weights,
                           device)
    test_epoch_loss = round(test_epoch_loss.item(), 3)
    test_loss.append(test_epoch_loss)
    test_epoch_acc = round(accuracy(model, X_test, test_seq, y_test, config["padding_idx"], device), 3)
    test_accuracy.append(test_epoch_acc)
    print("-----------Epoch: {}-----------".format(i + 1))
    print("Loss:\ntrain:{0}\ntest:{1}\n".format(round(epoch_loss, 2), test_epoch_loss))
    print("Accuracy:\ntest:{0}\n".format(test_epoch_acc))
plt.plot(test_accuracy)
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.plot(np.array(test_loss) / len(X_test))
# X,y,sequence = next(iter(train_loader))
plt.plot(np.array(train_loss) / len(X_train))
def F1_scores(y_true, y_pred, idx, pad_idx):
    y_pred = torch.argmax(y_pred, 2)
    y_true = y_true.to("cpu").type(torch.LongTensor).numpy().reshape(-1)
    y_pred = y_pred.to("cpu").type(torch.LongTensor).numpy().reshape(-1)
    tp = 0
    tn = 0
    fp = 0
    fn = 0
    p = 0
    n = 0
    for i, true in enumerate(y_true):
        if true != pad_idx:
            pred = y_pred[i]
            if true == idx:
                p += 1
                if pred == idx:
                    tp += 1
                else:
                    fn += 1
            else:
                n += 1
                if pred == idx:
                    fp += 1
                else:
                    tn += 1
    precision = tp / (tp + fp + 0.0001)
    recall = tp / (tp + fn + 0.0001)
    f1 = 2 * precision * recall / (precision + recall + 0.0001)
    return round(f1, 3), round(precision, 3), round(recall, 3)
total = 0
for tag, idx in label2idx.items():
    f1, prec, rec = F1_scores(y_test, y_pred, label2idx[tag], label2idx["PAD"])
    print(tag + " stats: " + "precision: ", prec, " recall: ", rec, " F1: ", f1)
    total += f1
print("------ Average: {} ------".format(total / len(label2idx)))
# analysis
def analyze(y_pred, y_true, X_test):
    y_pred = torch.argmax(y_pred, 2)
    y_true = y_true.to("cpu").type(torch.LongTensor).numpy().reshape(-1)
    y_pred = y_pred.to("cpu").type(torch.LongTensor).numpy().reshape(-1)
    X_test = X_test.to("cpu").type(torch.LongTensor).numpy().reshape(-1)
    where_incorrect = y_true != y_pred
    incorrect_idxes = np.where(where_incorrect == 1)[0]
    incorrect_tokens = X_test[incorrect_idxes]
    return dict(Counter(incorrect_tokens))
incorrect_dict = analyze(y_pred, y_test, X_test)
incorrect_dict = sorted(incorrect_dict.items(), key=lambda x: x[1], reverse=True)
for idx, count in incorrect_dict[:20]:
    print(idx2word[idx], " ----> ", count)
def predict_tags(sentence, model=model, word2idx=word2idx,
                 idx2word=idx2word, label2idx=label2idx, idx2label=idx2label):
    tokens = sentence.lower().split()
    stemmer = PorterStemmer()
    stemmed_tokens = [stemmer.stem(token) for token in tokens]
    length = len(tokens)
    tokens_idx = []
    for token in stemmed_tokens:
        if token not in word2idx.keys():
            tokens_idx.append(word2idx["UNK"])
        else:
            tokens_idx.append(word2idx[token])
    tokens_idx = torch.LongTensor(tokens_idx).unsqueeze(0)
    sequence = torch.LongTensor([length])
    # pass the global device so this also works when CUDA is unavailable
    predictions = generate_predictions(model, tokens_idx, sequence, device)
    for token, label in zip(tokens, predictions):
        print(token, " ----> ", idx2label[label.item()])
predict_tags("Swades starring shahrukh khan describes the state of rural india very well")
predict_tags("Amir khan plays mahavir phogat in the real life based film dangal")
predict_tags("sholay is said to be one of the greates film of its time")