-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: helper.py
55 lines (42 loc) · 1.71 KB
/
helper.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import numpy as np
import pandas as pd
import re
import string
import pickle
from nltk.stem import PorterStemmer
# Single stemmer instance reused by preprocessing() for every request.
ps = PorterStemmer()
# Trained sentiment classifier, deserialized once at import time.
# NOTE(review): pickle.load executes arbitrary code from the file — fine for a
# bundled model artifact, but never point this at untrusted input.
with open('static/model/model.pickle', 'rb') as f:
    model = pickle.load(f)
# English stopword list (NLTK corpus format): one word per line.
with open('static/model/corpora/stopwords/english', 'r') as file:
    sw = file.read().splitlines()
# Vocabulary used by vectorizer(): one token per row, no header row.
vocab = pd.read_csv('static/model/vocabulary.txt', header=None)
tokens = vocab[0].tolist()
def remove_punctuations(text):
    """Return *text* with every ASCII punctuation character removed.

    Uses a single ``str.translate`` pass instead of one ``str.replace``
    call per punctuation character (32 full passes over the string in
    the original implementation). Behavior is identical: each character
    in ``string.punctuation`` is deleted, everything else is kept.

    Args:
        text: input string.

    Returns:
        The input string without punctuation.
    """
    return text.translate(str.maketrans('', '', string.punctuation))
def preprocessing(text):
    """Clean one raw tweet string for vectorization.

    Pipeline: lowercase each token, drop URL tokens, strip punctuation
    and digits, remove stopwords, and Porter-stem the remaining words.

    Args:
        text: raw tweet string.

    Returns:
        A single-element pandas Series containing the cleaned string.
    """
    data = pd.DataFrame([text], columns=['tweet'])
    # Lowercase token by token (preserves the original's behavior of
    # collapsing runs of whitespace into single spaces).
    data["tweet"] = data["tweet"].apply(lambda x: " ".join(w.lower() for w in x.split()))
    # Remove tokens that are URLs. The original pattern ended with the
    # malformed class '[r\r\n]*' (a stray literal 'r' — typo for a
    # trailing-newline match, which never occurs inside a split token);
    # '^https?://\S*' removes the same whole-token URLs cleanly.
    data["tweet"] = data["tweet"].apply(
        lambda x: " ".join(re.sub(r'^https?://\S*', '', w) for w in x.split()))
    data["tweet"] = data["tweet"].apply(remove_punctuations)
    # Strip digits (raw string fixes the '\d' non-raw-escape warning).
    data["tweet"] = data['tweet'].str.replace(r'\d+', '', regex=True)
    # Drop stopwords using the module-level list `sw`.
    data["tweet"] = data["tweet"].apply(lambda x: " ".join(w for w in x.split() if w not in sw))
    # Porter-stem each word with the module-level stemmer `ps`.
    data["tweet"] = data["tweet"].apply(lambda x: " ".join(ps.stem(w) for w in x.split()))
    return data['tweet']
def vectorizer(ds, vocabulary=None):
    """One-hot encode each sentence against a vocabulary.

    Args:
        ds: iterable of preprocessed sentence strings.
        vocabulary: optional list of vocabulary tokens; defaults to the
            module-level `tokens` loaded from vocabulary.txt, keeping the
            original single-argument call backward compatible.

    Returns:
        float32 ndarray of shape (len(ds), len(vocabulary)); entry [i, j]
        is 1.0 when vocabulary[j] appears as a whole word in sentence i,
        else 0.0.
    """
    vocab_tokens = tokens if vocabulary is None else vocabulary
    rows = []
    for sentence in ds:
        # Build the word set once per sentence: O(1) membership tests
        # instead of re-splitting the sentence for every vocabulary
        # token as the original inner loop did (O(v * w) splits).
        words = set(sentence.split())
        rows.append(np.array([1.0 if tok in words else 0.0 for tok in vocab_tokens],
                             dtype=np.float32))
    return np.asarray(rows, dtype=np.float32)
def get_prediction(vectorized_text, clf=None):
    """Map a classifier's predicted class to a sentiment label.

    Args:
        vectorized_text: 2-D feature array for one (or more) samples;
            only the first sample's prediction decides the label, as in
            the original implementation.
        clf: optional object with a ``predict`` method; defaults to the
            module-level `model` (backward compatible).

    Returns:
        'negative' when the predicted class is 1, else 'positive'.
    """
    classifier = model if clf is None else clf
    # Index [0] explicitly: predict() returns an array-like, and truth-
    # testing a multi-element array raises ValueError; the original
    # `if prediction == 1` only worked for exactly one sample.
    prediction = classifier.predict(vectorized_text)[0]
    return 'negative' if prediction == 1 else 'positive'