# dataset.py
# Forked from illiterate/BertClassifier
import numpy as np
from torch.utils.data import Dataset
from transformers import BertTokenizer
from tqdm import tqdm


class CNewsDataset(Dataset):
    """THUCNews (cnews) text-classification dataset: one `label\\ttext` pair per line."""

    def __init__(self, filename):
        # The ten cnews category labels (sports, entertainment, home, real estate,
        # education, fashion, politics, games, technology, finance).
        self.labels = ['体育', '娱乐', '家居', '房产', '教育', '时尚', '时政', '游戏', '科技', '财经']
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
        self.input_ids = []
        self.token_type_ids = []
        self.attention_mask = []
        self.label_id = []
        self.load_data(filename)

    def load_data(self, filename):
        print('loading data from:', filename)
        with open(filename, 'r', encoding='utf-8') as rf:
            lines = rf.readlines()
        for line in tqdm(lines):
            label, text = line.strip().split('\t')
            label_id = self.labels.index(label)
            # Tokenize each text, padding/truncating to a fixed length of 512 tokens.
            token = self.tokenizer(text, add_special_tokens=True, padding='max_length',
                                   truncation=True, max_length=512)
            self.input_ids.append(np.array(token['input_ids']))
            self.token_type_ids.append(np.array(token['token_type_ids']))
            self.attention_mask.append(np.array(token['attention_mask']))
            self.label_id.append(label_id)

    def __getitem__(self, index):
        return self.input_ids[index], self.token_type_ids[index], self.attention_mask[index], self.label_id[index]

    def __len__(self):
        return len(self.input_ids)
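

# A minimal usage sketch, assuming the cnews train split lives at
# 'data/cnews.train.txt' (the path is an assumption; point it at whichever
# "label\ttext"-per-line file you actually have).
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    dataset = CNewsDataset('data/cnews.train.txt')
    loader = DataLoader(dataset, batch_size=8, shuffle=True)

    # The default collate_fn stacks the per-sample numpy arrays into batched tensors.
    input_ids, token_type_ids, attention_mask, label_id = next(iter(loader))
    print(input_ids.shape)       # (8, 512)
    print(attention_mask.shape)  # (8, 512)
    print(label_id)              # tensor of 8 label indices in [0, 9]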