corpus_embed.py
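"""Embed Wikipedia articles with a sentence-transformers model and store the
vectors in sharded .npy files of at most MAX_VECTORS_PER_FILE rows each."""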
import os

import numpy as np
from tqdm import tqdm

from corpus import WikipediaCorpus
from embeddings import EmbeddingModel

EMBEDDING_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
# number of strings to embed per batch
BATCH_SIZE = 32
# maximum number of embedding vectors to store in a single output file
MAX_VECTORS_PER_FILE = 65536
# only take the first N characters of each document text (no chunking, one vector per document)
FIRST_N_CHARS = 1000
# maximum number of articles to embed (None = embed the whole corpus)
MAX_ARTICLES = None
# the output file pattern; {i} is the zero-based index of the output file
OUTPUT_FILE_PATTERN = "embeddings/wikipedia_embeddings_{i}.npy"
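# e.g. the first two output files: embeddings/wikipedia_embeddings_0.npy, embeddings/wikipedia_embeddings_1.npy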


def wikipedia_texts(wikipedia, cut_off_chars_position):
    """Yield the first `cut_off_chars_position` characters of each article's text."""
    for _, wikipedia_document in wikipedia:
        yield wikipedia_document['text'][:cut_off_chars_position]


def batch_generator(generator, max_total, determine_batch_size):
    """Re-batch `generator` into lists of at most `max_total` elements in total,
    asking `determine_batch_size` before each new batch so the caller can shrink
    a batch (e.g. when an output file is about to reach its size limit)."""
    total = 0
    batch = []
    batch_size = determine_batch_size()
    for element in generator:
        if total == max_total:
            break
        total += 1
        batch.append(element)
        if len(batch) == batch_size:
            yield batch
            batch = []
            batch_size = determine_batch_size()
    if batch:
        yield batch
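# Illustrative example (not executed): re-batching five elements into batches of at most two
#   list(batch_generator(iter(range(5)), max_total=5, determine_batch_size=lambda: 2))
#   -> [[0, 1], [2, 3], [4]]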


if __name__ == '__main__':
    embedding_model = EmbeddingModel(EMBEDDING_MODEL_NAME)
    corpus = WikipediaCorpus()
    max_articles = corpus.num_entries() if MAX_ARTICLES is None else min(MAX_ARTICLES, corpus.num_entries())
    # ceiling division; batches near a file boundary may be smaller than BATCH_SIZE
    num_batches = (max_articles + BATCH_SIZE - 1) // BATCH_SIZE
    print(f"Embedding {max_articles} of {corpus.num_entries()} Wikipedia documents in about {num_batches} batches, "
          f"cutting text off at {FIRST_N_CHARS} characters.")

    # make sure the output directory from OUTPUT_FILE_PATTERN exists
    os.makedirs(os.path.dirname(OUTPUT_FILE_PATTERN), exist_ok=True)
    current_embeddings = []

    def next_batch_size():
        # shrink the batch so a single output file never exceeds MAX_VECTORS_PER_FILE vectors
        vectors_buffered = sum(len(batch) for batch in current_embeddings)
        return min(BATCH_SIZE, MAX_VECTORS_PER_FILE - vectors_buffered)

    progress_bar = tqdm(total=max_articles)
    file_number = 0
    for batch in batch_generator(generator=wikipedia_texts(corpus.iterator(), cut_off_chars_position=FIRST_N_CHARS),
                                 max_total=max_articles,
                                 determine_batch_size=next_batch_size):
        embeddings = embedding_model.tokenize_and_embed(batch)
        current_embeddings.append(embeddings.numpy(force=True))
        progress_bar.update(len(embeddings))
        total_current_embeddings = sum(len(embeddings_batch) for embeddings_batch in current_embeddings)
        if total_current_embeddings >= MAX_VECTORS_PER_FILE:
            np.save(OUTPUT_FILE_PATTERN.format(i=file_number), np.concatenate(current_embeddings, axis=0))
            file_number += 1
            current_embeddings = []
    # flush the remaining vectors that did not fill a whole file
    if current_embeddings:
        np.save(OUTPUT_FILE_PATTERN.format(i=file_number), np.concatenate(current_embeddings, axis=0))
    progress_bar.close()
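
A minimal sketch (hypothetical, not part of the file above) for loading the shards back into one matrix, assuming they were written with OUTPUT_FILE_PATTERN; only standard-library and NumPy calls are used:

import glob
import re

import numpy as np

# sort the shards numerically so that file 10 does not sort before file 2
paths = sorted(glob.glob("embeddings/wikipedia_embeddings_*.npy"),
               key=lambda p: int(re.search(r"_(\d+)\.npy$", p).group(1)))
all_embeddings = np.concatenate([np.load(p) for p in paths], axis=0)
print(all_embeddings.shape)  # (num_embedded_articles, embedding_dim)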