# Copyright 2023 Adobe
# All Rights Reserved.
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying
# it.
'''
Creates example pages showing the usage of a specific letter or letter
combination.
Needs a word list as an input file, such as the word lists available at
https://github.com/hermitdave/FrequencyWords/tree/master/content/2016

Input: font file(s) or folder of fonts.
'''
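# Example invocations (a sketch; the font file / folder names are placeholders,
# the flags are defined in get_options() below):
#
#   python contextProof.py -l g MyFont-Regular.otf
#   python contextProof.py -c Th -p 24 path/to/font_folder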
import os
import sys
import argparse
import subprocess
from pathlib import Path

import drawBot as db

from proofing_helpers.files import get_font_paths
from proofing_helpers.globals import *
from proofing_helpers.fontSorter import sort_fonts
from proofing_helpers.stamps import timestamp

default_wl = Path(__file__).parent / "_content" / "en_10k.txt"


def get_options():
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        '-p', '--point_size',
        default=20,
        action='store',
        type=int,
        help='font size')

    parser.add_argument(
        '-w', '--wordlist',
        default=default_wl,
        action='store',
        help='wordlist file')

    parser.add_argument(
        '-d', '--date',
        default=False,
        action='store_true',
        help='date output file')

    parser.add_argument(
        '-a', '--word_amount',
        default=300,
        action='store',
        type=int,
        help='max example words/page')

    parser.add_argument(
        '-k', '--kerning_off',
        default=False,
        action='store_true',
        help='switch off kerning')

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        '-l', '--letters',
        action='store',
        help='letter(s) to proof')

    group.add_argument(
        '-c', '--combination',
        action='store',
        help='combination to proof')

    parser.add_argument(
        'input',
        nargs='+',
        help='input font file(s)')

    return parser.parse_args()


def extract_x_words(wordlist_path, depth=1000):
    '''
    Extract the first `depth` lines from a word list.
    This is based on the `FrequencyWords` lists, which follow this syntax
    (word | word count):

        die 5453
        ek 4892
        nie 4499

    Only the word, not the count, is included in the output list.
    '''
    with open(wordlist_path, 'r') as f:
        data = f.read().splitlines()

    data = [word for word in data if len(word) >= 4]
    if len(data) > depth:
        data = data[:depth]
    data = [word.split(' ')[0] for word in data]
    return data
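
# Rough illustration of extract_x_words(), assuming a FrequencyWords-style
# file whose first lines are the ones quoted in the docstring above:
#
#   extract_x_words(wordlist_path, depth=3)  ->  ['die', 'ek', 'nie']
#
# Note that the `len(word) >= 4` check runs on the raw line ('word count'),
# not on the word by itself.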


def filter_wordlist(wordlist_path, letters='', combination=False):
    '''
    Filter wordlist by desired letter or combination.
    '''
    all_words = extract_x_words(wordlist_path, depth=30000)

    if letters:
        if combination is True:
            if letters.istitle():
                all_words.extend([word.title() for word in all_words])
            if letters.isupper():
                all_words.extend([word.upper() for word in all_words])
            return [word for word in all_words if letters in word]
        else:
            return [word for word in all_words if set(letters) & set(word)]
    else:
        return all_words
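
# Sketch of the two filtering modes (`wl` stands for any word list path):
#
#   filter_wordlist(wl, letters='th', combination=True)
#       -> words containing the sequence 'th'
#   filter_wordlist(wl, letters='th', combination=False)
#       -> words containing 't' or 'h' anywhere
#
# In combination mode, a title-case or all-caps input additionally adds
# title-cased or upper-cased copies of every word before filtering.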


def make_proof(args, content, font_paths, output_path):
    db.newDrawing()
    MARGIN = 30
    base_names = [os.path.basename(font) for font in font_paths]

    if args.kerning_off:
        kerning_flag = ' (no kerning) '
        fea_dict = {'kern': False}
    else:
        kerning_flag = ' '
        fea_dict = {}

    for font_index, font in enumerate(font_paths):
        font_name = base_names[font_index]
        db.newPage('LetterLandscape')

        # footer stamp: font name, kerning state, time stamp
        stamp = db.FormattedString(
            '{}{}| {}'.format(
                font_name, kerning_flag,
                timestamp(readable=True)),
            font=FONT_MONO,
            fontSize=8,
            align='right')
        db.text(stamp, (db.width() - MARGIN, MARGIN * 2 / 3))

        # the proof text itself, set within the page margins
        fs = db.FormattedString(
            content,
            font=font,
            fontSize=args.point_size,
            fallbackFont=ADOBE_BLANK,
            openTypeFeatures=fea_dict,
        )
        db.textBox(fs, (
            MARGIN, MARGIN,
            db.width() - 2 * MARGIN,
            db.height() - 2 * MARGIN
        ))

    db.saveImage(output_path)
    db.endDrawing()
    subprocess.call(['open', os.path.expanduser(output_path)])


def make_output_path(args):
    if args.letters:
        if len(args.letters) > 1:
            flag = 'letters'
        else:
            flag = 'letter'
        output_name = f'contextProof ({flag} {args.letters})'
    else:
        output_name = f'contextProof (combination {args.combination})'

    if args.date:
        output_name = f'{timestamp()} ' + output_name

    return Path(f'~/Desktop/{output_name}.pdf').expanduser()
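
# Resulting file names on the Desktop, for example:
#   contextProof (letter g).pdf          # -l g
#   contextProof (letters fi).pdf        # -l fi
#   contextProof (combination Th).pdf    # -c Th
# With -d, the name is additionally prefixed by timestamp().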


if __name__ == '__main__':
    args = get_options()
    output_path = make_output_path(args)

    font_list = []
    for input_path in args.input:
        font_list.extend(get_font_paths(input_path))
    input_paths = sort_fonts(font_list)

    limit = args.word_amount
    if args.letters:
        req_chars = args.letters
        combo_mode = False
    else:
        req_chars = args.combination
        combo_mode = True

    if os.path.exists(args.wordlist):
        content = filter_wordlist(args.wordlist, req_chars, combo_mode)
    else:
        sys.exit(f'Word list not found: {args.wordlist}')

    if content:
        make_proof(args, ' '.join(content[:limit]), input_paths, output_path)
    else:
        sys.exit(f'No words for {req_chars} found.')