Skip to content

Commit a20d312

Browse files
committed
skpkg: apply black to all files in the project dir
1 parent a731051 commit a20d312

File tree

9 files changed

+212
-70
lines changed

9 files changed

+212
-70
lines changed

.github/ISSUE_TEMPLATE/release_checklist.md

Lines changed: 21 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -6,30 +6,41 @@ labels: "release"
66
assignees: ""
77
---
88

9-
### PyPI/GitHub release checklist:
9+
### PyPI/GitHub rc-release preparation checklist:
1010

1111
- [ ] All PRs/issues attached to the release are merged.
1212
- [ ] All the badges on the README are passing.
1313
- [ ] License information is verified as correct. If you are unsure, please comment below.
1414
- [ ] Locally rendered documentation contains all appropriate pages, including API references (check no modules are
15-
missing), tutorials, and other human written text is up-to-date with any changes in the code.
16-
- [ ] Installation instructions in the README, documentation and on the website (e.g., diffpy.org) are updated.
15+
missing), tutorials, and other human-written text is up-to-date with any changes in the code.
16+
- [ ] Installation instructions in the README, documentation, and the website are updated.
1717
- [ ] Successfully run any tutorial examples or do functional testing with the latest Python version.
1818
- [ ] Grammar and writing quality are checked (no typos).
19+
- [ ] Install `pip install build twine`, run `python -m build` and `twine check dist/*` to ensure that the package can be built and is correctly formatted for PyPI release.
1920

20-
Please mention @sbillinge here when you are ready for PyPI/GitHub release. Include any additional comments necessary, such as
21-
version information and details about the pre-release here:
21+
Please tag the maintainer (e.g., @username) in the comment here when you are ready for the PyPI/GitHub release. Include any additional comments necessary, such as version information and details about the pre-release here:
2222

23-
### conda-forge release checklist:
23+
### PyPI/GitHub full-release preparation checklist:
2424

25-
<!-- After @sbillinge releases the PyPI package, please check the following when creating a PR for conda-forge release.-->
25+
- [ ] Create a new conda environment and install the rc from PyPI (`pip install <package-name>==??`)
26+
- [ ] License information on PyPI is correct.
27+
- [ ] Docs are deployed successfully to `https://<github-username-or-orgname>/<package-name>`.
28+
- [ ] Successfully run all tests, tutorial examples or do functional testing.
2629

30+
Please let the maintainer know that all checks are done and the package is ready for full release.
31+
32+
### conda-forge release preparation checklist:
33+
34+
<!-- After the maintainer releases the PyPI package, please check the following when creating a PR for conda-forge release.-->
35+
36+
- [ ] Ensure that the full release has appeared on PyPI successfully.
2737
- [ ] New package dependencies listed in `conda.txt` and `test.txt` are added to `meta.yaml` in the feedstock.
28-
- [ ] All relevant issues in the feedstock are addressed in the release PR.
38+
- [ ] Close any open issues on the feedstock. Reach out to the maintainer if you have questions.
39+
- [ ] Tag the maintainer for conda-forge release.
2940

3041
### Post-release checklist
3142

3243
<!-- Before closing this issue, please complete the following: -->
3344

34-
- [ ] Run tutorial examples and conduct functional testing using the installation guide in the README. Attach screenshots/results as comments.
35-
- [ ] Documentation (README, tutorials, API references, and websites) is deployed without broken links or missing figures.
45+
- [ ] Run tutorial examples and conduct functional testing using the installation guide in the README. Attach screenshots/results as comments.
46+
- [ ] Documentation (README, tutorials, API references, and websites) is deployed without broken links or missing figures.

doc/source/conf.py

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -221,7 +221,13 @@
221221
# (source start file, target name, title,
222222
# author, documentclass [howto, manual, or own class]).
223223
latex_documents = [
224-
("index", "diffpy.nmf_mapping.tex", "diffpy.nmf_mapping Documentation", ab_authors, "manual"),
224+
(
225+
"index",
226+
"diffpy.nmf_mapping.tex",
227+
"diffpy.nmf_mapping Documentation",
228+
ab_authors,
229+
"manual",
230+
),
225231
]
226232

227233
# The name of an image file (relative to this directory) to place at the top of
@@ -249,7 +255,15 @@
249255

250256
# One entry per manual page. List of tuples
251257
# (source start file, name, description, authors, manual section).
252-
man_pages = [("index", "diffpy.nmf_mapping", "diffpy.nmf_mapping Documentation", ab_authors, 1)]
258+
man_pages = [
259+
(
260+
"index",
261+
"diffpy.nmf_mapping",
262+
"diffpy.nmf_mapping Documentation",
263+
ab_authors,
264+
1,
265+
)
266+
]
253267

254268
# If true, show URL addresses after external links.
255269
# man_show_urls = False

src/__init__.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,7 @@
1313
#
1414
##############################################################################
1515

16-
"""nmf_mapping - tools for performing NMF on PDF and XRD data.
17-
"""
16+
"""nmf_mapping - tools for performing NMF on PDF and XRD data."""
1817

1918

2019
__import__("pkg_resources").declare_namespace(__name__)

src/diffpy/nmf_mapping/main.py

Lines changed: 109 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -31,21 +31,32 @@ def main(args=None):
3131
as well as the reconstruction error as a fxn of component
3232
"""
3333

34-
parser = ArgumentParser(prog="nmf_mapping", description=_BANNER, formatter_class=RawTextHelpFormatter)
34+
parser = ArgumentParser(
35+
prog="nmf_mapping",
36+
description=_BANNER,
37+
formatter_class=RawTextHelpFormatter,
38+
)
3539

3640
def tup(s):
3741
if not isinstance(s, str):
38-
raise TypeError("Input must be a string of two integers separated by a comma.")
42+
raise TypeError(
43+
"Input must be a string of two integers separated by a comma."
44+
)
3945

4046
try:
4147
l, h = map(int, s.split(","))
4248
return l, h
4349
except ValueError:
44-
raise ValueError("Input must be two integers separated by a comma (e.g., '1,5')")
50+
raise ValueError(
51+
"Input must be two integers separated by a comma (e.g., '1,5')"
52+
)
4553

4654
# args
4755
parser.add_argument(
48-
"directory", default=None, type=str, help="a directory of PDFs to calculate NMF decomposition"
56+
"directory",
57+
default=None,
58+
type=str,
59+
help="a directory of PDFs to calculate NMF decomposition",
4960
)
5061
group = parser.add_mutually_exclusive_group()
5162
parser.add_argument(
@@ -88,15 +99,19 @@ def tup(s):
8899
"--xrd",
89100
default=False,
90101
type=boolean_string,
91-
help="whether to look for .xy files rather than .gr files\n" "default: False\n" "e.g. --xrd True",
102+
help="whether to look for .xy files rather than .gr files\n"
103+
"default: False\n"
104+
"e.g. --xrd True",
92105
)
93106
parser.add_argument(
94107
"--x_units",
95108
default=None,
96109
type=str,
97110
choices=["twotheta", "q"],
98111
required="--xrd" in sys.argv,
99-
help="x axis units for XRD data\n" "default: None\n" "e.g. --x_units twotheta",
112+
help="x axis units for XRD data\n"
113+
"default: None\n"
114+
"e.g. --x_units twotheta",
100115
)
101116
parser.add_argument(
102117
"--xrange",
@@ -105,25 +120,37 @@ def tup(s):
105120
nargs="*",
106121
help="the x-range over which to calculate NMF, can be multiple ranges (e.g. --xrange 5,10 12,15)",
107122
)
108-
parser.add_argument("--show", default=True, type=boolean_string, help="whether to show the plot")
123+
parser.add_argument(
124+
"--show",
125+
default=True,
126+
type=boolean_string,
127+
help="whether to show the plot",
128+
)
109129
args0 = Namespace()
110130
args1, _ = parser.parse_known_args(args, namespace=args0)
111131

112132
input_list, data_list = nmf.load_data(args1.directory, args1.xrd)
113133
if args1.pca_thresh:
114-
df_components, df_component_weight_timeseries, df_reconstruction_error, df_explained_var_ratio = (
115-
nmf.NMF_decomposition(
116-
input_list,
117-
args1.xrange,
118-
args1.threshold,
119-
additional_comp=False,
120-
improve_thresh=args1.improve_thresh,
121-
n_iter=args1.n_iter,
122-
pca_thresh=args1.pca_thresh,
123-
)
134+
(
135+
df_components,
136+
df_component_weight_timeseries,
137+
df_reconstruction_error,
138+
df_explained_var_ratio,
139+
) = nmf.NMF_decomposition(
140+
input_list,
141+
args1.xrange,
142+
args1.threshold,
143+
additional_comp=False,
144+
improve_thresh=args1.improve_thresh,
145+
n_iter=args1.n_iter,
146+
pca_thresh=args1.pca_thresh,
124147
)
125148
else:
126-
df_components, df_component_weight_timeseries, df_reconstruction_error = nmf.NMF_decomposition(
149+
(
150+
df_components,
151+
df_component_weight_timeseries,
152+
df_reconstruction_error,
153+
) = nmf.NMF_decomposition(
127154
input_list,
128155
args1.xrange,
129156
args1.threshold,
@@ -134,7 +161,9 @@ def tup(s):
134161

135162
print(f"Number of components: {len(df_components.columns)}")
136163

137-
fig1 = nmf.component_plot(df_components, args1.xrd, args1.x_units, args1.show)
164+
fig1 = nmf.component_plot(
165+
df_components, args1.xrd, args1.x_units, args1.show
166+
)
138167
fig2 = nmf.component_ratio_plot(df_component_weight_timeseries, args1.show)
139168
fig3 = nmf.reconstruction_error_plot(df_reconstruction_error, args1.show)
140169
if args1.pca_thresh:
@@ -143,40 +172,72 @@ def tup(s):
143172
if args1.save_files:
144173
if not os.path.exists(os.path.join(os.getcwd(), "nmf_result")):
145174
os.mkdir(os.path.join(os.getcwd(), "nmf_result"))
146-
output_fn = datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S%f")
147-
df_components.to_json(os.path.join(os.getcwd(), "nmf_result", "x_index_vs_y_col_components.json"))
175+
output_fn = datetime.fromtimestamp(time.time()).strftime(
176+
"%Y%m%d%H%M%S%f"
177+
)
178+
df_components.to_json(
179+
os.path.join(
180+
os.getcwd(), "nmf_result", "x_index_vs_y_col_components.json"
181+
)
182+
)
148183
df_component_weight_timeseries.to_json(
149-
os.path.join(os.getcwd(), "nmf_result", "component_index_vs_pratio_col.json")
184+
os.path.join(
185+
os.getcwd(), "nmf_result", "component_index_vs_pratio_col.json"
186+
)
150187
)
151188
df_component_weight_timeseries.to_csv(
152-
os.path.join(os.getcwd(), "nmf_result", output_fn + "component_row_pratio_col.txt"),
189+
os.path.join(
190+
os.getcwd(),
191+
"nmf_result",
192+
output_fn + "component_row_pratio_col.txt",
193+
),
153194
header=None,
154195
index=False,
155196
sep=" ",
156197
mode="a",
157198
)
158199
df_reconstruction_error.to_json(
159-
os.path.join(os.getcwd(), "nmf_result", "component_index_vs_RE_value.json")
200+
os.path.join(
201+
os.getcwd(), "nmf_result", "component_index_vs_RE_value.json"
202+
)
203+
)
204+
plot_file1 = os.path.join(
205+
os.getcwd(), "nmf_result", output_fn + "comp_plot.png"
206+
)
207+
plot_file2 = os.path.join(
208+
os.getcwd(), "nmf_result", output_fn + "ratio_plot.png"
209+
)
210+
plot_file3 = os.path.join(
211+
os.getcwd(), "nmf_result", output_fn + "loss_plot.png"
160212
)
161-
plot_file1 = os.path.join(os.getcwd(), "nmf_result", output_fn + "comp_plot.png")
162-
plot_file2 = os.path.join(os.getcwd(), "nmf_result", output_fn + "ratio_plot.png")
163-
plot_file3 = os.path.join(os.getcwd(), "nmf_result", output_fn + "loss_plot.png")
164213
if args1.pca_thresh:
165-
plot_file7 = os.path.join(os.getcwd(), "nmf_result", output_fn + "pca_var_plot.png")
214+
plot_file7 = os.path.join(
215+
os.getcwd(), "nmf_result", output_fn + "pca_var_plot.png"
216+
)
166217
plot_file4 = os.path.splitext(plot_file1)[0] + ".pdf"
167218
plot_file5 = os.path.splitext(plot_file2)[0] + ".pdf"
168219
plot_file6 = os.path.splitext(plot_file3)[0] + ".pdf"
169220
if args1.pca_thresh:
170221
plot_file8 = os.path.splitext(plot_file7)[0] + ".pdf"
171-
txt_file = os.path.join(os.getcwd(), "nmf_result", output_fn + "_meta" + ".txt")
222+
txt_file = os.path.join(
223+
os.getcwd(), "nmf_result", output_fn + "_meta" + ".txt"
224+
)
172225
with open(txt_file, "w+") as fi:
173226
fi.write("NMF Analysis\n\n")
174-
fi.write(f"{len(df_component_weight_timeseries.columns)} files uploaded for analysis.\n\n")
227+
fi.write(
228+
f"{len(df_component_weight_timeseries.columns)} files uploaded for analysis.\n\n"
229+
)
175230
fi.write(f"The selected active r ranges are: {args1.xrange} \n\n")
176231
fi.write("Thesholding:\n")
177-
fi.write(f"\tThe input component threshold was: {args1.threshold}\n")
178-
fi.write(f"\tThe input improvement threshold was: {args1.improve_thresh}\n")
179-
fi.write(f"\tThe input # of iterations to run was: {args1.n_iter}\n")
232+
fi.write(
233+
f"\tThe input component threshold was: {args1.threshold}\n"
234+
)
235+
fi.write(
236+
f"\tThe input improvement threshold was: {args1.improve_thresh}\n"
237+
)
238+
fi.write(
239+
f"\tThe input # of iterations to run was: {args1.n_iter}\n"
240+
)
180241
fi.write(f"\tWas PCA thresholding used?: {args1.pca_thresh}\n")
181242
fi.write(f"{len(df_components.columns)} components were extracted")
182243

@@ -192,11 +253,17 @@ def tup(s):
192253
fig4.savefig(plot_file8)
193254
columns = df_components.columns
194255
for i, col in enumerate(columns):
195-
data = np.column_stack([df_components.index.to_list(), df_components[col].to_list()])
256+
data = np.column_stack(
257+
[df_components.index.to_list(), df_components[col].to_list()]
258+
)
196259

197260
if args1.xrd:
198261
np.savetxt(
199-
os.path.join(os.getcwd(), "nmf_result", output_fn + f"_comp{i}" + ".xy"),
262+
os.path.join(
263+
os.getcwd(),
264+
"nmf_result",
265+
output_fn + f"_comp{i}" + ".xy",
266+
),
200267
data,
201268
header=f"NMF Generated XRD\nSource = nmfMapping\n"
202269
f"Date = {output_fn}\n{args1.x_units} Intensity\n",
@@ -205,9 +272,14 @@ def tup(s):
205272
)
206273
else:
207274
np.savetxt(
208-
os.path.join(os.getcwd(), "nmf_result", output_fn + f"_comp{i}" + ".cgr"),
275+
os.path.join(
276+
os.getcwd(),
277+
"nmf_result",
278+
output_fn + f"_comp{i}" + ".cgr",
279+
),
209280
data,
210-
header=f"NMF Generated PDF\nSource: nmfMapping\n" f"Date: {output_fn}\nr g",
281+
header=f"NMF Generated PDF\nSource: nmfMapping\n"
282+
f"Date: {output_fn}\nr g",
211283
fmt="%s",
212284
)
213285

0 commit comments

Comments (0)