import logging
import os
from pathlib import Path
from typing import Dict, List

from sqlmodel import select
from sqlmodel.orm.session import Session as SQLModelSession
from werkzeug.utils import secure_filename

import murfey.server.prometheus as prom
from murfey.util import safe_run, sanitise, secure_path
from murfey.util.config import get_machine_config
from murfey.util.db import (
    DataCollection,
    DataCollectionGroup,
    FoilHole,
    GridSquare,
    ProcessingJob,
    RsyncInstance,
    Session as MurfeySession,
)

logger = logging.getLogger("murfey.server.api.shared")


def remove_session_by_id(session_id: int, db):
    """
    Delete a session from the database, removing its Prometheus metrics first
    if no other session shares its visit.
    """
    session = db.exec(
        select(MurfeySession).where(MurfeySession.id == session_id)
    ).one()
    sessions_for_visit = db.exec(
        select(MurfeySession).where(MurfeySession.visit == session.visit)
    ).all()
    # Don't remove prometheus metrics if there are other sessions using them
    if len(sessions_for_visit) == 1:
        safe_run(
            prom.monitoring_switch.remove,
            args=(session.visit,),
            label="monitoring_switch",
        )
        rsync_instances = db.exec(
            select(RsyncInstance).where(RsyncInstance.session_id == session_id)
        ).all()
        # Remove the per-source file transfer metrics for this visit
        for ri in rsync_instances:
            safe_run(
                prom.seen_files.remove,
                args=(ri.source, session.visit),
                label="seen_files",
            )
            safe_run(
                prom.transferred_files.remove,
                args=(ri.source, session.visit),
                label="transferred_files",
            )
            safe_run(
                prom.transferred_files_bytes.remove,
                args=(ri.source, session.visit),
                label="transferred_files_bytes",
            )
            safe_run(
                prom.seen_data_files.remove,
                args=(ri.source, session.visit),
                label="seen_data_files",
            )
            safe_run(
                prom.transferred_data_files.remove,
                args=(ri.source, session.visit),
                label="transferred_data_files",
            )
            safe_run(
                prom.transferred_data_files_bytes.remove,
                args=(ri.source, session.visit),
                label="transferred_data_files_bytes",
            )
            safe_run(
                prom.skipped_files.remove,
                args=(ri.source, session.visit),
                label="skipped_files",
            )
        collected_ids = db.exec(
            select(DataCollectionGroup, DataCollection, ProcessingJob)
            .where(DataCollectionGroup.session_id == session_id)
            .where(DataCollection.dcg_id == DataCollectionGroup.id)
            .where(ProcessingJob.dc_id == DataCollection.id)
        ).all()
        # Remove the movie counts recorded against each processing job
        for c in collected_ids:
            safe_run(
                prom.preprocessed_movies.remove,
                args=(c[2].id,),
                label="preprocessed_movies",
            )
    db.delete(session)
    db.commit()
    logger.debug(f"Successfully removed session {session_id} from database")
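

# Example (a hypothetical sketch; `engine` is a stand-in for the server's real
# database engine, and `1` for a real session id):
#
#     with SQLModelSession(engine) as db:
#         remove_session_by_id(1, db)
#         # The session row, and any Prometheus metrics not shared with another
#         # session of the same visit, are now gone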


def get_grid_squares(session_id: int, db) -> Dict[str, List[GridSquare]]:
    """Group a session's grid squares by their data collection group tag."""
    grid_squares = db.exec(
        select(GridSquare).where(GridSquare.session_id == session_id)
    ).all()
    tags = {gs.tag for gs in grid_squares}
    return {t: [gs for gs in grid_squares if gs.tag == t] for t in tags}
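

# Example (a hypothetical sketch, assuming `db` is an open SQLModel session):
#
#     squares_by_tag = get_grid_squares(session_id=1, db=db)
#     for tag, squares in squares_by_tag.items():
#         print(f"{tag}: {len(squares)} grid squares")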


def get_grid_squares_from_dcg(session_id: int, dcgid: int, db) -> List[GridSquare]:
    grid_squares = db.exec(
        select(GridSquare, DataCollectionGroup)
        .where(GridSquare.session_id == session_id)
        .where(GridSquare.tag == DataCollectionGroup.tag)
        .where(DataCollectionGroup.id == dcgid)
    ).all()
    return [gs[0] for gs in grid_squares]


def get_foil_holes_from_grid_square(
    session_id: int, dcgid: int, gsid: int, db
) -> List[FoilHole]:
    foil_holes = db.exec(
        select(FoilHole, GridSquare, DataCollectionGroup)
        .where(FoilHole.grid_square_id == GridSquare.id)
        .where(GridSquare.name == gsid)
        .where(GridSquare.session_id == session_id)
        .where(GridSquare.tag == DataCollectionGroup.tag)
        .where(DataCollectionGroup.id == dcgid)
    ).all()
    return [fh[0] for fh in foil_holes]


def get_foil_hole(session_id: int, fh_name: int, db) -> Dict[str, int]:
    foil_holes = db.exec(
        select(FoilHole, GridSquare)
        .where(FoilHole.name == fh_name)
        .where(FoilHole.session_id == session_id)
        .where(GridSquare.id == FoilHole.grid_square_id)
    ).all()
    return {f[1].tag: f[0].id for f in foil_holes}
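

# Example (a hypothetical sketch): a foil hole name is looked up across all of
# a session's grid squares; the result maps each grid square's tag to the
# matching foil hole's database id. Values below are stand-ins.
#
#     fh_ids = get_foil_hole(session_id=1, fh_name=7, db=db)
#     # e.g. {"grid1": 101, "grid2": 156}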


def find_upstream_visits(session_id: int, db: SQLModelSession, max_depth: int = 2):
    """
    Returns a nested dictionary in which visits and the full paths to their
    directories are grouped by instrument name.
    """

    def _recursive_search(
        dirpath: str | Path,
        search_string: str,
        partial_match: bool = True,
        max_depth: int = 1,
        result: dict[str, Path] | None = None,
    ):
        # If no dictionary was passed in, create a new one
        if result is None:
            result = {}
        # Stop recursing down this route once max depth reaches 0
        if max_depth == 0:
            return result
        # Walk through the directories
        for entry in os.scandir(dirpath):
            if entry.is_dir():
                # Update the dictionary with a match and stop recursing down this route
                if (
                    search_string in entry.name
                    if partial_match
                    else search_string == entry.name
                ):
                    if result is not None:  # MyPy needs this 'is not None' check
                        result[entry.name] = Path(entry.path)
                else:
                    # Continue searching down this route until max depth is reached
                    result = _recursive_search(
                        dirpath=entry.path,
                        search_string=search_string,
                        partial_match=partial_match,
                        max_depth=max_depth - 1,
                        result=result,
                    )
        return result

    murfey_session = db.exec(
        select(MurfeySession).where(MurfeySession.id == session_id)
    ).one()
    visit_name = murfey_session.visit
    instrument_name = murfey_session.instrument_name
    machine_config = get_machine_config(instrument_name=instrument_name)[
        instrument_name
    ]
    upstream_visits: dict[str, dict[str, Path]] = {}
    # Iterate through the configured upstream directories
    for (
        upstream_instrument,
        upstream_data_dir,
    ) in machine_config.upstream_data_directories.items():
        # Recursively look for matching visit names under the current directory
        upstream_visits[upstream_instrument] = _recursive_search(
            dirpath=upstream_data_dir,
            search_string=f"{visit_name.split('-')[0]}-",
            partial_match=True,
            max_depth=max_depth,
        )
    return upstream_visits
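

# Example (a hypothetical sketch): visits are matched on the part of the
# current visit name before the first "-", so sibling visits of the same
# proposal are found too. Instrument names and paths below are stand-ins.
#
#     upstream = find_upstream_visits(session_id=1, db=db)
#     # e.g. {"m02": {"cm12345-1": Path("/data/m02/cm12345-1"),
#     #               "cm12345-2": Path("/data/m02/cm12345-2")}}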


def gather_upstream_files(
    session_id: int,
    upstream_instrument: str,
    upstream_visit_path: Path,
    db: SQLModelSession,
):
    """
    Searches the specified upstream instrument for files based on the search strings
    set in the MachineConfig and returns them as a list of file paths.
    """
    # Load the current instrument's machine config
    murfey_session = db.exec(
        select(MurfeySession).where(MurfeySession.id == session_id)
    ).one()
    instrument_name = murfey_session.instrument_name
    machine_config = get_machine_config(instrument_name=instrument_name)[
        instrument_name
    ]

    # Search for files using the configured strings for that upstream instrument
    file_list: list[Path] = []
    logger.info(f"Searching for files in {sanitise(str(upstream_visit_path))!r}")
    if (
        machine_config.upstream_data_search_strings.get(upstream_instrument, None)
        is not None
    ):
        for search_string in machine_config.upstream_data_search_strings[
            upstream_instrument
        ]:
            logger.info(f"Using search string {search_string}")
            for file in upstream_visit_path.glob(search_string):
                if file.is_file():
                    file_list.append(file)
        logger.info(
            f"Found {len(file_list)} files for download "
            f"from {sanitise(upstream_instrument)}"
        )
    else:
        logger.warning(
            "Upstream file searching has not been configured for "
            f"{sanitise(upstream_instrument)} on {sanitise(instrument_name)}"
        )
    return file_list
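

# Example (a hypothetical sketch): combine find_upstream_visits() with
# gather_upstream_files() to enumerate everything available for download.
#
#     for instrument, visits in find_upstream_visits(session_id=1, db=db).items():
#         for visit_path in visits.values():
#             files = gather_upstream_files(1, instrument, visit_path, db=db)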


def get_upstream_file(file_path: str | Path):
    file_path = secure_path(Path(file_path))
    if file_path.is_file():
        return file_path
    logger.warning(f"Requested file {sanitise(str(file_path))!r} was not found")
    return None


def get_upstream_tiff_dirs(visit_name: str, instrument_name: str) -> List[Path]:
    tiff_dirs = []
    machine_config = get_machine_config(instrument_name=instrument_name)[
        instrument_name
    ]
    for directory_name in machine_config.upstream_data_tiff_locations:
        for p in machine_config.upstream_data_directories.values():
            if (Path(p) / secure_filename(visit_name)).is_dir():
                processed_dir = Path(p) / secure_filename(visit_name) / directory_name
                tiff_dirs.append(processed_dir)
                break
    if not tiff_dirs:
        logger.warning(
            "No candidate directory found for upstream download "
            f"from visit {sanitise(visit_name)}"
        )
    return tiff_dirs


def gather_upstream_tiffs(visit_name: str, session_id: int, db: SQLModelSession):
    """
    Looks for TIFF files associated with the current session in the permitted storage
    servers, and returns their relative file paths as a list.
    """
    instrument_name = (
        db.exec(select(MurfeySession).where(MurfeySession.id == session_id))
        .one()
        .instrument_name
    )
    upstream_tiff_paths = []
    tiff_dirs = get_upstream_tiff_dirs(visit_name, instrument_name)
    if not tiff_dirs:
        return None
    for tiff_dir in tiff_dirs:
        for f in tiff_dir.glob("**/*.tiff"):
            upstream_tiff_paths.append(str(f.relative_to(tiff_dir)))
        for f in tiff_dir.glob("**/*.tif"):
            upstream_tiff_paths.append(str(f.relative_to(tiff_dir)))
    return upstream_tiff_paths
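

# Example (a hypothetical sketch, with a stand-in visit name): list the TIFFs
# available for a visit; paths are relative to the matched TIFF directory.
#
#     tiffs = gather_upstream_tiffs("cm12345-1", session_id=1, db=db)
#     # e.g. ["processed/image_0001.tiff", "processed/image_0002.tif"]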


def get_tiff_file(
    visit_name: str, session_id: int, tiff_path: str, db: SQLModelSession
):
    instrument_name = (
        db.exec(select(MurfeySession).where(MurfeySession.id == session_id))
        .one()
        .instrument_name
    )
    tiff_dirs = get_upstream_tiff_dirs(visit_name, instrument_name)
    if not tiff_dirs:
        return None
    # Sanitise each component of the requested relative path
    tiff_path = "/".join(secure_filename(p) for p in tiff_path.split("/"))
    # Return the file from the first candidate directory that contains it
    for tiff_dir in tiff_dirs:
        tiff_file = tiff_dir / tiff_path
        if tiff_file.is_file():
            break
    else:
        logger.warning(f"TIFF {tiff_path} not found")
        return None
    return tiff_file
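

# Example (a hypothetical sketch): resolve one of the relative paths returned
# by gather_upstream_tiffs() to a full path, or None if no candidate directory
# contains it.
#
#     tiff = get_tiff_file("cm12345-1", 1, "processed/image_0001.tiff", db=db)
#     if tiff is not None:
#         data = tiff.read_bytes()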