import os
from typing import Dict

import numpy as np
import nbformat
from basico import biomodels, load_model_from_string
from process_bigraph import Composite, pf
from pydantic import BaseModel


def fetch_biomodel_by_term(term: str, index: int = 0):
    """Search for models matching the term and return an instantiated model from BioModels.

        Args:
            term:`str`: search term
            index:`int`: selector index for model choice

        Returns:
            `CDataModel` instance of loaded model.

        TODO: Implement a dynamic search of this
    """
    models = biomodels.search_for_model(term)
    model = models[index]
    return fetch_biomodel(model['id'])


def fetch_biomodel(model_id: str):
    # TODO: make this generalizable for those other than basico
    sbml = biomodels.get_content_for_model(model_id)
    return load_model_from_string(sbml)
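
# Example (illustrative only; assumes network access to BioModels and that the
# search term returns at least one hit):
#
#     model = fetch_biomodel_by_term('glycolysis')
#     model_by_id = fetch_biomodel('BIOMD0000000051')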


def play_composition(instance: dict, duration: int):
    """Configure and run a Composite workflow."""
    workflow = Composite({
        'state': instance
    })
    workflow.run(duration)
    results = workflow.gather_results()
    print(f'RESULTS: {pf(results)}')
    return results
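
# Example (sketch; assumes the CopasiProcess accepts a config built by
# `generate_sed_model_config_schema` and that 'local:copasi' is registered):
#
#     config = generate_sed_model_config_schema(
#         entrypoint={'biomodel_id': 'BIOMD0000000051'},
#         species_changes={},
#         parameter_changes={})
#     instance = generate_single_copasi_process_instance('copasi', config)
#     results = play_composition(instance, duration=10)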


def generate_copasi_process_emitter_schema():
    return generate_emitter_schema(
        emitter_address='local',
        emitter_type='ram-emitter',
        floating_species='tree[float]',
        time='float'
    )


def generate_single_copasi_process_instance(instance_name: str, config: Dict, add_emitter: bool = True) -> Dict:
    """Generate an instance of a single copasi process which is named `instance_name`
        and configured by `config` formatted to the process bigraph Composite API.

        Args:
            instance_name:`str`: name of the new instance referenced by PBG.
            config:`Dict`: see `biosimulator_processes.processes.copasi_process.CopasiProcess`
            add_emitter:`bool`: Adds emitter schema configured for CopasiProcess IO store if `True`.
                Defaults to `True`.
    """
    instance = {}
    instance[instance_name] = {
        '_type': 'process',
        'address': 'local:copasi',
        'config': config,
        'inputs': {
            'floating_species': ['floating_species_store'],
            'model_parameters': ['model_parameters_store'],
            'time': ['time_store'],
            'reactions': ['reactions_store']
        },
        'outputs': {
            'floating_species': ['floating_species_store'],
            'time': ['time_store'],
        }
    }

    if add_emitter:
        instance['emitter'] = generate_copasi_process_emitter_schema()

    return instance
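
# The returned spec maps `instance_name` to a 'local:copasi' process node wired to
# shared stores, plus an 'emitter' step when `add_emitter=True`. Sketch:
#
#     spec = generate_single_copasi_process_instance('copasi', config={})
#     # sorted(spec) == ['copasi', 'emitter']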


def generate_single_process_instance(
        instance_name: str,
        instance_config: dict,
        inputs_config: dict,
        outputs_config: dict,
        instance_type: str = 'step',
        instance_address: str = 'local:copasi',
) -> dict:
    return {
        instance_name: {
            '_type': instance_type,
            'address': instance_address,
            'config': instance_config,
            'inputs': inputs_config,
            'outputs': outputs_config
        }
    }
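
# Example (sketch; the config value and store names are illustrative):
#
#     spec = generate_single_process_instance(
#         instance_name='copasi',
#         instance_config={'biomodel_id': 'BIOMD0000000051'},
#         inputs_config={'time': ['time_store']},
#         outputs_config={'time': ['time_store']},
#         instance_type='process')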


def generate_emitter_schema(
        emitter_address: str = "local",
        emitter_type: str = "ram-emitter",
        **emit_values_schema
) -> Dict:
    """
        Args:
            emitter_address:`str`: base address of the emitter (e.g. 'local').
            emitter_type:`str`: emitter implementation name (e.g. 'ram-emitter').
            **emit_values_schema:`kwargs`: values to be emitted by the address
    """
    return {
        '_type': 'step',
        'address': f'{emitter_address}:{emitter_type}',
        'config': {
            'emit': {**emit_values_schema},
        },
        'inputs': {  # TODO: make this generalized
            'floating_species': ['floating_species_store'],
            'time': ['time_store'],
        }
    }
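
# Example (the defaults reproduce the hard-coded CopasiProcess wiring above):
#
#     emitter = generate_emitter_schema(floating_species='tree[float]', time='float')
#     # emitter['address'] == 'local:ram-emitter'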


def generate_parameter_scan_instance(
        num_iterations: int,
        entry_config: Dict,
        modeler: str,
        add_emitter: bool = True,
        *parameters,
        **io_config
) -> Dict:
    """Generate a parameter scan instance configuration for a given process."""
    instance = {}
    for n in range(num_iterations):
        iteration_name = f'{modeler}_{n}'
        iteration_instance = generate_single_process_instance(
            instance_name=iteration_name,
            instance_config=entry_config,
            inputs_config=io_config['inputs'],
            outputs_config=io_config['outputs'])

        for instance_name, instance_schema in iteration_instance.items():
            instance[instance_name] = instance_schema

    if add_emitter:
        instance['emitter'] = generate_emitter_schema(
            floating_species='tree[float]',
            time='float')

    return instance
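
# Example (sketch; every iteration shares `entry_config` and the same store wiring):
#
#     scan = generate_parameter_scan_instance(
#         3, entry_config={'biomodel_id': 'BIOMD0000000051'}, modeler='copasi',
#         inputs={'time': ['time_store']},
#         outputs={'time': ['time_store']})
#     # sorted(scan) == ['copasi_0', 'copasi_1', 'copasi_2', 'emitter']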


def generate_copasi_parameter_scan_instance(
        num_iterations: int,
        entry_config: Dict,
        # *parameters
):
    # TODO: select parameters
    parameter_scan_instance = {}
    origin_value = 3.00
    for n in range(num_iterations):
        iteration_model_config = generate_sed_model_config_schema(
            entrypoint={'biomodel_id': 'BIOMD0000000051'},
            species_changes={'Extracellular Glucose': {'initial_concentration': origin_value**n}},
            parameter_changes={'catp': {'initial_value': (origin_value - n)**n}}
        )
        iteration_instance = generate_single_copasi_process_instance(
            instance_name=f'copasi_{n}',
            config=iteration_model_config,
            add_emitter=False
        )
        for iter_name, iter_config in iteration_instance.items():
            parameter_scan_instance[iter_name] = iter_config

    emitter_schema = generate_emitter_schema(floating_species='tree[float]', time='float')
    parameter_scan_instance['emitter'] = emitter_schema

    return parameter_scan_instance


def generate_sed_model_config_schema(
        entrypoint: Dict,
        species_changes: Dict,
        parameter_changes: Dict,
        reaction_changes: Dict = None
) -> Dict:
    """
        Args:
            entrypoint:`Dict[str, str]`: per CopasiProcess config_schema; ie: {'biomodel_id': 'BIOMODEL>>>>'}
            species_changes:`Dict[str, Dict[str, any]]`: ie: {'a': {'initial_concentration': 32.3}}
            parameter_changes:`Dict[str, Dict[str, any]]`: ie: {'x2': {'expression': '1 -> A..'}}
            reaction_changes:`Dict[str, Union[str, Dict[str, Dict[str, any]]]]`: ie:

                reaction_changes = {
                    'R1': {
                        'reaction_parameters': {
                            '(R1).k1': 23.2
                        },
                        'reaction_scheme': 'A -> B'
                    }
                }

        Example:
            sed_model = generate_sed_model_config_schema(
                entrypoint={'biomodel_id': 'BIOMD0000000051'},
                species_changes={'Extracellular Glucose': {'initial_concentration': 5.00}},
                parameter_changes={'catp': {'initial_value': 100.00}},
                reaction_changes={'Aldolase': {'scheme': 'A -> B'}}
            )
    """
    instance_schema = {
        'model': {
            'model_changes': {
                'species_changes': species_changes,
                'parameter_changes': parameter_changes,
                'reaction_changes': reaction_changes
            }
        }
    }

    for param_name, param_val in entrypoint.items():
        instance_schema['model'][param_name] = param_val

    return instance_schema


def perturb_parameter(num_iterations: int, degree: float):
    _range = []
    for n in range(num_iterations):
        if n > 0:
            n = n * degree
        _range.append(n)
    return _range


def perturb_parameter_numpy(num_iterations: int, degree: float):
    # numpy equivalent of `perturb_parameter`: [0, degree, 2*degree, ...]
    return np.linspace(start=0, stop=(num_iterations - 1) * degree, num=num_iterations)
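
# Example:
#
#     perturb_parameter(4, 0.5)        # -> [0, 0.5, 1.0, 1.5]
#     perturb_parameter_numpy(4, 0.5)  # -> array([0. , 0.5, 1. , 1.5])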


def fix_execution_count(notebook_path):
    with open(notebook_path, 'r', encoding='utf-8') as f:
        nb = nbformat.read(f, as_version=4)

    for cell in nb['cells']:
        if cell['cell_type'] == 'code':
            if 'execution_count' not in cell:
                cell['execution_count'] = None
            print('execution_count' in cell)

    with open(notebook_path, 'w', encoding='utf-8') as f:
        nbformat.write(nb, f)


def fix_notebooks_execution_count():
    for root, _, files in os.walk('../notebooks'):
        for _file in files:
            if not _file.endswith('.ipynb'):
                continue
            notebook_path = os.path.join(root, _file)
            fix_execution_count(notebook_path)
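
# Example (assumes the notebooks live in '../notebooks' relative to the working directory;
# the single notebook path below is hypothetical):
#
#     fix_execution_count('../notebooks/demo.ipynb')
#     fix_notebooks_execution_count()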