
Commit 2e53e96
WIP - Thu May 7 13:18:35 EDT 2020
jdmonaco committed May 7, 2020
1 parent bbd9e70
Showing 24 changed files with 887 additions and 205 deletions.
5 changes: 3 additions & 2 deletions floyd/__init__.py
@@ -10,16 +10,17 @@
VERSION = "0.2.3"


+from .base import *
from .state import *
from .config import *
from .network import *
from .recorder import *
from .context import *
from .clocks import *
from .neurons import *
-from .synapses import *
+# from .synapses import *
from .input import *
-from .delay import *
+# from .delay import *
from .noise import *
from .simplot import *
from .graph import *
6 changes: 6 additions & 0 deletions floyd/base/__init__.py
@@ -0,0 +1,6 @@
"""
Base classes for neuron groups and the synapses that connect them.
"""

from .groups import BaseUnitGroup
from .projection import BaseProjection
39 changes: 21 additions & 18 deletions floyd/groups.py → floyd/base/groups.py
@@ -16,30 +16,29 @@ class BaseUnitGroup(TenkoObject):
base_dtypes = {}
default_dtype = 'f'

-    def __init__(self, *, N, dtype=None, **kwargs):
+    def __init__(self, *, name, shape, dtype=None, **kwargs):
"""
Create a group of model units with named variables.
-        Class-defined unit variables are made available as instance attributes.
+        Class-defined unit variables are made available as instance attributes
+        and initialized to zero-valued arrays.
Arguments
---------
-        N : int | tuple of ints
-            The size of the group (or shape if multiple dimensions needed)
+        name : str
+            Unique name given to the group
+        shape : int | tuple of ints
+            The shape or size (1D) of the group
dtype : dict | '?' | 'u' | 'i' | 'f' | 'd', optional (default 'f')
Default numpy dtype to use for initializing array variables
"""
-        super().__init__(**kwargs)
+        super().__init__(name=name, **kwargs)

-        self.shape = N
-        if np.iterable(self.shape):
-            self.N = reduce(op.mul, self.shape)
+        if np.iterable(shape):
+            self.shape = tuple(shape)
+            self.size = reduce(op.mul, self.shape)
        else:
-            self.N = self.shape
+            self.size = int(shape)
+            self.shape = (self.size,)

self._vardtypes = dict(self.base_dtypes)
dflt_dtype = self.default_dtype
@@ -48,7 +47,8 @@ def __init__(self, *, N, dtype=None, **kwargs):
elif type(dtype) is str and dtype[0] in '?uifd':
dflt_dtype = dtype

-        self._variables = list(set(self.base_variables + self.extra_variables))
+        allvars = set(self.base_variables + self.extra_variables)
+        self._variables = tuple(sorted(allvars))
for varname in self._variables:
thisdtype = self._vardtypes.get(varname, dflt_dtype)
self.__dict__[varname] = zeros(self.shape, thisdtype)
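
For reference, the constructor now takes an explicit name and a shape that may be a scalar size or a tuple of dimensions. A minimal standalone sketch of the equivalent normalization logic, using the same reduce/op.mul idiom as the code above:

from functools import reduce
import operator as op
import numpy as np

def normalize_shape(shape):
    # Iterables become a tuple shape with a product size; scalars
    # become a 1D shape of that size (mirrors the new __init__ logic).
    if np.iterable(shape):
        shape = tuple(shape)
        size = reduce(op.mul, shape)
    else:
        size = int(shape)
        shape = (size,)
    return shape, size

assert normalize_shape(100) == ((100,), 100)
assert normalize_shape((20, 5)) == ((20, 5), 100)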
@@ -60,13 +60,16 @@ def set(self, **values):
for name, value in values.items():
setattr(self, name, value)

+    def _evaluate(self, value):
+        if hasattr(value, 'sample'):
+            return value.sample(self.shape, state=self.rnd)
+        return value

def __setattr__(self, name, value):
"""
        For named group variables, use in-place setting of array values.
"""
if hasattr(self, '_variables') and name in self._variables:
getattr(self, name)[:] = self._evaluate(value)
return
super().__setattr__(name, value)

-    def _evaluate(self, value):
-        if hasattr(value, 'sample'):
-            return value.sample(self.shape, state=self.rnd)
-        return value
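
Taken together, `_evaluate` and `__setattr__` mean that assigning to a declared group variable writes into the existing array in place, and any value exposing a .sample(shape, state=...) method is sampled first. A usage sketch under those assumptions (UniformSampler is illustrative, not part of floyd):

import numpy as np

class UniformSampler:
    # Hypothetical distribution object with the .sample(shape, state=...)
    # interface that _evaluate() checks for.
    def __init__(self, lo, hi):
        self.lo, self.hi = lo, hi

    def sample(self, shape, state=None):
        rng = state if state is not None else np.random.default_rng()
        return rng.uniform(self.lo, self.hi, size=shape)

# group.x = 1.0                     # broadcasts 1.0 into the existing array
# group.x = UniformSampler(0, 100)  # fills x with sampled values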
15 changes: 15 additions & 0 deletions floyd/base/input.py
@@ -0,0 +1,15 @@
"""
Base group class for read-only inputs.
"""

from .groups import BaseUnitGroup


class BaseInputGroup(BaseUnitGroup):

def __init__(self, **kwargs):
super().__init__(**kwargs)

def update(self):
        # The output variable carries the signal transmitted through
        # the efferent projections.
        self.output = 0
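
As the comment notes, subclasses are expected to overwrite `output` with the transmitted signal on each update. A hypothetical subclass sketch (the class and its constant-level behavior are illustrative, not part of this commit):

class ConstantInput(BaseInputGroup):

    def __init__(self, *, level=1.0, **kwargs):
        super().__init__(**kwargs)
        self.level = level

    def update(self):
        self.output = self.level  # broadcast into the output variable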
230 changes: 230 additions & 0 deletions floyd/base/neuron.py
@@ -0,0 +1,230 @@
"""
Base class for unit groups of model neurons.
"""

__all__ = ('BaseNeuronGroup',)


import copy
from functools import partial

from toolbox.numpy import *
from specify import Specified, Param, LogSlider, is_param
from specify.utils import get_all_slots

from ..state import State, RunMode
from ..noise import OUNoiseProcess

from .groups import BaseUnitGroup


class BaseNeuronGroup(Specified, BaseUnitGroup):

"""
Common functionality for model neuron groups.
"""

stochastic = Param(False, doc='use random-process inputs')

base_variables = ('x', 'y', 'output', 'I_app', 'I_app_unit', 'I_net',
'I_leak', 'I_inh', 'I_exc', 'I_proxy', 'I_total')

def __init__(self, *, name, shape, g_log_range=(-5, 5), g_step=0.05,
**kwargs):
"""
        Initialize data structures, noise sources, and context gain
        parameters for this model neuron group.
"""
self._initialized = False
super().__init__(name=name, shape=shape, **kwargs)

if self.stochastic:
# Set up the intrinsic noise inputs (current-based only for
# rate-based neurons). In interactive run mode, generators are used
# to provide continuous noise.

            self.oup = OUNoiseProcess(N=self.size, tau=self.tau_noise,
seed=self.name+'_ratenoise')
if State.run_mode == RunMode.INTERACT:
self.eta_gen = self.oup.generator()
self.eta = next(self.eta_gen)
else:
self.oup.compute()
self.eta = self.oup.eta[...,0]
else:
self.oup = None
            self.eta = zeros(self.size)

# Mapping to store afferent projections
self.synapses = {}

# Add any conductance gain values in the shared context as Params
self.gain_keys = []
self.gain_param_base = LogSlider(default=0.0, start=g_log_range[0],
end=g_log_range[1], step=g_step, units='nS')
for k, v in vars(State.context.__class__).items():
if k.startswith(f'g_{name}_'):
self._add_gain_spec(k, v)

if State.is_defined('network'):
State.network.add_neuron_group(self)

self._initialized = True
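
The stochastic branch above depends on OUNoiseProcess, which is not shown in this diff. As a point of reference, a minimal sketch of an Ornstein-Uhlenbeck noise generator with unit stationary variance (hypothetical; the actual floyd implementation and normalization may differ):

import numpy as np

def ou_generator(size, tau, dt, rng):
    # Exact discretization of d(eta) = -(eta/tau) dt + sqrt(2/tau) dW:
    # eta[t+dt] = a*eta[t] + b*xi, with a = exp(-dt/tau), b = sqrt(1 - a^2).
    eta = rng.standard_normal(size)   # start from the stationary distribution
    a = np.exp(-dt / tau)
    b = np.sqrt(1.0 - a * a)          # preserves unit stationary variance
    while True:
        yield eta
        eta = a * eta + b * rng.standard_normal(size)

# Usage mirroring the interactive branch above (parameter values illustrative):
# gen = ou_generator(group.size, tau=10.0, dt=0.1, rng=np.random.default_rng(42))
# eta = next(gen)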

def _add_gain_spec(self, gname, value):
"""
        Add Param (slider) objects for any `g_{post.name}_{pre.name}` class
        attributes (Param objects or bare default values) of the shared context.
"""
_, post, pre = gname.split('_')
new_param = copy.copy(self.gain_param_base)
new_param.doc = f'{pre}->{post} max conductance'
new_all_slots = get_all_slots(type(new_param))

if is_param(value):
old_all_slots = get_all_slots(type(value))
for k in old_all_slots:
if hasattr(value, k) and getattr(value, k) is not None and \
k in new_all_slots:
slotval = copy.copy(getattr(value, k))
object.__setattr__(new_param, k, slotval)
value = new_param.default
else:
new_param.default = copy.deepcopy(value)

self.add_param(gname, new_param)
self.gain_keys.append(gname)
self.debug(f'added gain {gname!r} with value {new_param.default!r}')

def add_projection(self, synapses):
"""
Add afferent synaptic pathway to this neuron group.
"""
if synapses.post is not self:
self.out('{} does not project to {}', synapses.name, self.name,
error=True)
return

# Add synapses to mapping of afferent inputs
gname = 'g_{}_{}'.format(synapses.post.name, synapses.pre.name)
self.synapses[gname] = synapses

# Check whether the gain spec has already been found in the
# context. If not, then add a new Param to the spec with a
# default value of 0.0 (log10(1)).
#
# Gain spec names take the form `g_<post.name>_<pre.name>`.

if gname in self.gain_keys:
self.debug(f'gain spec {gname!r} exists for {synapses.name!r}')
else:
self._add_gain_spec(gname, 0.0)
self.debug(f'added gain spec {gname!r} for {synapses.name!r}')

def update(self):
"""
Update the model neurons.
"""

self.update_rates()
self.update_currents()
self.update_noise()

def update_rates(self):
"""
Evolve the neuronal firing rate variable according to input currents.
"""
self.r += State.dt * self.I_total
self.r[self.r<0] = 0.0

def update_currents(self):
"""
Update total input conductances for afferent synapses.
"""
self.I_exc = 0.0
self.I_inh = 0.0

for gname in self.S_exc.keys():
self.I_exc += 10**self[gname] * self.S_exc[gname].I_total
for gname in self.S_inh.keys():
self.I_inh -= 10**self[gname] * self.S_inh[gname].I_total

self.I_leak = self.r_rest - self.r
self.I_proxy = self.I_noise * self.eta
self.I_app = self.I_DC_mean + self.I_app_unit
self.I_net = self.I_exc + self.I_inh + self.I_proxy + self.I_app
self.I_total = (self.I_leak + self.I_net) * self.excitability
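
Reading update_currents() and update_rates() together: the per-pathway gains are stored as log10 values (a slider at 0.0 multiplies by 1, and each +1 step is a tenfold increase), S_exc and S_inh are the presumed mappings of excitatory and inhibitory afferents, and the currents compose as

    I_exc   =  sum_exc 10**g * I_syn(g)
    I_inh   = -sum_inh 10**g * I_syn(g)
    I_leak  =  r_rest - r
    I_net   =  I_exc + I_inh + I_proxy + I_app
    I_total = (I_leak + I_net) * excitability

with the rate then advanced by a rectified forward-Euler step, r <- max(0, r + dt * I_total).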

def update_noise(self):
"""
Update the intrinsic noise sources (for those with nonzero gains).
"""
if self.oup is None: return
if State.run_mode == RunMode.INTERACT:
if self.I_noise: self.eta = next(self.eta_gen)
else:
if self.I_noise: self.eta = self.oup.eta[...,State.n]

def rates(self):
"""
Return the firing rates in the calculation window.
"""
return self.r[:]

def mean_rate(self):
"""
Return the mean firing rate in the calculation window.
"""
return self.r.mean()

def active_mean_rate(self):
"""
Return the mean firing rate of active neurons in the calculation window.
"""
return self.r[self.r>0].mean()

def active_fraction(self):
"""
Return the active fraction of neurons in the calculation window.
"""
        return (self.r > 0).sum() / self.size

def set_pulse_metrics(self, active=(10, 90, 10), rate=(1, 100, 20),
only_active=True):
"""
Set (min, max, smoothness) for active fraction and mean rate.
"""
pulse = lambda l, u, k, x: \
(1 + 1/(1 + exp(-k * (x - u))) - 1/(1 + exp(k * (x - l)))) / 2

self.active_pulse = partial(pulse, *active)
self.rate_pulse = partial(pulse, *rate)
self.pulse_only_active = only_active

def pulse(self):
"""
Return a [0,1] "pulse" metric of the healthiness of activity.
"""
apulse = self.active_pulse(self.active_fraction())
if self.pulse_only_active:
rpulse = self.rate_pulse(self.active_mean_rate())
else:
rpulse = self.rate_pulse(self.mean_rate())

        # As in kurtosis calculations, the 4th power emphasizes extreme
        # values, so the mean of the two terms signals whether at least
        # one of the metrics is currently at an extreme.

return (apulse**4 + rpulse**4) / 2
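
For intuition, the double-sigmoid pulse() sits near 0.5 inside the (l, u) band and saturates toward 0 below it and 1 above it. A standalone numeric check using the default rate window (1, 100, 20):

import numpy as np

def pulse(l, u, k, x):
    # Same form as the lambda in set_pulse_metrics() above.
    return (1 + 1/(1 + np.exp(-k*(x - u))) - 1/(1 + np.exp(k*(x - l)))) / 2

with np.errstate(over='ignore'):    # saturated exponentials overflow harmlessly
    print(pulse(1, 100, 20, 30))    # ~0.5: inside the healthy band
    print(pulse(1, 100, 20, 0.1))   # ~0.0: below the band
    print(pulse(1, 100, 20, 150))   # ~1.0: above the band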

def get_neuron_sliders(self):
"""
Return a tuple of Panel FloatSlider objects for neuron Param values.
"""
return self.get_widgets(exclude=self.gain_keys)

def get_gain_sliders(self):
"""
Return a tuple of Panel FloatSlider objects for gain Param values.
"""
return self.get_widgets(include=self.gain_keys)
(Diff truncated: the remaining 19 of 24 changed files are not shown.)