<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Comput. Neurosci.</journal-id>
<journal-title>Frontiers in Computational Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Comput. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-5188</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fncom.2023.1011814</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Computational Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Biophysical parameters control signal transfer in spiking network</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Garnier Arti&#x00F1;ano</surname> <given-names>Tom&#x00E1;s</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn002"><sup>&#x2020;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1951854/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Andalibi</surname> <given-names>Vafa</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Atula</surname> <given-names>Iiris</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Maestri</surname> <given-names>Matteo</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1835806/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Vanni</surname> <given-names>Simo</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/277098/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Helsinki University Hospital (HUS) Neurocenter, Neurology, Helsinki University Hospital</institution>, <addr-line>Helsinki</addr-line>, <country>Finland</country></aff>
<aff id="aff2"><sup>2</sup><institution>Department of Neurosciences, Clinicum, University of Helsinki</institution>, <addr-line>Helsinki</addr-line>, <country>Finland</country></aff>
<aff id="aff3"><sup>3</sup><institution>Department of Computer Science, Indiana University Bloomington</institution>, <addr-line>Bloomington, IN</addr-line>, <country>United States</country></aff>
<aff id="aff4"><sup>4</sup><institution>Department of Biomedical and Neuromotor Sciences, University of Bologna</institution>, <addr-line>Bologna</addr-line>, <country>Italy</country></aff>
<aff id="aff5"><sup>5</sup><institution>Department of Physiology, Medicum, University of Helsinki</institution>, <addr-line>Helsinki</addr-line>, <country>Finland</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Markus Diesmann, Helmholtz Association of German Research Centres (HZ), Germany</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Viktor B. Kazantsev, Lobachevsky State University of Nizhny Novgorod, Russia; Vincenzo Marra, University of Leicester, United Kingdom</p></fn>
<corresp id="c001">&#x002A;Correspondence: Simo Vanni, <email>simo.vanni@helsinki.fi</email></corresp>
<fn fn-type="present-address" id="fn002"><p>Present addresses: Tom&#x00E1;s Garnier Arti&#x00F1;ano, Centre Broca Nouvelle-Aquitaine, Universit&#x00E9; de Bordeaux, Bordeaux, France; Interdisciplinary Institute for Neuroscience, UMR 5297, University of Bordeaux, Bordeaux, France</p></fn>
</author-notes>
<pub-date pub-type="epub">
<day>25</day>
<month>01</month>
<year>2023</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>17</volume>
<elocation-id>1011814</elocation-id>
<history>
<date date-type="received">
<day>04</day>
<month>08</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>09</day>
<month>01</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2023 Garnier Arti&#x00F1;ano, Andalibi, Atula, Maestri and Vanni.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Garnier Arti&#x00F1;ano, Andalibi, Atula, Maestri and Vanni</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Information transmission and representation in both natural and artificial networks is dependent on connectivity between units. Biological neurons, in addition, modulate synaptic dynamics and post-synaptic membrane properties, but how these relate to information transmission in a population of neurons is still poorly understood. A recent study investigated local learning rules and showed how a spiking neural network can learn to represent continuous signals. Our study builds on their model to explore how basic membrane properties and synaptic delays affect information transfer.</p>
</sec>
<sec>
<title>Methods</title>
<p>The system consisted of three input and output units and a hidden layer of 300 excitatory and 75 inhibitory leaky integrate-and-fire (LIF) or adaptive integrate-and-fire (AdEx) units. After optimizing the connectivity to accurately replicate the input patterns in the output units, we transformed the model to more biologically accurate units and included synaptic delay and concurrent action potential generation in distinct neurons. We examined three different parameter regimes which comprised either identical physiological values for both excitatory and inhibitory units (Comrade), more biologically accurate values (Bacon), or the Comrade regime whose output units were optimized for low reconstruction error (HiFi). We evaluated information transmission and classification accuracy of the network with four distinct metrics: coherence, Granger causality, transfer entropy, and reconstruction error.</p>
</sec>
<sec>
<title>Results</title>
<p>Biophysical parameters showed a major impact on information transfer metrics. The classification was surprisingly robust, surviving very low firing and information rates, whereas information transmission overall and particularly low reconstruction error were more dependent on higher firing rates in LIF units. In AdEx units, the firing rates were lower and less information was transferred, but interestingly the highest information transmission rates were no longer overlapping with the highest firing rates.</p>
</sec>
<sec>
<title>Discussion</title>
<p>Our findings can be reflected on the predictive coding theory of the cerebral cortex and may suggest information transfer qualities as a phenomenological quality of biological cells.</p>
</sec>
</abstract>
<kwd-group>
<kwd>microcircuit</kwd>
<kwd>spiking network model</kwd>
<kwd>neural coding</kwd>
<kwd>predictive coding</kwd>
<kwd>classification</kwd>
</kwd-group>
<contract-num rid="cn001">Y124930060</contract-num>
<contract-sponsor id="cn001">Helsingin ja Uudenmaan Sairaanhoitopiiri<named-content content-type="fundref-id">10.13039/100008376</named-content></contract-sponsor>
<contract-sponsor id="cn002">Universit&#x00E0; di Bologna<named-content content-type="fundref-id">10.13039/501100005969</named-content></contract-sponsor>
<counts>
<fig-count count="9"/>
<table-count count="1"/>
<equation-count count="16"/>
<ref-count count="55"/>
<page-count count="18"/>
<word-count count="12611"/>
</counts>
</article-meta>
</front>
<body>
<sec id="S1" sec-type="intro">
<title>1. Introduction</title>
<p>How sensory signals are processed by the cerebral cortex to generate relevant behavior remains an open question. Hypothetical mechanisms of these biological computations have been searched for in multiple theoretical studies (<xref ref-type="bibr" rid="B29">Grossberg, 1980</xref>; <xref ref-type="bibr" rid="B40">Mumford, 1992</xref>; <xref ref-type="bibr" rid="B45">Rao and Ballard, 1999</xref>; <xref ref-type="bibr" rid="B5">Baddeley, 2000</xref>; <xref ref-type="bibr" rid="B7">Barlow, 2001</xref>; <xref ref-type="bibr" rid="B22">Friston, 2010</xref>; <xref ref-type="bibr" rid="B42">Panzeri et al., 2015</xref>). Recapitulating these theories, sensory systems maximize discriminable states, or representations, given the available resources. In other words, the system optimizes decoding by finding causes, hypotheses, or predictions of the input. When a match between input and an expectation is reached, the system builds a resonant state and avoids surprises by minimizing free energy or, alternatively, represents information with economy of space, weight, and energy.</p>
<p>Although the nature of the code itself is unknown, biological evidence shows learning is key to generating sensory representations and complex behavior (<xref ref-type="bibr" rid="B13">Buonomano and Merzenich, 1998</xref>; <xref ref-type="bibr" rid="B19">Destexhe and Marder, 2004</xref>; <xref ref-type="bibr" rid="B43">Pascual-Leone et al., 2005</xref>). Given the ability of a neural system with a non-linear transfer function to compute any function (<xref ref-type="bibr" rid="B32">Hornik et al., 1989</xref>), it has become possible to teach a spiking network to follow and decode arbitrary noise patterns (<xref ref-type="bibr" rid="B10">Brendel et al., 2020</xref>, referred to as the Brendel model below). The significance of the Brendel model emerges from the computational interpretation of membrane voltage as an error signal from predicted input, thus directly linking the predictive coding model to biophysical parameters (<xref ref-type="bibr" rid="B18">Den&#x00E8;ve et al., 2017</xref>).</p>
<p>We build on the Brendel model after the training was finished and connections were fixed (<xref ref-type="fig" rid="F1">Figure 1</xref>) and asked how basic membrane characteristics and the synaptic delay change information transfer and representation. Such a simple system provides an optimal window to capture extra-synaptic effects on information flow because it has minimal unintended non-linearities.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption><p>Model structure and performance at Comrade unit class after learning optimal connections with the Brendel model. <bold>(A)</bold> Model structure. Input consisted of Gaussian noise which was injected as de- and hyperpolarizing currents to all excitatory units. The action potential output from the excitatory units to the three output units contain both positive and negative weights. Other connections (EE, EI, II, IE) are either de- or hyperpolarizing, but not both. Blue = excitatory units and pathways; red = inhibitory units and pathways; arrowhead = signals are both positive and negative; T line end: hyperpolarizing connection; reverse arrowhead: depolarizing connection. <bold>(B)</bold> Sinusoidal input and corresponding output unit activity at Comrade class search start point (<xref ref-type="table" rid="T1">Table 1</xref>). The output reaches action potential threshold at the peak. The first and last 200 ms of the 2-s simulation time are omitted to avoid edge effects. <bold>(C)</bold> Three concurrent smoothed Gaussian noise stimuli at input units (gray) are clearly separable in the three output unit membrane voltages (black). Most high-frequency deflections are lost, resulting in clearly separate but not very accurate replication of the input. <bold>(D)</bold> Spiking activity in the excitatory and inhibitory units show modest firing rates for the three Gaussian noise stimuli. <bold>(E)</bold> f&#x2013;I curve for Comrade units and representative excitatory (blue) and inhibitory (red) neurons&#x2019; membrane potential dynamics.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fncom-17-1011814-g001.tif"/>
</fig>
<table-wrap position="float" id="T1">
<label>TABLE 1</label>
<caption><p>Search ranges of the physiological parameters.</p></caption>
<table cellspacing="5" cellpadding="5" frame="box" rules="all">
<thead>
<tr>
<td valign="top" align="left" style="color:#ffffff;background-color: #7f8080;">Parameter</td>
<td valign="top" align="center" style="color:#ffffff;background-color: #7f8080;">Inhibitory</td>
<td valign="top" align="center" style="color:#ffffff;background-color: #7f8080;">Excitatory</td>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Capacitance (pF)</td>
<td valign="top" align="center">30&#x2014;270 (10)</td>
<td valign="top" align="center">30&#x2014;130 (10)</td>
</tr>
<tr>
<td valign="top" align="left">Leak conductance (nS)</td>
<td valign="top" align="center">1&#x2014;28 (1)</td>
<td valign="top" align="center">0.5&#x2014;15 (1)</td>
</tr>
<tr>
<td valign="top" align="left">Leak equilibrium potential (mV)</td>
<td valign="top" align="center">&#x2212;85&#x2014; &#x2212;35 (5)</td>
<td valign="top" align="center">&#x2212;85&#x2014; &#x2212;20 (5)</td>
</tr>
<tr>
<td valign="top" align="left">AP threshold (mV)</td>
<td valign="top" align="center">&#x2212;65&#x2014; &#x2212;15 (3)</td>
<td valign="top" align="center">&#x2212;67&#x2014; &#x2212;35 (3)</td>
</tr>
<tr>
<td valign="top" align="left">Synaptic delay (ms)</td>
<td valign="top" align="center">0.5&#x2014;25 (0.25)</td>
<td valign="top" align="center">0.5&#x2014;25 (0.25)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn><p>The values were selected to cover most of the dynamic regime. The values show range min&#x2013;range max (step size). All three unit-classes were searched with the same values. During search, the same Synaptic delay value was applied to EE, EI, II, and IE connections, while the E to output unit delay was fixed (3 ms for Comrade and Bacon, and 1 ms for the HiFi unit-class).</p></fn>
</table-wrap-foot>
</table-wrap>
<p>We used four distinct signal transmission metrics housing mutually complementary features. Our first metric is <italic>coherence</italic> which is widely used in signal analysis as a metric for the linear relationship between two analog signals (<xref ref-type="bibr" rid="B23">Gardner, 1992</xref>). It is typically applied with spectral analysis, allowing natural division of signal transfer into spectral components. Our second metric is <italic>Granger causality</italic> which, like transfer entropy, has an information-theoretical interpretation (<xref ref-type="bibr" rid="B8">Barnett et al., 2009</xref>; <xref ref-type="bibr" rid="B9">Bossomaier et al., 2016</xref>). Granger causality assumes that cause precedes its effect and that the cause contains information about the effect that is unique to the cause. While Granger causality is the computationally heaviest of our metrics and sometimes numerically unstable, it can in practice be applied to much longer delays than the transfer entropy. Our third metric, <italic>transfer entropy</italic>, is an information theoretical metric that measures the directed exchange of information between two systems (<xref ref-type="bibr" rid="B49">Schreiber, 2000</xref>; <xref ref-type="bibr" rid="B9">Bossomaier et al., 2016</xref>). In contrast to mutual information, which is widely used in neuroscience, transfer entropy has a direction, and it can separate the exchange of information from the effects of common history. It is strongly limited by large dimensionality and thus in this study, we were limited to a single time point history for input (optimally shifted in time) and output. Our fourth and final metric, <italic>normalized reconstruction error</italic>, is based on the Brendel model&#x2019;s error metric. It is a simple measure of similarity of the waveforms between input and output and is very sensitive to temporal delays.</p>
</sec>
<sec id="S2">
<title>2. Model</title>
<sec id="S2.SS1">
<title>2.1. Base model and training</title>
<p><xref ref-type="supplementary-material" rid="TS1">Supplementary table</xref> describes our model according to <xref ref-type="bibr" rid="B41">Nordlie et al. (2009)</xref>. Our model (<xref ref-type="fig" rid="F1">Figure 1A</xref>) followed the overall structure of the <xref ref-type="bibr" rid="B10">Brendel et al. (2020)</xref> model. It consisted of a network of 300 excitatory and 75 inhibitory leaky integrate-and-fire (LIF) or adaptive exponential integrate-and-fire (AdEx) units which were fully connected within and between groups. In contrast to the Brendel model, our three decoding units were LIF units for both the LIF and AdEx simulations. We fed the network three temporally low-pass filtered (Gaussian filter with standard deviation of 3 ms) white noise signals with Gaussian distribution of amplitude values. The three input signals were injected as currents to all the 300 excitatory units. The three output units received their input from all the excitatory units. The connection weights from input to excitatory and from excitatory to output units had both positive and negative values, necessary to capture both positive and negative deviations from the baseline. The connections within and between the inhibitory and excitatory groups were all positive.</p>
<p>Connection weights were learned using the Brendel model&#x2019;s simulation code that is written in Matlab<sup>&#x00AE;</sup> and is publicly available on Github<sup><xref ref-type="fn" rid="footnote1">1</xref></sup>. As in the original code, we used time step 0.0001, membrane time constant of 50 time points, integration constant for feedforward input to excitatory population 300 time points, integration constant from excitatory to inhibitory population 50 time points, learning rate 0.00001 for learning the input to excitatory and excitatory to inhibitory connections, learning rate of 0.0001 for EE, II, and IE plasticity. The feedforward weights were generated from random normal distribution and normalized to sum of one. The initial connection weights within and between the excitatory (Exc) and inhibitory (Inh) groups were: Exc =&#x003E; Exc zero, except autapses &#x2212;0.02; Exc =&#x003E; Inh every Inh neuron received connections from four Exc neurons with weights 0.5, others zero; Inh =&#x003E; Inh zero, except autapses &#x2212;0.5; Inh =&#x003E; Exc every inhibitory unit connected to four excitatory units with weights &#x2212;0.15, others zero. The action potential thresholds were set to half of the norm of the feedforward weights.</p>
<p>After learning, these weights were transferred into our more physiological model. To achieve a dynamic system with synaptic conductances at the nanosiemens scale, connection weights, except connections from the excitatory to output group, were scaled with a factor of 10<sup>&#x2013;7</sup>.</p>
<p>The Brendel model calculated the decoding connections from the excitatory group to the output group by a linear readout model:</p>
<disp-formula id="S2.E1">
<label>(1)</label>
<mml:math id="M1"><mml:mrow><mml:mrow><mml:msub><mml:mover accent="true"><mml:mi>x</mml:mi><mml:mo stretchy="false">^</mml:mo></mml:mover><mml:mi>j</mml:mi></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo lspace="2.8pt" rspace="2.8pt">)</mml:mo></mml:mrow></mml:mrow><mml:mo lspace="2.8pt" rspace="2.8pt">=</mml:mo><mml:mrow><mml:munderover><mml:mo largeop="true" movablelimits="false" symmetric="true">&#x2211;</mml:mo><mml:mrow><mml:mpadded width="+3.3pt"><mml:mi>k</mml:mi></mml:mpadded><mml:mo lspace="2.8pt" rspace="2.8pt">=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>N</mml:mi></mml:munderover><mml:mrow><mml:msub><mml:mi>D</mml:mi><mml:mrow><mml:mi>j</mml:mi><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>r</mml:mi><mml:mi>k</mml:mi></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<p>where <inline-formula><mml:math id="INEQ1"><mml:mrow><mml:msub><mml:mover accent="true"><mml:mi>x</mml:mi><mml:mo stretchy="false">^</mml:mo></mml:mover><mml:mi>j</mml:mi></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> is the estimated readout at output unit <italic>j</italic> at time <italic>t, D</italic><sub><italic>jk</italic></sub> is the decoding weight between the <italic>k</italic>-th excitatory unit and the <italic>j</italic>-th output unit, and <italic>r</italic><sub><italic>k</italic></sub> (<italic>t</italic>) is the filtered spike train on the neuron receiving the signal. We copied these connections and scaled them with 3 &#x002A; 10<sup>&#x2013;8</sup> to have the output reach maximum dynamic membrane voltage range but with a very low number of spikes. This membrane voltage constitutes our readout trace which was then compared to input.</p>
</sec>
<sec id="S2.SS2">
<title>2.2. Current model and parameter exploration</title>
<p>Simulations were run in CxSystem2 (<xref ref-type="bibr" rid="B3">Andalibi et al., 2019</xref>) with a LIF model:</p>
<disp-formula id="S2.E2">
<label>(2)</label>
<mml:math id="M2"><mml:mrow><mml:mpadded width="+3.3pt"><mml:mfrac><mml:mrow><mml:mi>d</mml:mi><mml:msub><mml:mi>v</mml:mi><mml:mi>m</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:mfrac></mml:mpadded><mml:mo lspace="2.8pt" rspace="2.8pt">=</mml:mo><mml:mfrac><mml:mrow><mml:mrow><mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mi>g</mml:mi><mml:mi>L</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mi>v</mml:mi><mml:mi>m</mml:mi></mml:msub><mml:mo>-</mml:mo><mml:mrow><mml:mi>E</mml:mi><mml:mi>L</mml:mi></mml:mrow></mml:mrow><mml:mo rspace="2.8pt">)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>g</mml:mi><mml:mi>e</mml:mi></mml:msub><mml:mpadded width="+2pt"><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>u</mml:mi><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mpadded></mml:mrow></mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:msub><mml:mi>g</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mpadded width="+2pt"><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>u</mml:mi><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mpadded></mml:mrow></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>I</mml:mi><mml:mrow><mml:mi>e</mml:mi><mml:mi>x</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo>,</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mi>C</mml:mi></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<p>where <italic>C</italic> denotes the membrane capacitance, <italic>vm</italic> the membrane voltage, <italic>EL</italic> is the leak equilibrium potential, <italic>gL</italic> is the leak conductance, <italic>g</italic><sub><italic>e</italic></sub> is the excitatory conductance, <italic>g</italic><sub><italic>i</italic></sub> is the inhibitory conductance, which is inverted by the &#x2212; sign, and <italic>Iext (t,i)</italic> is the input signal current injection (excitatory neurons only) and where the <italic>t</italic> is time and <italic>i</italic> the unit. To avoid too large deviation from the Brendel model, neither excitatory nor inhibitory synapses had driving forces; instead, their conductances were converted to voltages by multiplication of connection weight with the unit of membrane potential, volt. After a presynaptic action potential, the <italic>g</italic><sub><italic>e</italic></sub> and <italic>g</italic><sub><italic>i</italic></sub> dynamics followed exponential function:</p>
<disp-formula id="S2.E3">
<label>(3)</label>
<mml:math id="M3"><mml:mrow><mml:mpadded width="+3.3pt"><mml:mfrac><mml:mrow><mml:mi>d</mml:mi><mml:mi>g</mml:mi></mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:mfrac></mml:mpadded><mml:mo lspace="2.8pt" rspace="2.8pt">=</mml:mo><mml:mfrac><mml:mrow><mml:mo>-</mml:mo><mml:mi>g</mml:mi></mml:mrow><mml:mn>&#x03C4;</mml:mn></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<p>Where &#x03C4; is the time constant of the decay. The AdEx units followed model:</p>
<disp-formula id="S2.E4">
<label>(4)</label>
<mml:math id="M4"><mml:mfrac><mml:mrow><mml:mi>d</mml:mi><mml:msub><mml:mi>v</mml:mi><mml:mi>m</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:mfrac><mml:mrow><mml:mi/><mml:mo lspace="2.8pt" rspace="2.8pt">=</mml:mo><mml:mfrac><mml:mrow><mml:mrow><mml:mrow><mml:mrow><mml:mrow><mml:mrow><mml:mo lspace="2.8pt" rspace="2.8pt">-</mml:mo><mml:mrow><mml:mi>g</mml:mi><mml:mi>L</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mi>v</mml:mi><mml:mi>m</mml:mi></mml:msub><mml:mo lspace="2.8pt" rspace="2.8pt">-</mml:mo><mml:mrow><mml:mi>E</mml:mi><mml:mi>L</mml:mi></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mn>&#x25B3;</mml:mn><mml:mi>T</mml:mi></mml:msub><mml:mi>g</mml:mi><mml:mi>L</mml:mi><mml:msup><mml:mi>e</mml:mi><mml:mfrac><mml:mrow><mml:msub><mml:mi>v</mml:mi><mml:mi>m</mml:mi></mml:msub><mml:mo>-</mml:mo><mml:mrow><mml:mi>V</mml:mi><mml:mi>T</mml:mi></mml:mrow></mml:mrow><mml:msub><mml:mn>&#x25B3;</mml:mn><mml:mi>T</mml:mi></mml:msub></mml:mfrac></mml:msup></mml:mrow></mml:mrow><mml:mo>-</mml:mo><mml:mpadded width="+2pt"><mml:mi>w</mml:mi></mml:mpadded></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>g</mml:mi><mml:mi>e</mml:mi></mml:msub><mml:mpadded width="+2pt"><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>u</mml:mi><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mpadded></mml:mrow></mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:msub><mml:mi>g</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mpadded width="+2pt"><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>u</mml:mi><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mpadded></mml:mrow></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>I</mml:mi><mml:mrow><mml:mi>e</mml:mi><mml:mi>x</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo 
stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo>,</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mi>C</mml:mi></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<p>Where &#x0394;<sub><italic>T</italic></sub> is the slope factor or sharpness of action potential non-linearity and <italic>V</italic><sub><italic>T</italic></sub> is the action potential threshold.</p>
<p>The adaptation current <italic>w</italic> has a dynamics:</p>
<disp-formula id="S2.E5">
<label>(5)</label>
<mml:math id="M5"><mml:mrow><mml:mpadded width="+3.3pt"><mml:mfrac><mml:mrow><mml:mi>d</mml:mi><mml:mi>w</mml:mi></mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:mfrac></mml:mpadded><mml:mo lspace="2.8pt" rspace="2.8pt">=</mml:mo><mml:mfrac><mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mi>v</mml:mi><mml:mi>m</mml:mi></mml:msub><mml:mo lspace="2.8pt" rspace="2.8pt">-</mml:mo><mml:mrow><mml:mi>E</mml:mi><mml:mi>L</mml:mi></mml:mrow></mml:mrow><mml:mo lspace="2.8pt" rspace="2.8pt">)</mml:mo></mml:mrow></mml:mrow><mml:mo>-</mml:mo><mml:mi>w</mml:mi></mml:mrow><mml:msub><mml:mn>&#x03C4;</mml:mn><mml:mi>w</mml:mi></mml:msub></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<p>Where <italic>&#x03C4;<sub><italic>w</italic></sub></italic> is the time constant and <italic>a</italic> represents adaptation below the spiking threshold. At each action potential, the <italic>w</italic> is increased by <italic>b</italic>, which represents spike-triggered adaptation.</p>
<sec id="S2.SS2.SSS1">
<title>2.2.1. Comrade units</title>
<p>We had three physiological regimes which we call unit-classes, denoting the initial sets of parameters in multidimensional space (<xref ref-type="supplementary-material" rid="TS1">Supplementary table</xref>, Unit-Classes). We first created a unit-class where both inhibitory and excitatory units had identical physiological values to mimic the units found in the Brendel model. Since the parameter values of both inhibitory and excitatory units were equal, this unit-class was called the Comrade class.</p>
</sec>
<sec id="S2.SS2.SSS2">
<title>2.2.2. Bacon units</title>
<p>We then constructed a second unit-class using physiological values from the literature. Since the unit-class was made from <italic>empirical</italic> experimental data this unit-class was called <italic>Bacon</italic> class, named after Sir Francis Bacon, the father of empiricism. The parameters were based on experimental data collected from the macaque cortex (<xref ref-type="bibr" rid="B44">Povysheva et al., 2013</xref>; <xref ref-type="bibr" rid="B36">Luebke et al., 2015</xref>; <xref ref-type="bibr" rid="B26">Gilman et al., 2017</xref>). The Bacon excitatory unit values for capacitance and leak conductance were derived from membrane time constant and membrane resistance. These, and action potential threshold, were extracted as mean values between <xref ref-type="bibr" rid="B26">Gilman et al. (2017)</xref> and <xref ref-type="bibr" rid="B36">Luebke et al. (2015)</xref> who studied area V1 pyramidal cells in macaque monkeys. The corresponding inhibitory unit values were the basket cell values from Povysheva et al. who studied macaque prefrontal cortex. The equilibrium potential value was based on <xref ref-type="bibr" rid="B1">Amatrudo et al. (2012)</xref> who fitted model parameters to structural and electrophysiological macaque V1 neuron data. Some missing LIF parameters followed earlier simulation studies (<xref ref-type="bibr" rid="B20">Diesmann et al., 1999</xref>; <xref ref-type="bibr" rid="B31">Hokkanen et al., 2019</xref>).</p>
</sec>
<sec id="S2.SS2.SSS3">
<title>2.2.3. HiFi units</title>
<p>HiFi and Comrade units were identical, excluding the output units. The HiFi output units had a much shorter synaptic delay (3 ms =&#x003E; 1 ms) and a higher leak conductance (4 nS =&#x003E; 40 nS) than the Comrade or Bacon units. These physiological changes allowed for a higher fidelity when reconstructing the input signal.</p>
</sec>
<sec id="S2.SS2.SSS4">
<title>2.2.4. Parameter search</title>
<p>We first explored a wide range of physiological parameters around each unit class to determine the dynamic regime of the system (data not shown). Parameters were then explored in detail in this narrowed dynamic range. <xref ref-type="table" rid="T1">Table 1</xref> shows the search ranges and step sizes of the physiological parameters. Note that the three unit-classes&#x2019; starting points were not in the middle of the search spaces. The searches were two-dimensional for capacitance, leak conductance, leak equilibrium potential, and action potential threshold; the first dimension for the inhibitory, and the second for the excitatory units. The synaptic delay within and between the inhibitory and excitatory unit groups was searched in one dimension, i.e., all four (EE, EI, II, IE) connection delays varied together.</p>
</sec>
<sec id="S2.SS2.SSS5">
<title>2.2.5. Computational implementation</title>
<p>The duration of each simulation was 2 s, at 0.1 ms resolution. The first and last 200 ms were omitted for response stability, resulting in 16,000 samples for further analysis. Each main simulation round comprised 30,000 simulations (3 unit-classes &#x00D7; 5 parameters &#x00D7; about 200 parameter combinations &#x00D7; 10 iterations with independent noise). Altogether, we ran about 400,000 simulations.</p>
<p>Simulations were computed on a workstation equipped with an Intel Xeon Processor (E5 2640&#x2013;2.6 GHz), 128 GB of DDR4 memory, and one NVIDIA GK104GL (Quadro k4200) graphic card with 4GB video memory. The workstation ran the Linux operating system on a SATA III Solid State Drive.</p>
<p>We used the CxSystem2 cortical simulation framework (<xref ref-type="bibr" rid="B3">Andalibi et al., 2019</xref>; <xref ref-type="bibr" rid="B31">Hokkanen et al., 2019</xref>) which has been written on top of the Python-based Brian2 simulator (<xref ref-type="bibr" rid="B27">Goodman and Brette, 2009</xref>) mainly with the purpose of flexible model construction at a higher abstraction level and parallel parameter search.</p>
<p>The analyses, visualizations and automation for iterative runs were performed using in-house developed SystemTools software<sup><xref ref-type="fn" rid="footnote2">2</xref></sup>. This software is publicly available and includes detailed installation instructions and Jupyter notebook files for this study. The Jupyter notebooks are available for <xref ref-type="fig" rid="F1">Figures 1B&#x2013;D</xref>, <xref ref-type="fig" rid="F2">2</xref>&#x2013;<xref ref-type="fig" rid="F6">6A,D</xref>, <xref ref-type="fig" rid="F7">7A</xref>, <xref ref-type="fig" rid="F8">8</xref>. The heavier simulations (LIF, AdEx, controls with randomized connectivity) were pre-calculated for the notebooks. The code to recalculate these is included in the SystemTools software, and the authors are happy to provide further assistance to interested readers.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption><p>Information transfer between input and output for a range of leak conductance (gL) values in both inhibitory and excitatory units. <bold>(A)</bold> The first row shows coherence, Granger causality, transfer entropy and reconstruction error values as a function of varying gL at the Comrade units. Overall, information transfer is better at higher firing rates (rightmost column), although both transfer entropy and reconstruction error show a performance dip at the highest rates. The second row shows corresponding data for the Bacon units. Performance is similar to Comrade units. The third row shows data for Bacon units, when E-E, E-I, I-I, and I-E connections are randomly permuted, essentially losing the learned optimization but preserving the overall connection strength. Firing rate dependence of information transfer is preserved, but the overall level of information transfer is reduced. The fourth row shows data for Bacon units when all EI connections, current injection from input to E units as well as output from E units to output units are randomly permuted. Some information is still transferred, but the overall level is further reduced. <bold>(B)</bold> A boxplot across the data in panel <bold>(A)</bold>. The insert applies to all boxplots in this study: medians (black lines), 25 and 75% quartiles (gray rectangles) and full range (whiskers) of the data.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fncom-17-1011814-g002.tif"/>
</fig>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption><p>HiFi units can reconstruct the input accurately. <bold>(A)</bold> Information transfer as a function of varying capacitance (C) at Comrade and HiFi units. The red arrows show the maximum TE and minimum RE for HiFi units, and the corresponding positions for the Comrade. <bold>(B)</bold> Comrade (red) and HiFi (blue) simulation output at the capacitance values which minimize the reconstruction error for HiFi (C<sub>I</sub> = 200 pF, C<sub>E</sub> = 100 pF). <bold>(C)</bold> Each boxplot contains 240 datapoints (2D search displayed in panel <bold>A</bold>), and each datapoint is the average of 10 iterations with distinct input noise pattern.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fncom-17-1011814-g003.tif"/>
</fig>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption><p>Dynamic range of information transfer attainable with physiological parameter variation. The dashed lines depict the best information transfer rates (referred as <italic>optimal</italic> in the text; input compared to itself as output at varying delays) or the worst rates (<italic>unfit</italic>; input compared to other inputs as outputs).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fncom-17-1011814-g004.tif"/>
</fig>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption><p>Information transfer as a function of firing rate for the 2D search on capacitance. Each dot is the average of 10 simulations with distinct inputs. The dots depict distinct combinations of inhibitory and excitatory unit capacitance. The gray shadings indicate the firing rate ranges where <italic>TE</italic> and <italic>RE</italic> reach their best values, separately for each unit-class.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fncom-17-1011814-g005.tif"/>
</fig>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption><p>Classification across the 2D search on VT. <bold>(A)</bold> Information metrics. <bold>(B)</bold> Classification between inputs and outputs from the information metric. Example selection of ap threshold values (inhibitory unit &#x2013;44 mV, excitatory unit &#x2013;46 mV) leads to moderate mean firing rates (inhibitory unit 0.15 Hz, excitatory unit 0.93 Hz). <italic>On the left</italic>. The learned connectivity should match the three input patterns to corresponding output unit membrane potential patterns. For each input (row) the highest coherence value (white rectangle) indeed matches the correct input to correct output. <italic>On the right</italic>. Ten iterations of simulated data for different noise patterns provide 30 choices for each information metric (max for coherence, Granger causality and transfer entropy, min for reconstruction error). <bold>(C)</bold> Confusion matrix for calculating accuracy score. Coherence and Granger causality maxima always land at the matching input-output pairs, while transfer entropy has some failures. Reconstruction errors have more failures than hits. <bold>(D)</bold> Accuracy score for each information metric and each ap threshold value pair. The red asterisks depict <italic>p</italic> &#x003C; 0.05, Bonferroni corrected for the <italic>N</italic> parameter combinations in the current search (<italic>N</italic> inhibitory ap threshold values &#x00D7; <italic>N</italic> excitatory ap threshold values).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fncom-17-1011814-g006.tif"/>
</fig>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption><p>Classification accuracy as a function of firing rate. Results from all five parameter searches are averaged. <bold>(A)</bold> The main experiment with three inputs and three outputs, mean of 10 iterations (thick line) and the 95% confidence interval by bootstrapping (shading). Data is binned to 10 distinct firing rates. <bold>(B)</bold> A control experiment with six inputs and outputs, 10 iterations as in the main experiment. <bold>(C)</bold> A control experiment with one-dimensional searches across C, gL, EL and VT, (gL and EL augmented by two values of capacitance) in such a way that each search reaches zero and fully saturated firing rates.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fncom-17-1011814-g007.tif"/>
</fig>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption><p>Results for AdEx model units. <bold>(A)</bold> The adaptation in Comrade units leads to low FR (see <xref ref-type="fig" rid="F1">Figure 1D</xref>), a low-fidelity representation of the input (3<sup>rd</sup> column) and to a shallow f_I slope. <bold>(B)</bold> As panel <bold>(A)</bold> but for Bacon units. <bold>(C)</bold> 2-dimensional parameter search for gL and resulting coherence, Granger causality, transfer entropy and reconstruction error values for the Comrade units. <bold>(D)</bold> Same as panel <bold>(C)</bold>, but for Bacon units. The system ceases firing when excitatory unit gL reaches 10.5 nS. <bold>(E)</bold> A box-plot comparing information transfer quantities and firing rates for LIF and AdEx models for Comrade and Bacon units. <bold>(F)</bold> Classification accuracy as a function of firing rate for Comrade (blue) and Bacon (orange) units.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fncom-17-1011814-g008.tif"/>
</fig>
</sec>
</sec>
</sec>
<sec id="S3">
<title>3. Evaluation of information transfer</title>
<sec id="S3.SS1">
<title>3.1. Signal transfer metrics</title>
<p>The results show the mean of the matching input-output pairs (mean of #0 input to #0 output, #1 to #1, and #2 to #2) for all the four metrics.</p>
<sec id="S3.SS1.SSS1">
<title>3.1.1. Coherence</title>
<p>For time-series analysis, coherence is used to describe a linear association strength between two data sets whose dependency can be shifted in time. We calculated coherence values between all input and output pairs as:</p>
<disp-formula id="S3.E6">
<label>(6)</label>
<mml:math id="M6"><mml:mrow><mml:mpadded width="+3.3pt"><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>x</mml:mi><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:mpadded><mml:mo lspace="2.8pt" rspace="2.8pt">=</mml:mo><mml:mfrac><mml:msup><mml:mrow><mml:mo stretchy="false">&#x007C;</mml:mo><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>x</mml:mi><mml:mi>y</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">&#x007C;</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup><mml:mrow><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>x</mml:mi><mml:mi>x</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>y</mml:mi><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<p>where P<sub><italic>xx</italic></sub> and P<sub><italic>yy</italic></sub> are power spectral density estimates for X and Y (the input and output, respectively) and P<sub><italic>xy</italic></sub> is the cross-spectral density estimate of X and Y.</p>
<p>Best latency was the argmax of crosscorrelation between input and output. This was limited on positive latencies, i.e., input preceding output. Both coherence and crosscorrelation were calculated with SciPy signal package (<xref ref-type="bibr" rid="B55">Virtanen et al., 2020</xref>). We report the mean of coherence values from 0 to 100 Hz, sampled at 3-Hz intervals.</p>
</sec>
<sec id="S3.SS1.SSS2">
<title>3.1.2. Granger causality</title>
<p>Granger causality can be used to test if the values in time series X forecast the values of time series Y&#x2013;that is to say if X Granger-causes Y (<xref ref-type="bibr" rid="B28">Granger, 1969</xref>; <xref ref-type="bibr" rid="B25">Geweke, 1982</xref>; <xref ref-type="bibr" rid="B8">Barnett et al., 2009</xref>). Specifically, we test if adding optimally selected and weighted values from input signal X improves our prediction of output signal Y compared to a model in which we only use past values of Y. We pick optimal linear coefficients &#x03B1;<sub><italic>i</italic></sub> for each Y<sub><italic>t</italic>&#x2212;1</sub>, Y<sub><italic>t</italic>&#x2212;2</sub>, &#x2026;, <italic>Y</italic><sub><italic>t</italic>&#x2212;<italic>n</italic><sub>1</sub></sub> and similarly, we select optimal linear coefficients &#x03B2;<sub><italic>i</italic></sub> for each X<sub><italic>t</italic>&#x2212;p</sub>, X<sub><italic>t</italic>&#x2212;(p + 1)</sub>, X<sub><italic>t</italic>&#x2212;(p + 2)</sub>, &#x2026;, <italic>X</italic><sub><italic>t</italic>&#x2212;<italic>n</italic><sub>2</sub></sub>. The null hypothesis states that adding X to our model does not improve our prediction:</p>
<disp-formula id="S3.E7">
<mml:math id="M7"><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:mo>:</mml:mo><mml:mrow><mml:mrow><mml:mpadded width="+3.3pt"><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mpadded><mml:mo lspace="0pt" rspace="2.8pt">=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mrow><mml:mpadded width="+3.3pt"><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mpadded><mml:mo lspace="0pt" rspace="2.8pt">=</mml:mo><mml:mrow><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mn>&#x2026;</mml:mn></mml:mrow></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mpadded width="+3.3pt"><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mpadded><mml:mo lspace="0pt" rspace="2.8pt">=</mml:mo><mml:mn>0</mml:mn></mml:mrow></mml:mrow></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<p>To calculate Granger causality, our data was first downsampled by a factor of 40. Such downsampling brings successive samples closer to significant information transmission latencies, necessary for a successful evaluation of Granger causality (Lionel Barnett, personal communication). Next, each value was subtracted from its previous value to make the time series stationary. The lag order was picked by Akaike information criterion (<xref ref-type="bibr" rid="B38">McQuarrie and Tsai, 1998</xref>). Max lag was restricted to 100 ms for the main experiment, and the majority of realized lags were between 20 and 90 ms.</p>
<p>We then fit the data with a vector autoregressive model. We compared a univariate model:</p>
<disp-formula id="S3.E8">
<label>(7)</label>
<mml:math id="M8"><mml:mrow><mml:mpadded width="+3.3pt"><mml:msub><mml:mi>Y</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mpadded><mml:mo lspace="0pt" rspace="2.8pt">=</mml:mo><mml:mrow><mml:mrow><mml:munderover><mml:mo largeop="true" movablelimits="false" symmetric="true">&#x2211;</mml:mo><mml:mrow><mml:mpadded width="+3.3pt"><mml:mi>i</mml:mi></mml:mpadded><mml:mo lspace="0pt" rspace="2.8pt">=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:msub><mml:mi>n</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:munderover><mml:mrow><mml:msub><mml:mn>&#x03B1;</mml:mn><mml:mi>i</mml:mi></mml:msub><mml:msub><mml:mi>Y</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mrow><mml:mo>+</mml:mo><mml:msub><mml:mi>v</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<p>to a bivariate model:</p>
<disp-formula id="S3.E9">
<label>(8)</label>
<mml:math id="M9"><mml:mrow><mml:mpadded width="+3.3pt"><mml:msub><mml:mi>Y</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mpadded><mml:mo lspace="0pt" rspace="2.8pt">=</mml:mo><mml:mrow><mml:mrow><mml:munderover><mml:mo largeop="true" movablelimits="false" symmetric="true">&#x2211;</mml:mo><mml:mrow><mml:mpadded width="+3.3pt"><mml:mi>i</mml:mi></mml:mpadded><mml:mo lspace="0pt" rspace="2.8pt">=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:msub><mml:mi>n</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:munderover><mml:mrow><mml:msub><mml:mn>&#x03B1;</mml:mn><mml:mi>i</mml:mi></mml:msub><mml:msub><mml:mi>Y</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:munderover><mml:mo largeop="true" movablelimits="false" symmetric="true">&#x2211;</mml:mo><mml:mrow><mml:mpadded width="+3.3pt"><mml:mi>i</mml:mi></mml:mpadded><mml:mo lspace="0pt" rspace="2.8pt">=</mml:mo><mml:mi>p</mml:mi></mml:mrow><mml:msub><mml:mi>n</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:munderover><mml:mrow><mml:msub><mml:mn>&#x03B2;</mml:mn><mml:mi>i</mml:mi></mml:msub><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mrow><mml:mo>+</mml:mo><mml:msub><mml:mi>u</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<p>by calculating the variances of the residual terms <italic>v</italic> and <italic>u</italic> and plugging them into the <italic>F</italic>-statistic:</p>
<disp-formula id="S3.E10">
<label>(9)</label>
<mml:math id="M10"><mml:mrow><mml:mpadded width="+3.3pt"><mml:mtext>F</mml:mtext></mml:mpadded><mml:mo lspace="0pt" rspace="2.8pt">=</mml:mo><mml:mrow><mml:mi>log</mml:mi><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:mo>&#x007B;</mml:mo><mml:mfrac><mml:mrow><mml:mi>v</mml:mi><mml:mi>a</mml:mi><mml:mi>r</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>v</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>v</mml:mi><mml:mi>a</mml:mi><mml:mi>r</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>u</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mfrac><mml:mo>&#x007D;</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<p>and then calculating the <italic>p</italic>-value. We further interpreted the vector autoregressive model as information in bits by taking the base 2 logarithm of the <italic>F</italic> value.</p>
<p>Granger causality is directly affected by the variance of the residual terms, whereas information-theoretic measures only consider the probability of such deviations. This means that Granger causality is more sensitive and better suited to situations where considering the absolute values of data is important. However, if the variables are produced in a non-linear process, relying on linear Granger causality might not be justified.</p>
<p>While in principle Granger causality does not measure information but the difference in strength of prediction between two models, it does have an information-theoretic interpretation if the residuals are normally distributed: in that case the <italic>F</italic>-statistic is equal to continuous transfer entropy up to a factor of two (<xref ref-type="bibr" rid="B8">Barnett et al., 2009</xref>).</p>
</sec>
<sec id="S3.SS1.SSS3">
<title>3.1.3. Transfer entropy</title>
<p>Transfer entropy is designed to measure the directed, asymmetric information flow from one time-dependent variable to another. It can be understood as the conditional mutual information between past values of time series <italic>X</italic> and the predicted value <italic>Y</italic><sub><italic>t</italic>+1</sub> in time series <italic>Y</italic>, when we already know the past values of <italic>Y</italic>. Formally, it is defined as follows (<xref ref-type="bibr" rid="B9">Bossomaier et al., 2016</xref>):</p>
<disp-formula id="S3.E11">
<label>(10)</label>
<mml:math id="M11"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mtable columnalign='left'><mml:mtr><mml:mtd><mml:msub><mml:mi>T</mml:mi><mml:mrow><mml:mi>X</mml:mi><mml:mo>&#x2192;</mml:mo><mml:mi>Y</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>M</mml:mi><mml:mi>I</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mi>Y</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>;</mml:mo><mml:msubsup><mml:mi>X</mml:mi><mml:mi>t</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>k</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msubsup><mml:mo stretchy="false">&#x007C;</mml:mo><mml:msubsup><mml:mi>Y</mml:mi><mml:mi>t</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>l</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>=</mml:mo><mml:munder><mml:mo largeop="true" movablelimits="false" symmetric="true">&#x2211;</mml:mo><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo lspace="0pt" rspace="2.8pt">,</mml:mo><mml:msubsup><mml:mi>y</mml:mi><mml:mi>t</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>k</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:msubsup><mml:mo lspace="0pt" rspace="2.8pt">,</mml:mo><mml:msubsup><mml:mi>x</mml:mi><mml:mi>t</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>l</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:msubsup></mml:mrow></mml:munder><mml:mi>p</mml:mi><mml:mo 
stretchy='false'>(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msubsup><mml:mi>y</mml:mi><mml:mi>t</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>k</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:msubsup><mml:mi>x</mml:mi><mml:mi>t</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>l</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msubsup><mml:mo stretchy='false'>)</mml:mo><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>g</mml:mi><mml:mfrac><mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">&#x007C;</mml:mo><mml:msubsup><mml:mi>y</mml:mi><mml:mi>t</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>k</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:msubsup><mml:mi>x</mml:mi><mml:mi>t</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>l</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">&#x007C;</mml:mo><mml:msubsup><mml:mi>y</mml:mi><mml:mi>t</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>k</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mtd></mml:mtr></mml:mtable></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math>
</disp-formula>
<p>To calculate an estimate for transfer entropy, the input signal was first shifted in time to the point where optimal cross-correlation was found between the input and the output. After the time shift <italic>x</italic><sub><italic>p</italic></sub> corresponds to <italic>y</italic><sub><italic>t</italic></sub>. As with Granger causality, data was then downsampled by the factor of 40, leaving us with 400 observations in both the input and output signals. Each value was subtracted from its previous value to make the time series stationary. Embedding dimensions <italic>k</italic> and <italic>l</italic> were set to <italic>k</italic> = 1 and <italic>l</italic> = 1 to limit the number of possible combinations in the data. The continuous amplitude values were quantized into four fixed-length bins and each signal value was rounded to the nearest discrete value. Choice of bin number <italic>n</italic> follows the formula <inline-formula><mml:math id="INEQ2"><mml:mrow><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mroot><mml:mrow><mml:mi>N</mml:mi><mml:mn>/5</mml:mn></mml:mrow><mml:mrow><mml:mi>k</mml:mi><mml:mo>+</mml:mo><mml:mi>l</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:mroot><mml:mo>=</mml:mo><mml:mroot><mml:mrow><mml:mn>400/5</mml:mn></mml:mrow><mml:mn>3</mml:mn></mml:mroot><mml:mo>&#x2248;</mml:mo><mml:mn>4</mml:mn></mml:mrow></mml:math></inline-formula>, where <italic>N</italic> is the sample size.</p>
<p>We formed a three-dimensional 4 &#x00D7; 4 &#x00D7; 4 matrix where each cell corresponded to a discrete three-dimensional vector (<italic>Y</italic><sub><italic>t</italic>+1</sub>, Y<sub><italic>t</italic></sub>, <italic>X<sub><italic>p</italic></sub></italic>). We iterated through the time series and increased the observation counter by one in the cell that corresponded to the observed vector of each step.</p>
<p>Next, we estimated two conditional probabilities from this matrix:</p>
<disp-formula id="S3.E12">
<label>(11)</label>
<mml:math id="M12"><mml:mtable columnalign='left'><mml:mtr><mml:mtd><mml:mi>P</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mi>Y</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mtext>&#x007C;</mml:mtext><mml:msub><mml:mi>Y</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mn>&#x00A0;</mml:mn><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi>P</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mi>Y</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mtext>&#x007C;</mml:mtext><mml:msub><mml:mi>Y</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>&#x2229;</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mi>p</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math>
</disp-formula>
<p>where <italic>y</italic><sub><italic>t</italic>+1</sub>, <italic>y</italic><sub><italic>t</italic></sub>, <italic>x</italic><sub><italic>p</italic></sub> &#x2208; &#x007B;1, 2, 3, 4&#x007D;. Estimates for entropies <italic>H</italic>(<italic>Y</italic><sub><italic>t</italic>+1</sub>&#x007C;<italic>Y</italic><sub><italic>t</italic></sub>) and <italic>H</italic>(<italic>Y</italic><sub><italic>t</italic>+1</sub>&#x007C;<italic>Y</italic><sub><italic>t</italic></sub>,<italic>X</italic><sub><italic>p</italic></sub>) are calculated based on the estimated probabilities. Ultimately, we get:</p>
<disp-formula id="S3.E13">
<label>(12)</label>
<mml:math id="M13"><mml:mrow><mml:mi>T</mml:mi><mml:mpadded width="+3.3pt"><mml:mi>E</mml:mi></mml:mpadded><mml:mo lspace="0pt" rspace="2.8pt">=</mml:mo><mml:mi>H</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>Y</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>&#x007C;</mml:mo><mml:msub><mml:mi>Y</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>-</mml:mo><mml:mi>H</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>Y</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo lspace="0pt" rspace="2.8pt">&#x007C;</mml:mo><mml:msub><mml:mi>Y</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mi>p</mml:mi></mml:msub><mml:mo lspace="0pt" rspace="2.8pt">)</mml:mo></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<p>With four value bins, the theoretical maximum for transfer entropy is 2 bits.</p>
<p>The result given by this method can be interpreted as follows: a receiver observes the state of output signal <italic>Y</italic> at time point <italic>t</italic> and now has a general probability distribution for the state of <italic>Y</italic> at time point <italic>t</italic> + 1 and its entropy <italic>H (Y).</italic> If the receiver now observes the state of signal <italic>X</italic> at an optimal time point <italic>p</italic>, how many bits of entropy are reduced from <italic>Y</italic><sub><italic>t</italic>+1</sub>?</p>
<p>Unlike Granger causality, transfer entropy is not influenced by the absolute values of residual terms. It simply measures the probabilities of different observations occurring based on what we already know about previous input and output values, and the information contained in these observations based on their probabilities. Another advantage of transfer entropy is that it does not require linearity or any other assumptions about the process in which the signals were produced apart from stationarity.</p>
<p>However, transfer entropy requires multiple instances from each of the <italic>n<sup>l</sup></italic> &#x00D7; <italic>n<sup>k</sup></italic> &#x00D7; <italic>n</italic> possible value combinations to provide accurate results. In practice, this forces us to use short embedding vectors. Using longer embedding vectors would give us better results, as the state of our signal is generally determined by more than one previous lagged value.</p>
</sec>
<sec id="S3.SS1.SSS4">
<title>3.1.4. Normalized reconstruction error</title>
<p>We used an implementation similar to that of the Brendel model, which first computed the target output by leaky integration of the input; the tau of the leak corresponded to the output neuron group tau, 31.25 ms for the Comrade and Bacon units and 3.125 ms for the HiFi units, whose output group leaked ten times more than the other classes. In contrast to the Brendel model, our output has a unit (mV). To get both signals to the same space, we normalized both the target output and the simulated output to a standard scale (&#x2212;1, 1). Finally, the scaled target output <italic>x</italic>(<italic>t</italic>) was compared to the scaled simulated output, <inline-formula><mml:math id="INEQ3"><mml:mrow><mml:mover accent="true"><mml:mi>x</mml:mi><mml:mo stretchy="false">^</mml:mo></mml:mover><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula>.</p>
<disp-formula id="S3.E14">
<label>(13)</label>
<mml:math id="M14"><mml:mrow><mml:mrow><mml:mi>R</mml:mi><mml:mpadded width="+3.3pt"><mml:mi>E</mml:mi></mml:mpadded></mml:mrow><mml:mo lspace="0pt" rspace="2.8pt">=</mml:mo><mml:mfrac><mml:mrow><mml:mo largeop="true" symmetric="true">&#x2211;</mml:mo><mml:mrow><mml:mi>V</mml:mi><mml:mi>a</mml:mi><mml:mi>r</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:mi>x</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mover accent="true"><mml:mi>x</mml:mi><mml:mo stretchy="false">^</mml:mo></mml:mover><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mrow><mml:mo largeop="true" symmetric="true">&#x2211;</mml:mo><mml:mrow><mml:mi>V</mml:mi><mml:mi>a</mml:mi><mml:mi>r</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>x</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<p>where RE is the reconstruction error, normalized with target output variance. This gives special meaning to error value = 1; this is achieved if there are no spikes in the excitatory group, and the membrane potentials of the output group&#x2019;s units stay at the resting level. Values above one may result from time shift or other inaccuracies in the system and indicate very poor replication of the input.</p>
</sec>
</sec>
<sec id="S3.SS2">
<title>3.2. Classification performance</title>
<sec id="S3.SS2.SSS1">
<title>3.2.1. From information metric to accuracy score</title>
<p>The original Brendel model learned to replicate the three input signals at the corresponding three output units. In the present study, we added the transformation of the model to biophysical values, the LIF model at the output units, parallel action potential generation, and synaptic delays, all without retraining, all transformations degrading the performance of the system from the optimal replication. Our information transmission metrics provide parametric values allowing the selection of the best output unit for each input. This selection results in a 3-by-3 confusion matrix. The confusion matrix allows us to use information transfer metrics to measure how accurately the network can classify each input to its corresponding output. The maximum value was used for coherence, Granger causality and transfer entropy. The minimum value was used for the reconstruction error. A separate simulation experiment used six inputs and outputs, resulting in a 6-by-6 matrix, to control for the ceiling effect in classification. In all other regards, this experiment was identical to the 3-by-3 experiment.</p>
</sec>
<sec id="S3.SS2.SSS2">
<title>3.2.2. Statistical testing of the accuracy</title>
<p>We are testing the accuracy of a classifier that tries to match input signals with the correct output signal using different criteria: coherence, Granger causality, transfer entropy, and reconstruction error. We generate random input signals <italic>x<sub>1</sub>, x<sub>2</sub>, x<sub>3</sub>, &#x2026; x<sub><italic>i</italic></sub></italic>, inject them into the neural simulation network and then read the outcoming signals, <italic>y<sub>1</sub>, y<sub>2</sub>, y<sub>3,</sub> <sub>&#x2026;</sub> y<sub><italic>j</italic></sub>.</italic> The process is repeated <italic>m</italic> times, resulting in <italic>m&#x002A;i</italic> trials, denoted by <italic>n</italic>. The synaptic connectivity in the simulation network has been converged to optimal values during training so that the normalized mean variance of the reconstruction error (<italic>RE</italic>) would ideally be minimized between certain input and output pairs, such that for any specific <italic>x</italic><sub><italic>q</italic></sub> we would get argmin<sub><italic>y</italic> &#x2208; &#x007B;<italic>y</italic><sub>1</sub>, <italic>y</italic><sub>2</sub>, &#x2026;, <italic>y</italic><sub><italic>j</italic></sub>&#x007D;</sub> <italic>RE</italic>(<italic>x</italic><sub><italic>q</italic></sub>,<italic>y</italic>) = <italic>y</italic><sub><italic>l</italic></sub>, <italic>y<sub>l</sub></italic> being the &#x201C;ideal pair&#x201D; for that input. In this case, we have three random input signals and three output signals each round and the process is repeated ten times with different inputs, giving us <italic>i = 3, j = 3, m = 10</italic>, and <italic>n = 30</italic>.</p>
<p>We create a matrix <italic>A</italic> with <italic>i = 3</italic> rows representing inputs and <italic>j = 3</italic> columns representing outputs. The neural system places the <italic>n = 30</italic> observations into the matrix where the row <italic>q</italic> for each input <italic>x</italic><sub><italic>q</italic></sub> is determined by <italic>q</italic> &#x2208; &#x007B;1, 2, 3&#x007D; and column <italic>o</italic> by <italic>f</italic>(<italic>x</italic><sub><italic>q</italic></sub>,<italic>y</italic>) = <italic>y</italic><sub><italic>o</italic></sub>, where <italic>f</italic>(<italic>x</italic><sub><italic>q</italic></sub>,<italic>y</italic>) seeks for the value among <italic>y</italic> &#x2208; &#x007B;<italic>y</italic><sub>1,</sub> <italic>y</italic><sub>2</sub>, <italic>y</italic><sub>3</sub>&#x007D; that minimizes or maximizes our criterion. The correct pair would be <italic>y</italic><sub><italic>o</italic></sub> = <italic>y</italic><sub><italic>l</italic></sub> and the correct cell <italic>A</italic>(<italic>q</italic>, <italic>o</italic>) = <italic>A</italic>(<italic>q</italic>, <italic>l</italic>). How the observations end up being distributed in the matrix depends on the ability of the system to separate the signals as well as the inputs that we generated for testing.</p>
<p>The accuracy score is calculated from this matrix as the ratio between the number of correctly classified signals <italic>k</italic> and all signals <italic>n</italic>. Because the data consists of repeated Bernoulli trials, all being modeled as having the same probability of successful classification <italic>p</italic>, the observed accuracy score follows a binomial distribution B (<italic>n, p</italic>).</p>
<p>We hypothesize that the classifier does not work and that in each trial every input has an equal probability of being matched with any of the outputs. Therefore:</p>
<disp-formula id="S3.E15">
<label>(14)</label>
<mml:math id="M15"><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:mo>:</mml:mo><mml:mrow><mml:mpadded width="+3.3pt"><mml:mi>p</mml:mi></mml:mpadded><mml:mo lspace="0pt" rspace="2.8pt">=</mml:mo><mml:mrow><mml:mpadded width="+2pt"><mml:mfrac><mml:mn>1</mml:mn><mml:mi>j</mml:mi></mml:mfrac></mml:mpadded><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mpadded width="+2pt"><mml:mi>d</mml:mi></mml:mpadded><mml:msub><mml:mi>H</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:mrow><mml:mo>:</mml:mo><mml:mrow><mml:mpadded width="+3.3pt"><mml:mi>p</mml:mi></mml:mpadded><mml:mo lspace="0pt" rspace="2.8pt">&gt;</mml:mo><mml:mpadded width="+2pt"><mml:mfrac><mml:mn>1</mml:mn><mml:mi>j</mml:mi></mml:mfrac></mml:mpadded></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<p>Under the null hypothesis, instances where the classifier&#x2019;s accuracy score is higher than the expected <italic>E</italic>(<italic>k</italic>/<italic>n</italic>) = <italic>p</italic> = 1/<italic>j</italic> are assumed to be generated by a lucky choice of test data. In this case <italic>K</italic> &#x223C; <italic>B</italic>(30, 1/3) and we would expect to see 10 signals out of 30 being sorted correctly.</p>
<p>We want to know the probability of observing an accuracy score of <italic>at least k/n</italic> by chance alone. This probability can be directly calculated from the sum of the binomial distribution&#x2019;s right tail:</p>
<disp-formula id="S3.E16">
<label>(15)</label>
<mml:math id="M16"><mml:mrow><mml:mi>P</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mpadded width="+3.3pt"><mml:mi>K</mml:mi></mml:mpadded><mml:mo lspace="0pt" rspace="2.8pt">&#x2265;</mml:mo><mml:mi>k</mml:mi><mml:mo lspace="0pt" rspace="2.8pt">)</mml:mo></mml:mrow><mml:mo lspace="0pt" rspace="2.8pt">=</mml:mo><mml:munderover><mml:mo largeop="true" movablelimits="false" symmetric="true">&#x2211;</mml:mo><mml:mrow><mml:mpadded width="+3.3pt"><mml:mi>i</mml:mi></mml:mpadded><mml:mo lspace="0pt" rspace="2.8pt">=</mml:mo><mml:mi>k</mml:mi></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mrow><mml:mo>(</mml:mo><mml:mfrac linethickness="0.0pt"><mml:mi>n</mml:mi><mml:mi>i</mml:mi></mml:mfrac><mml:mo>)</mml:mo></mml:mrow><mml:msup><mml:mi>p</mml:mi><mml:mi>i</mml:mi></mml:msup><mml:msup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>p</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>-</mml:mo><mml:mi>i</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:math>
</disp-formula>
<p>using, of course, <italic>p</italic> = 1/<italic>j</italic>. We choose a significance level of &#x03B1; and reject the null hypothesis if <italic>P</italic>(<italic>K</italic> &#x2265; <italic>k</italic>) &#x003C; &#x03B1;. We chose &#x03B1; = 0.05, Bonferroni corrected for the <italic>N</italic> trials in each parameter search.</p>
</sec>
</sec>
</sec>
<sec id="S4" sec-type="results">
<title>4. Results</title>
<sec id="S4.SS1">
<title>4.1. Comrade and Bacon unit classes are sensitive to membrane parameters</title>
<p>We studied the signal transmission properties of a trained neural network (<xref ref-type="fig" rid="F1">Figure 1A</xref>) and how these properties are affected by the biophysical parameters of the network. We proceed stepwise from the Brendel et al. system, which becomes the Comrade unit class. Then we step toward biological realism with Bacon class and toward better reconstruction with HiFi class. Finally, we test the Comrade and Bacon unit classes with AdEx units.</p>
<p>After learning, the connections were first scaled to the nanosiemens scale to allow for adequate firing, exemplified by the network output being able to follow the sine function and only fire at the peaks (<xref ref-type="fig" rid="F1">Figure 1B</xref>). <xref ref-type="fig" rid="F1">Figure 1C</xref> shows the three input signals, together with the corresponding output membrane voltage. The output signals (purple) showed a poor reconstruction of the input signals (dark), with most of the high frequencies being lost, although the output signal was able to follow the overall silhouette of the input signal. The average firing frequency was within the physiological range (<xref ref-type="bibr" rid="B50">Sclar et al., 1990</xref>; <xref ref-type="bibr" rid="B6">Bakken et al., 2021</xref>) with inhibitory units firing at a slightly higher frequency than excitatory units (<xref ref-type="fig" rid="F1">Figure 1D</xref>). <xref ref-type="fig" rid="F1">Figure 1E</xref> shows the firing frequency of the model unit to increasing step current injections (input-frequency, or f-I curve) and representative excitatory and inhibitory unit membrane voltage traces. These results show the model, although presenting a poor reconstruction, behaves in a plausibly physiological manner. To understand the general pass-band characteristics of the network, we tested each unit class with 1&#x2013;99 Hz sinusoidal inputs (data not shown). Above 30-Hz, the signal started to fail, and at 40-Hz spiking stopped for all unit classes, showing an inability to pass high-frequency regular oscillations and suggesting poor transmission of high frequencies overall.</p>
<p>Before examining the information transfer characteristics of our network, we sought to establish the dynamic range of the different parameters in which the network was functional (data not shown). Next, we did a parameter search (<xref ref-type="table" rid="T1">Table 1</xref>) for capacitance (C), leak conductance (gL), voltage threshold (VT) of action potential generation, leak equilibrium potential (EL), and synaptic delay (EE, EI, II, IE connections, EI below). We then observed how different information transfer metrics [coherence (<italic>Coh</italic>), Granger causality (<italic>GC</italic>), transfer entropy (<italic>TE</italic>), reconstruction error (<italic>RE</italic>)] between the input and output units, as well as the firing rate (<italic>FR</italic>) of excitatory units, behave as a function of the parameters. The inhibitory unit <italic>FR</italic> followed closely the excitatory units and thus the data was omitted below. We noted that the parameter values that maximize information transfer for each metric were somewhat different.</p>
<p><xref ref-type="fig" rid="F2">Figure 2</xref> shows the 2-dimensional (inhibitory, excitatory) search results on gL values across the dynamic ranges and compares the information transmission metrics to <italic>FR</italic> on the right. We can see that the gL values that maximize <italic>Coh</italic> and <italic>GC</italic> were somewhat different from those of <italic>TE</italic>, which in turn were different from those that minimized <italic>RE</italic>. Overall the highest information transmission appeared at high firing rates, dipping close to saturation for all metrics, but most clearly for <italic>TE</italic> and <italic>RE</italic>. These data show that a varying leak conductance within the dynamic range of the system has a strong effect (factor of &#x223C;5) on information transfer.</p>
<p>To test the relative value of connectivity versus the changes in gL, we randomly permuted the four sets of EI connections. The permutation was done within each set by randomly shuffling the post-synaptic target index, thus preserving the connection strengths in the system. The permutation only had a subtle effect on the 2D topology, improving slightly the sensitivity to gL at inhibitory units. We then permuted all connections, including EI shuffling above, but also the input to excitatory and excitatory to output units. This change led to <italic>Coh</italic> and <italic>GC</italic> metrics to collapse, significantly reducing the amount of information transfer. <xref ref-type="fig" rid="F2">Figure 2B</xref> shows the boxplots summarizing the information transmission magnitudes across the 2D search. The medians were similar for the Bacon data regardless of learning, whereas the Comrade start point showed always the highest information transmission and the highest firing rate (Friedman test <italic>p</italic> &#x003C; 0.001, <italic>N</italic> = 405).</p>
<p>These results show that randomizing local connectivity (EI) had a surprisingly subtle effect on the overall metrics, while randomizing all connections trivially collapsed the <italic>Coh</italic> and <italic>GC</italic> metrics and increased RE (random input to output relation). The paradoxical increase of TE for randomizing all connections suggests limited value of the metrics, perhaps related to non-zero spiking at high excitatory gL, the single sampling point in time or non-optimal temporal shift of the point. The varying leak conductance caused a major variation in information transfer, surpassing the variation caused by permuting the EI connectivity.</p>
</sec>
<sec id="S4.SS2">
<title>4.2. HiFi output units are necessary for good reconstruction</title>
<p>Although previous data illustrated well the important role gL values have in determining information transfer, the output signal still shows a poor reconstruction. To try to improve signal reconstruction, we optimized output units by increasing their gL from 4 to 40 nS and lowering the delay between the excitatory and output units from 3 to 1 ms without altering the middle layer. This was done to shorten the memory trace of earlier events and thus allow for fast replication of information available in the middle layer. <xref ref-type="fig" rid="F3">Figure 3A</xref> shows that enabling fast response in output units results in drastically different parameter topology for <italic>TE</italic> and <italic>RE</italic> (<xref ref-type="fig" rid="F3">Figure 3A</xref>, red arrows at <italic>TE</italic> maximum and <italic>RE</italic> minimum for HiFi) as well as reduction of <italic>RE</italic> values, while the parameter topology and range for <italic>GC</italic> and <italic>Coh</italic> are preserved. Excitatory group <italic>FR</italic> are identical because there is no change in the excitatory (E) group between Comrade and HiFi. This result shows that the different metrics have clearly individual characteristics with <italic>Coh</italic> and <italic>GC</italic> being most consistent.</p>
<p>At the minimal error for HiFi units (C<sub><italic>I</italic></sub> = 200 pF, C<sub><italic>E</italic></sub> = 100 pF), the Comrade unit fails to replicate the high frequencies (<xref ref-type="fig" rid="F3">Figure 3B</xref>). This is clearly visible in the power spectral density, where low frequencies dominate for the Comrade output units, whereas the HiFi units follow the input at higher frequencies. There is only a minor extra lag in cross correlation peak between the input and output for Comrade, and the coherence between input and output is worse for low frequencies, compared to HiFi. These results suggest that for accurate replication of fast components in sensory data, readout neurons play a key role in the accurate reconstruction of the signal. Moreover, information transfer topology is more similar between the four metrics in HiFi than in the Comrade units, suggesting similar dependence on capacitance with faster and short-memory readout.</p>
<p><xref ref-type="fig" rid="F3">Figure 3C</xref> shows the magnitudes of readout metrics across the three unit-classes. The change from Comrade to HiFi improves especially <italic>TE</italic> and <italic>RE</italic>, in line with their sensitivity to fast transients; <italic>TE</italic> includes only one optimally shifted time point as input and output history and <italic>RE</italic> is very sensitive to delay (<xref ref-type="supplementary-material" rid="FS1">Supplementary Figure 1</xref>). For all metrics, the group comparisons were significant (Friedman test, <italic>p</italic> &#x003C; 0.001).</p>
<p>Mathematically, the capacitance (<xref ref-type="fig" rid="F3">Figure 3</xref>) and leak conductance (<xref ref-type="fig" rid="F2">Figure 2</xref>) are not independent parameters, because they are linked by membrane time constant. They, however, provide different views to approximate membrane surface area and ionic conductances, respectively.</p>
<p>In a network with fixed synaptic delays between excitatory and inhibitory units, <xref ref-type="bibr" rid="B47">Rull&#x00E1;n Bux&#x00F3; and Pillow (2020)</xref> described a back-and-forth synchronous signal propagation. Our network operates in asynchronous state (<xref ref-type="fig" rid="F3">Figure 3B</xref>). This is likely influenced by the white noise input to excitatory units. However, we can not exclude temporally or spatially limited synchrony phenomena.</p>
</sec>
<sec id="S4.SS3">
<title>4.3. Physiological parameters cause extensive variation in information transfer</title>
<p>To better quantify the range of information transfer due to changes in physiological parameters, we compared the 2-dimensional parameter search results to information transfer at the optimal and least fit possible numerical transfer rates. The optimal condition comprised a copy of the input signal at the output at different delays, and the most unfit condition comprised one of the two other inputs as the output (<xref ref-type="supplementary-material" rid="FS1">Supplementary Figure 1</xref>). The optimal <italic>Coh</italic> and <italic>GC</italic> values were dependent on delay. <italic>GC</italic> was also sensitive to a maximum allowed delay in the VAR model (100 ms in this study). <italic>RE</italic>, again, was very sensitive to delay, reaching random levels shortly after 10 ms shift. <italic>TE</italic> was not sensitive to delay, because we look only at time history = 1, with optimal time shift.</p>
<p><xref ref-type="fig" rid="F4">Figure 4</xref> shows the range of the information transfer in simulations against the optimal and unfit values (highest and lowest mean values in <xref ref-type="supplementary-material" rid="FS1">Supplementary Figure 1, upper and lower</xref> dashed lines, respectively, in <xref ref-type="fig" rid="F4">Figure 4</xref>), for all five parameter searches. Varying C, gL, EL, and VT had drastic effects on <italic>Coh</italic>, ranging almost the full range of values. For <italic>GC</italic>, the current system fails to reach the optimal values, covering systematically less than 1/6 of the range. Changing the max allowed lag to 200 ms increased optimal values for <italic>GC</italic>, especially at longer latencies (<xref ref-type="supplementary-material" rid="FS1">Supplementary Figure 1</xref>, gray curve), but the <italic>GC</italic> values for simulated data were almost unchanged (For C, <xref ref-type="supplementary-material" rid="FS2">Supplementary Figure 2</xref>). <italic>TE</italic> reached from minimum values to half the maximum values. <italic>RE</italic> was poor for most Comrade and Bacon simulations (values above one), whereas HiFi reached value 0.32, indicating they were best able to accurately follow the input signal. In contrast to the other metrics, changing the synaptic delay in the EI connections from 0.5 to 25 ms had only a minimal effect on the information transfer.</p>
</sec>
<sec id="S4.SS4">
<title>4.4. Action potential frequency drives information transfer</title>
<p>We observed that for LIF units, higher frequencies were associated with better scores in every information transfer metric in all three unit-classes (<xref ref-type="fig" rid="F5">Figure 5</xref>). The association was non-linear with a saturation point around 300 Hz where the information transmission started failing. Upon closer examination we realized this saturation occurs due to the immediate activation of neurons after their refractory period was over (data not shown), resulting in loss of entropy. This failure appeared first in <italic>Coh</italic> and <italic>GC</italic>, whereas the <italic>TE</italic> and <italic>RE</italic> turned from best to failure only at somewhat higher frequencies (shaded in <xref ref-type="fig" rid="F5">Figure 5</xref>).</p>
</sec>
<sec id="S4.SS5">
<title>4.5. Robust classification in the dynamic regime</title>
<p>We compared the membrane potential of each output unit to each of the input signals. From this 3-by-3 comparison, the winner was the best information transfer value. After running this analysis on 10 simulation rounds with different input signals, the analysis resulted in a confusion matrix where, ideally, each input unit would be correctly paired to its output unit 10 times.</p>
<p>To give an example of classification, <xref ref-type="fig" rid="F6">Figure 6A</xref> shows the 2D search data on the action potential threshold (VT) for the Comrade unit class. The highest <italic>Coh</italic> and <italic>GC</italic> values are in the high firing rate (&#x003E;100 Hz) regime but drop for the highest rates, as with search on C (<xref ref-type="fig" rid="F5">Figure 5</xref> top). The <italic>TE</italic> and <italic>RE</italic> have a different topology, with the best values at higher frequencies (<xref ref-type="fig" rid="F3">Figure 3A</xref>). For the selected VT values (inhibitory unit &#x2212;44 mV, excitatory unit &#x2212;46 mV, <xref ref-type="fig" rid="F6">Figure 6B</xref>), <italic>Coh</italic> and <italic>GC</italic> have great classification performances, with these two metrics being able to correctly pair every input to their output (<xref ref-type="fig" rid="F6">Figure 6C</xref>) despite low mean firing rates (inhibitory unit 0.04 Hz, excitatory unit 0.93 Hz). <italic>TE</italic> also offers a relatively good classification performance, correctly classifying most of the simulations (accuracy score 0.8, <italic>p</italic> &#x003C; 0.001, Bonferroni corrected for the <italic>N</italic> 2D search items, 95% ci 0.58&#x2013;0.90). <italic>RE</italic> shows a poor classification performance being unable to accurately classify most input-output pairs (accuracy score 0.37, <italic>p</italic> &#x003E; 0.05, 95% ci 0.20&#x2013;0.56). From these data we can observe that classification is a robust measure for information transfer and it enables direct comparison of the different metrics.</p>
<p>Looking at the accuracy scores through the AP threshold values we were able to see that even in conditions of low information transfer, the network was able to generate significant accuracy scores (compare <xref ref-type="fig" rid="F6">Figures 6A&#x2013;D</xref>, where red dots indicate statistically significant accuracy). This robust effect could be seen in all unit-classes and membrane parameters (<xref ref-type="fig" rid="F7">Figure 7</xref>). <xref ref-type="fig" rid="F7">Figure 7A</xref> shows the classification accuracy score for the main experiment as a function of firing rate, with all physiological parameter searches averaged together. For <italic>Coh</italic> and <italic>GC</italic>, classification accuracy scores remain at one, excluding the first and last aggregate <italic>FR</italic> bins, which include very low and high <italic>FRs</italic>. For Bacon and Comrade units, the <italic>TE</italic> and <italic>RE</italic> values dip, in addition, at the middle <italic>FR</italic> ranges. To exclude that the excellent accuracy scores result from a ceiling effect, <xref ref-type="fig" rid="F7">Figure 7B</xref> shows the same 2D search with 10 iterations as in the main experiment, but for six inputs and six outputs (chance rate 1/6). The results are similar to the main experiment: only the first and last <italic>FR</italic> bin showed a drop for the <italic>Coh</italic> and <italic>GC</italic>, while the Comrade and Bacon units replicate the mid-frequency dip for the <italic>TE</italic> and <italic>RE</italic>. Finally, <xref ref-type="fig" rid="F7">Figure 7C</xref> shows the data for an extensive 1D search across the parameter variations, with the aim of better covering the zero and fully saturated <italic>FRs</italic>. As expected, all four metrics drop closer to random accuracy scores (1/3) at the first and last <italic>FR</italic> bins.</p>
</sec>
<sec id="S4.SS6">
<title>4.6. Adaptation leads to low-frequency code</title>
<p>In biological neurons, intracellular current injection to soma together with measurement of the membrane voltage have been used to characterize the response properties of a neuron. LIF model has a limited ability to replicate such membrane voltage traces, whereas relatively simple adaptive models fit considerably better (<xref ref-type="bibr" rid="B46">Rauch et al., 2003</xref>; <xref ref-type="bibr" rid="B34">Jolivet et al., 2004</xref>; <xref ref-type="bibr" rid="B11">Brette and Gerstner, 2005</xref>; <xref ref-type="bibr" rid="B24">Gerstner and Naud, 2009</xref>). Thus we next did the Comrade and Bacon simulations with AdEx units which extends the LIF model by one dynamic adaptation variable <italic>w</italic>, and three additional model parameters, <italic>a, b</italic>, and <italic>tau</italic><sub><italic>w</italic></sub>. These model parameters were selected in such a way that the unit adaptation timing qualitatively mimicked the adaptation current of a fast spiking type II inhibitory unit and an excitatory unit (<xref ref-type="bibr" rid="B39">Mensi et al., 2012</xref>).</p>
<p>Adaptation and exponential action potential generation mechanism in Comrade units (at start point, <xref ref-type="fig" rid="F8">Figure 8A</xref>) led to lower <italic>FR</italic> (1&#x2013;3 vs. 8&#x2013;9 Hz, <xref ref-type="fig" rid="F1">Figure 1D</xref>), a low-fidelity representation of the input, and to a clearly less steep f-I curve than with LIF units (<xref ref-type="fig" rid="F1">Figure 1E</xref>). Bacon unit firing (<xref ref-type="fig" rid="F8">Figure 8B</xref>) was even more sparse. Nevertheless, the output looks like an abstraction of the input signal, albeit delayed. The f-I curves are more steep for inhibitory than excitatory units, in line with experimental data (<xref ref-type="bibr" rid="B39">Mensi et al., 2012</xref>).</p>
<p>A clear difference between AdEx and LIF units emerges for the information transfer for varying leak conductance (<xref ref-type="fig" rid="F8">Figure 8C</xref>). For Comrade AdEx units, both <italic>Coh</italic> and <italic>GC</italic> show their highest values with small leak in both inhibitory and excitatory population (the left upper corner of the plot). However, the highest firing rates are at high leak in inhibitory and low leak in excitatory units (left lower corner). For LIF units the two topologies overlap, and the information transmission follows the firing rate (max at left lower corner, <xref ref-type="fig" rid="F2">Figure 2A</xref>). The <italic>TE</italic> for AdEx is more in line with the LIF results, with best values with the highest FR. The <italic>RE</italic> is more difficult to interpret and the error values are consistently very high (&#x003E;1). For Bacon AdEx units the system ceases firing altogether with high leak in excitatory population, but otherwise there is a similar trend; the highest <italic>Coh</italic> and <italic>GC</italic> values are off the highest <italic>FR</italic> (<xref ref-type="fig" rid="F8">Figure 8D</xref>).</p>
<p><xref ref-type="fig" rid="F8">Figure 8E</xref> shows that the LIF model results in systematically higher information transmission and firing rates [Wilcoxon signed rank test <italic>p</italic> &#x003C; 0.001 between LIF and AdEx models, tested separately (<italic>N</italic> = 10) for each metrics plus the <italic>FR</italic> and Comrade and Bacon units].</p>
<p><xref ref-type="fig" rid="F8">Figure 8F</xref> shows that classification accuracy for <italic>Coh</italic> and <italic>GC</italic> drops only at the lowest <italic>FR</italic> bin, as with LIF units (<xref ref-type="fig" rid="F7">Figure 7</xref>). The firing rates never exceed 150 Hz and thus there is no high-frequency saturation with accuracy drop, as with the LIF units. The <italic>TE</italic> and <italic>RE</italic> show somewhat more modest accuracies, but well above the 1/3 chance level.</p>
</sec>
</sec>
<sec id="S5" sec-type="discussion">
<title>5. Discussion</title>
<sec id="S5.SS1">
<title>5.1. Accurate reconstruction is costly, classification is cheap</title>
<p>In the present study, we used a simple spiking network which had learned to replicate the inputs at the membrane voltage of the corresponding output units (<xref ref-type="bibr" rid="B10">Brendel et al., 2020</xref>). After learning, we implemented delay, parallel firing and varied the biophysical parameters of the system. We were interested in exploring how cell physiology could affect information transfer and experimenting with some key information transfer metrics. Our results show that accurately reconstructing an input signal is a difficult task for a neuronal system; only a few parameter combinations in HiFi class reached low <italic>RE</italic> values. This narrow parameter range combined with the expensive nature of high-frequency firing makes signal reconstruction an inefficient way to represent information. Compared to LIF, the AdEx model reduced firing rates and information transmission and disconnected the highest information transmission from the highest firing rates of the system. These differences may emerge from adaptation or from the exponential action potential mechanisms. AdEx model did not, however, improve replication of the input signal in the output units. Despite the poor reconstruction, <italic>Coh</italic> and <italic>GC</italic> indicated some degree of information transfer throughout most of the dynamic range. Consequently, even very modest firing rates showed significant classification, suggesting that classification is a more achievable task for biological systems. These results have interesting theoretical implications for biological systems as they place classification as a possible model for representation.</p>
</sec>
<sec id="S5.SS2">
<title>5.2. Measuring information transfer in neural systems</title>
<p>We selected <italic>RE</italic> as one of our performance metrics because it was used in the reference work (<xref ref-type="bibr" rid="B10">Brendel et al., 2020</xref>). It is closely related to root-mean square (RMS) metric, common in machine learning applications. The RMS metric, as well as other time-series forecasting metrics, include timepoint-by-timepoint subtraction of the data from the prediction (<xref ref-type="bibr" rid="B14">Caruana and Niculescu-Mizil, 2004</xref>; <xref ref-type="bibr" rid="B15">Cerqueira et al., 2017</xref>; <xref ref-type="bibr" rid="B37">Makridakis et al., 2018</xref>; <xref ref-type="bibr" rid="B35">Lara-Benitez et al., 2021</xref>), but in addition, the RMS metric is considered to be a good general-purpose metric also for binary classification problems (<xref ref-type="bibr" rid="B14">Caruana and Niculescu-Mizil, 2004</xref>). We show that <italic>RE</italic> is very sensitive to delays, reaching random performance in 10 ms with our input signal. However, in both macaques and humans, visual processing take place in longer timescales (<xref ref-type="bibr" rid="B48">Salmelin et al., 1994</xref>; <xref ref-type="bibr" rid="B12">Bullier, 2001</xref>) making <italic>RE</italic> a less than an ideal tool to study signal transfer in biological models. The need to compare computational models and experimental data makes finding and using alternative methods to analyze information transfer important.</p>
<p>Since the development of information theory (<xref ref-type="bibr" rid="B51">Shannon, 1948</xref>), various methods have been developed to quantify information content and information transfer (<xref ref-type="bibr" rid="B2">Amblard and Michel, 2012</xref>). The purpose of using transfer entropy as a measure for information transfer in our work is to obtain an information-theory-based measure that reflects the reduction of uncertainty in the output signal based on observations from the input. Transfer entropy has two major advantages. Firstly, the result it yields is easily interpretable and can be compared against clearly defined lower and upper bounds. Secondly, it is model-independent and does not require assumptions about the nature of interactions between the input and the output. As such it can be applied to a simulated neural system that does not transfer signals in a mathematically prespecified way. On the downside it does not describe or predict these interactions any further by suggesting a model&#x2013;it simply tells if one variable can reduce entropy from the other. Transfer entropy can be understood as the decrease in the number of binary questions that one needs to ask to deduce the state of an output value after observing a single input value. In this study, transfer entropy shows how much the input can reduce the uncertainty of the resulting output. Our results show that transfer entropy is not only highly sensitive to changes in biophysical parameters, but also tenuous as an information transfer metric if, as in this work, data is limited to one time point with a time shift.</p>
<p>Granger causality is another widely used method for determining a predictive relationship between two signals. It measures the difference in explained variation between two linear time series models: one that only uses past values of the output itself as a predictor of its future and one that adds an input signal as a second predictor. The result it gives is a statistical test on whether this observed difference is purely random. Granger causality is in a sense more specific than transfer entropy: it assumes linearity, fits a model, and estimates how this model is affected by the input data values, not just their estimated statistical frequencies.</p>
<p>The idea behind Granger causality is nonetheless similar to the idea behind transfer entropy: it estimates the increase in what we know about the output based on observations about the input. Granger causality and transfer entropy are proven to be equivalent for Gaussian variables, but the equivalence does not show up in our results. This difference probably has two different origins. First, Granger causality only corresponds to continuous transfer entropy, which takes into account the reduction of entropy in the intermediate steps, whereas transfer entropy needs to be discretized (<xref ref-type="bibr" rid="B49">Schreiber, 2000</xref>). Second, we utilized a high lag for Granger causality; after lag order selection it captured up to 90 ms of temporal information. In contrast, the high dimensionality limited transfer entropy to a single time point. Given the more consistent results for <italic>GC</italic> than <italic>TE</italic> across the unit classes, our results show that <italic>GC</italic> captures the signal transmission by a neural system better than <italic>TE</italic>, perhaps due to the dominance of low frequencies in such transmission.</p>
<p>Coherence and Granger causality showed similar topology across the parameter search landscapes, and both were robust to the change in output units from Comrade to HiFi class. However, our model captures most of the available dynamic range of <italic>Coh</italic>, whereas only some 10&#x2013;20% of <italic>GC</italic> information passes our model. Given that <italic>Coh</italic> measures synchronized oscillations, i.e., oscillations with a constant phase difference across time, our model may not pass unsynchronized oscillations, and consequently, most of the white noise input variance could be lost in our simple system. Naturally, the inability to follow sinusoidal input beyond 30 Hz at the unit-class starting points limits information from high frequencies and perhaps affects <italic>GC</italic> more than <italic>Coh</italic>. Further study should extend this finding to understand how much of the dynamic range difference between <italic>GC</italic> and <italic>Coh</italic> emerges from the inability to pass incoherent signals, whether the low-pass envelope follows firing frequency, and whether the incoherent or higher frequency signals could perhaps pass a more natural system with variable unit properties and complex structural connectivity.</p>
</sec>
<sec id="S5.SS3">
<title>5.3. Implications for biological systems</title>
<sec id="S5.SS3.SSS1">
<title>5.3.1. Energy efficiency</title>
<p>Energy efficiency is an ever-present evolutionary pressure in all organisms. Firing action potentials is an energy-demanding process, as it requires active ion transport to maintain a particular membrane potential; together with a rather small basal metabolic rate, action potentials and resulting release and recycling constitute a hefty energetic cost to gray matter (<xref ref-type="bibr" rid="B4">Attwell and Laughlin, 2001</xref>). At the same time, reliable and accurate signaling is important for information transfer, creating a need for the brain to encode information efficiently. Our results with an artificial neural network show that a temporally sparse code can transmit a gist of the input signal efficiently and it could also be a mechanism used by biological neuronal networks. This goes in line with sparse coding in the visual cortex (<xref ref-type="bibr" rid="B53">Tovee et al., 1993</xref>; <xref ref-type="bibr" rid="B54">Vinje and Gallant, 2000</xref>). Our results show that such a sparse code was not enough for the accurate reconstruction of the input but was highly successful at selecting a category from a fixed set, i.e., classification. This suggests that a biological neural network could prefer selection between categories, rather than accurate reconstruction of sensory inputs. This selection could manifest biologically as neuronal representations being conveyed as neuronal chains, describing how a series of signals would travel through the network. Instead of decoding the spiking response into membrane potential of the output units, as in our artificial model, decoding would manifest as neuronal chains, evolving in time.</p>
</sec>
<sec id="S5.SS3.SSS2">
<title>5.3.2. Information transfer profiles as a characteristic of neurons</title>
<p>Our results showed that information transfer metrics were strongly dependent on membrane parameter values. This insight, combined with the fact that in both humans and mice different neuronal types have different morphological, transcriptomic, and electrophysiological properties (<xref ref-type="bibr" rid="B30">Hodge et al., 2019</xref>), implies that different cell types may have different information transfer profiles which depend on instantaneous membrane parameters. Such information transfer characteristics could be viewed as a phenomenological characteristic of the cell. Consequently, different characteristics of information may be better transmitted by different neuronal types. Fast spiking neurons may have higher fidelity and be better at accurate representation, while slower spiking neurons may have a sparser code, and be more efficient at encoding information in distinct classes. Experimental evidence in favor of such distinction is sparse, however. Input layers to macaque monkey V1 show higher firing rates than output layers (<xref ref-type="bibr" rid="B52">Snodderly and Gur, 1995</xref>). Interestingly, these input layers showed <italic>less</italic> orientation selectivity than the output layers, which can be interpreted as the input signals being a dense representation of orientation, whereas the output signals are able to provide a sparse representation. For inhibitory neuron types, the PV neurons help synchronize lower frequency oscillations than SST neurons (<xref ref-type="bibr" rid="B33">Jang et al., 2020</xref>), but these findings were linked primarily to the feedforward (PV) vs. feedback (SST) synchrony rather than to the fidelity of representation.</p>
<p>Another avenue for generating predictions would be to look at the types of information processed by different brain subsystems. Based on our results, one might expect neurons from regions that need accurate representations, such as the cerebellum, to have parameter values that favor accuracy and thus present higher frequencies. At the same time, regions that need more sparse information, such as the cortex, would present lower frequencies. These rough predictions match the observations of Purkinje cells in the cerebellum, whose mean tonic firing rate is 23 Hz (<xref ref-type="bibr" rid="B21">Fortier et al., 1993</xref>), associated with accurate timing of motor response, compared with pyramidal cells in the motor cortex (13 Hz), associated more with the initiation of behavior.</p>
<p>Accurately modeling these combinations of neuronal types is key for generating predictions that would further validate or disprove our hypothesis on viewing information transfer as a phenomenological characteristic of neurons. Thus, further work needs to be done in looking at how different architectures and combinations of unit classes impact information transfer.</p>
</sec>
</sec>
<sec id="S5.SS4">
<title>5.4. Implications for models of neural systems</title>
<p>Our work extends the findings of <xref ref-type="bibr" rid="B10">Brendel et al. (2020)</xref> by showing that signal transmission is heavily influenced by the biophysical properties of model units. In biological neurons, membrane conductance is under complex regulation, and we show that such regulation affects not only the firing rate but also information transmission. In our arbitrary system, accurate reconstruction occurs best under higher firing frequencies. We consider it unlikely that a more complex biological model unit or network could avoid loss of information with loss of firing rate. Given the critical importance of energy efficiency in the biological brain, a coding scheme surviving also low firing frequencies could have provided an ecological advantage.</p>
<p>Predictive coding theory states that input is reconstructed by a neural system. The output is then fed back and subtracted from the input (<xref ref-type="bibr" rid="B45">Rao and Ballard, 1999</xref>; <xref ref-type="bibr" rid="B22">Friston, 2010</xref>; <xref ref-type="bibr" rid="B16">Clark, 2013</xref>). <xref ref-type="fig" rid="F9">Figure 9A</xref> shows the basic idea of predictive coding: a layer of neurons learns to replicate the input by iteratively subtracting the network output from the input, until there is no coding error (we omit the hierarchical aspect for simplicity).</p>
<fig id="F9" position="float">
<label>FIGURE 9</label>
<caption><p>Illustration of classification with predictive coding theory. <bold>(A)</bold> Current model is based on replicating input by a neural network. When output is similar to incoming signals, the system represents the input and input can be attenuated. <bold>(B)</bold> In our refined model, the system first learns a factorial representation of incoming data, for example, in distinct neural clusters. Thereafter, classifying the input to correct neural clusters by a sparse code provides an economical way to trigger the necessary model factors. Summation over the active neural population provides the internal representation.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fncom-17-1011814-g009.tif"/>
</fig>
<p>We suggest a new theoretical model where classification is used as an efficient way of encoding information (<xref ref-type="fig" rid="F9">Figure 9B</xref>). First, learning factorizes input patterns to a set of internal models. Thereafter, an input gets classified into one of a finite number of factors. The resulting factorial output signal is then fed back to the input layer in a similar way to the classical model. Our model would complement the existing models by suggesting an efficient way of decoding a sparse code. In summary, we suggest an ecologically valid computational implementation of the currently prevailing predictive coding theory.</p>
</sec>
<sec id="S5.SS5">
<title>5.5. Future directions</title>
<p>The parameter values studied here are not directly applicable to biological systems given the artificial network structure and simple unit model.</p>
<p>In future work, it will be useful to study information transmission and representation with Hodgkin-Huxley model units and active conductances in their dendritic compartments. The Hodgkin-Huxley model would provide a significantly better reconstruction of the biological neural membrane, and the active dendrites are known to be central for integrating synaptic inputs and synaptic plasticity. Moreover, our network structure is far from biological networks and, for example, feedforward inhibition is likely necessary for fast information transmission. Such work would provide an approximation of actual biological parameters, necessary for efficient information processing.</p>
<p>In the current work, learning was executed with the original model (<xref ref-type="bibr" rid="B10">Brendel et al., 2020</xref>) in Matlab, whereas the simulations were implemented in the Python-based CxSystem2/Brian2 framework. Future work needs to examine together the original learning model and other contemporary plasticity models (such as <xref ref-type="bibr" rid="B17">Clopath et al., 2010</xref>). Such reconciliation would promote teaching biologically realistic models with arbitrary data and the study of computationally functioning brain models.</p>
</sec>
</sec>
<sec id="S6" sec-type="data-availability">
<title>Data availability statement</title>
<p>Publicly available datasets were analyzed in this study. This data can be found here: Analysis, visualization and automated simulation code: <ext-link ext-link-type="uri" xlink:href="https://github.com/VisualNeuroscience-UH/SystemTools">https://github.com/VisualNeuroscience-UH/SystemTools</ext-link>. Simulation framework: <ext-link ext-link-type="uri" xlink:href="https://github.com/VisualNeuroscience-UH/CxSystem2">https://github.com/VisualNeuroscience-UH/CxSystem2</ext-link>. Simulated data (516 GB): By request from the corresponding author.</p>
</sec>
<sec id="S7" sec-type="author-contributions">
<title>Author contributions</title>
<p>TGA and SV conceived the study and wrote the manuscript. TGA did the main simulations and analyses. SV programmed the SystemTools software for analysis and automated simulations. VA developed the CxSystem package in previous study, consulted on computational implementation, and proofread the manuscript. IA programmed classification analysis and conceptualized Granger causality and Transfer entropy analyses, and wrote the corresponding descriptions. MM did the simulations and analyses for the control study. All authors contributed to the article and approved the submitted version.</p>
</sec>
</body>
<back>
<sec id="S8" sec-type="funding-information">
<title>Funding</title>
<p>This work was supported by Helsinki University Hospital research funds (Y124930060 and Y211200055) to SV and grant from School of Medicine and Surgery of UNIBO for student mobility to MM. Our project Macaque Vision was a Partnering Project to the European Union&#x2019;s Horizon 2020 Framework Programme for Research and Innovation under the Specific Grant Agreement No. 945539 (Human Brain Project SGA3).</p>
</sec>
<ack><p>We are most grateful to the Machens Lab at the Champalimaud Foundation for distributing the original Matlab<sup>&#x00AE;</sup> simulation code, enabling follow-up of their work. We thank MD Henri Hokkanen for insightful discussions and support in parameter settings.</p>
</ack>
<sec id="S9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="S10" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="S11" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fncom.2023.1011814/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fncom.2023.1011814/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Image_1.JPEG" id="FS1" mimetype="image/jpeg" xmlns:xlink="http://www.w3.org/1999/xlink"/>
<supplementary-material xlink:href="Image_2.JPEG" id="FS2" mimetype="image/jpeg" xmlns:xlink="http://www.w3.org/1999/xlink"/>
<supplementary-material xlink:href="Table_1.DOCX" id="TS1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<fn-group>
<fn id="footnote1">
<label>1</label>
<p><ext-link ext-link-type="uri" xlink:href="https://github.com/machenslab/spikes">https://github.com/machenslab/spikes</ext-link></p></fn>
<fn id="footnote2">
<label>2</label>
<p><ext-link ext-link-type="uri" xlink:href="https://github.com/VisualNeuroscience-UH/SystemTools">https://github.com/VisualNeuroscience-UH/SystemTools</ext-link></p></fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Amatrudo</surname> <given-names>J. M.</given-names></name> <name><surname>Weaver</surname> <given-names>C. M.</given-names></name> <name><surname>Crimins</surname> <given-names>J. L.</given-names></name> <name><surname>Hof</surname> <given-names>P. R.</given-names></name> <name><surname>Rosene</surname> <given-names>D. L.</given-names></name> <name><surname>Luebke</surname> <given-names>J. I.</given-names></name></person-group> (<year>2012</year>). <article-title>Influence of highly distinctive structural properties on the excitability of pyramidal neurons in monkey visual and prefrontal cortices.</article-title> <source><italic>J. Neurosci.</italic></source> <volume>32</volume> <fpage>13644</fpage>&#x2013;<lpage>13660</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.2581-12.2012</pub-id> <pub-id pub-id-type="pmid">23035077</pub-id></citation></ref>
<ref id="B2"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Amblard</surname> <given-names>P.-O.</given-names></name> <name><surname>Michel</surname> <given-names>O. J. J.</given-names></name></person-group> (<year>2012</year>). <article-title>The relation between Granger causality and directed information theory: A review.</article-title> <source><italic>Entropy</italic></source> <volume>15</volume> <fpage>113</fpage>&#x2013;<lpage>143</lpage>.</citation></ref>
<ref id="B3"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Andalibi</surname> <given-names>V.</given-names></name> <name><surname>Hokkanen</surname> <given-names>H.</given-names></name> <name><surname>Vanni</surname> <given-names>S.</given-names></name></person-group> (<year>2019</year>). <article-title>Controlling complexity of cerebral cortex simulations-I: CxSystem, a flexible cortical simulation framework.</article-title> <source><italic>Neural Comput.</italic></source> <volume>31</volume> <fpage>1048</fpage>&#x2013;<lpage>1065</lpage>. <pub-id pub-id-type="doi">10.1162/neco_a_01120</pub-id></citation></ref>
<ref id="B4"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Attwell</surname> <given-names>D.</given-names></name> <name><surname>Laughlin</surname> <given-names>S. B.</given-names></name></person-group> (<year>2001</year>). <article-title>An energy budget for signaling in the grey matter of the brain.</article-title> <source><italic>J. Cereb. Blood Flow Metab.</italic></source> <volume>21</volume> <fpage>1133</fpage>&#x2013;<lpage>1145</lpage>.</citation></ref>
<ref id="B5"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Baddeley</surname> <given-names>R.</given-names></name></person-group> (<year>2000</year>). &#x201C;<article-title>Introductory information theory and the brain</article-title>,&#x201D; in <source><italic>Information theory and the brain</italic></source>, <role>eds</role> <person-group person-group-type="editor"><name><surname>Baddeley</surname> <given-names>R.</given-names></name> <name><surname>Hancock</surname> <given-names>P.</given-names></name> <name><surname>F&#x00F6;ldiak</surname> <given-names>P.</given-names></name></person-group> (<publisher-loc>Cambridge</publisher-loc>: <publisher-name>Cambridge University Press</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>19</lpage>.</citation></ref>
<ref id="B6"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bakken</surname> <given-names>T. E.</given-names></name> <name><surname>Jorstad</surname> <given-names>N. L.</given-names></name> <name><surname>Hu</surname> <given-names>Q.</given-names></name> <name><surname>Lake</surname> <given-names>B. B.</given-names></name> <name><surname>Tian</surname> <given-names>W.</given-names></name> <name><surname>Kalmbach</surname> <given-names>B. E.</given-names></name><etal/></person-group> (<year>2021</year>). <article-title>Comparative cellular analysis of motor cortex in human, marmoset and mouse.</article-title> <source><italic>Nature</italic></source> <volume>598</volume> <fpage>111</fpage>&#x2013;<lpage>119</lpage>. <pub-id pub-id-type="doi">10.1038/s41586-021-03465-8</pub-id> <pub-id pub-id-type="pmid">34616062</pub-id></citation></ref>
<ref id="B7"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Barlow</surname> <given-names>H.</given-names></name></person-group> (<year>2001</year>). <article-title>Redundancy reduction revisited.</article-title> <source><italic>Network</italic></source> <volume>12</volume> <fpage>241</fpage>&#x2013;<lpage>253</lpage>.</citation></ref>
<ref id="B8"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Barnett</surname> <given-names>L.</given-names></name> <name><surname>Barrett</surname> <given-names>A. B.</given-names></name> <name><surname>Seth</surname> <given-names>A. K.</given-names></name></person-group> (<year>2009</year>). <article-title>Granger causality and transfer entropy are equivalent for Gaussian variables.</article-title> <source><italic>Phys. Rev. Lett.</italic></source> <volume>103</volume>:<fpage>238701</fpage>. <pub-id pub-id-type="doi">10.1103/PhysRevLett.103.238701</pub-id> <pub-id pub-id-type="pmid">20366183</pub-id></citation></ref>
<ref id="B9"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bossomaier</surname> <given-names>T.</given-names></name> <name><surname>Barnett</surname> <given-names>L.</given-names></name> <name><surname>Harr&#x00E9;</surname> <given-names>M.</given-names></name> <name><surname>Lizier</surname> <given-names>J. T.</given-names></name></person-group> (<year>2016</year>). <source><italic>An introduction to transfer entropy: Information flow in complex systems.</italic></source> <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>. <pub-id pub-id-type="doi">10.1007/978-3-319-43222-9</pub-id></citation></ref>
<ref id="B10"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brendel</surname> <given-names>W.</given-names></name> <name><surname>Bourdoukan</surname> <given-names>R.</given-names></name> <name><surname>Vertechi</surname> <given-names>P.</given-names></name> <name><surname>Machens</surname> <given-names>C. K.</given-names></name> <name><surname>Den&#x00E8;ve</surname> <given-names>S.</given-names></name></person-group> (<year>2020</year>). <article-title>Learning to represent signals spike by spike.</article-title> <source><italic>PLoS Comput. Biol.</italic></source> <volume>16</volume>:<fpage>e1007692</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pcbi.1007692</pub-id> <pub-id pub-id-type="pmid">32176682</pub-id></citation></ref>
<ref id="B11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brette</surname> <given-names>R.</given-names></name> <name><surname>Gerstner</surname> <given-names>W.</given-names></name></person-group> (<year>2005</year>). <article-title>Adaptive exponential integrate-and-fire model as an effective description of neuronal activity.</article-title> <source><italic>J. Neurophysiol.</italic></source> <volume>94</volume> <fpage>3637</fpage>&#x2013;<lpage>3642</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00686.2005</pub-id> <pub-id pub-id-type="pmid">16014787</pub-id></citation></ref>
<ref id="B12"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bullier</surname> <given-names>J.</given-names></name></person-group> (<year>2001</year>). <article-title>Integrated model of visual processing.</article-title> <source><italic>Brain Res. Brain Res. Rev.</italic></source> <volume>36</volume> <fpage>96</fpage>&#x2013;<lpage>107</lpage>.</citation></ref>
<ref id="B13"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Buonomano</surname> <given-names>D. V.</given-names></name> <name><surname>Merzenich</surname> <given-names>M. M.</given-names></name></person-group> (<year>1998</year>). <article-title>CORTICAL PLASTICITY: From synapses to maps.</article-title> <source><italic>Annu. Rev. Neurosci.</italic></source> <volume>21</volume> <fpage>149</fpage>&#x2013;<lpage>186</lpage>. <pub-id pub-id-type="doi">10.1146/annurev.neuro.21.1.149</pub-id> <pub-id pub-id-type="pmid">9530495</pub-id></citation></ref>
<ref id="B14"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Caruana</surname> <given-names>R.</given-names></name> <name><surname>Niculescu-Mizil</surname> <given-names>A.</given-names></name></person-group> (<year>2004</year>). &#x201C;<article-title>Data mining in metric space: an empirical analysis of supervised learning performance criteria</article-title>,&#x201D; in <source><italic>Proceedings of the 2004 10th ACM SIGKDD international conference on Knowledge discovery and data mining</italic></source>, <publisher-loc>Seattle, WA</publisher-loc>, <fpage>69</fpage>&#x2013;<lpage>78</lpage>.</citation></ref>
<ref id="B15"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cerqueira</surname> <given-names>V.</given-names></name> <name><surname>Torgo</surname> <given-names>L.</given-names></name> <name><surname>Smailovi&#x0107;</surname> <given-names>J.</given-names></name> <name><surname>Mozeti&#x010D;</surname> <given-names>I.</given-names></name></person-group> (<year>2017</year>). &#x201C;<article-title>A comparative study of performance estimation methods for time series forecasting</article-title>,&#x201D; in <source><italic>Proceedings of the 2017 IEEE international conference on data science and advanced analytics (DSAA)</italic></source>, <publisher-loc>Tokyo</publisher-loc>, <fpage>529</fpage>&#x2013;<lpage>538</lpage>.</citation></ref>
<ref id="B16"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Clark</surname> <given-names>A.</given-names></name></person-group> (<year>2013</year>). <article-title>Whatever next? Predictive brains, situated agents, and the future of cognitive science.</article-title> <source><italic>Behav. Brain Sci.</italic></source> <volume>36</volume> <fpage>181</fpage>&#x2013;<lpage>253</lpage>. <pub-id pub-id-type="doi">10.1017/S0140525X12000477</pub-id> <pub-id pub-id-type="pmid">23663408</pub-id></citation></ref>
<ref id="B17"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Clopath</surname> <given-names>C.</given-names></name> <name><surname>B&#x00FC;sing</surname> <given-names>L.</given-names></name> <name><surname>Vasilaki</surname> <given-names>E.</given-names></name> <name><surname>Gerstner</surname> <given-names>W.</given-names></name></person-group> (<year>2010</year>). <article-title>Connectivity reflects coding: A model of voltage-based STDP with homeostasis.</article-title> <source><italic>Nat. Neurosci.</italic></source> <volume>13</volume> <fpage>344</fpage>&#x2013;<lpage>352</lpage>. <pub-id pub-id-type="doi">10.1038/nn.2479</pub-id> <pub-id pub-id-type="pmid">20098420</pub-id></citation></ref>
<ref id="B18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Den&#x00E8;ve</surname> <given-names>S.</given-names></name> <name><surname>Alemi</surname> <given-names>A.</given-names></name> <name><surname>Bourdoukan</surname> <given-names>R.</given-names></name></person-group> (<year>2017</year>). <article-title>The brain as an efficient and robust adaptive learner.</article-title> <source><italic>Neuron</italic></source> <volume>94</volume> <fpage>969</fpage>&#x2013;<lpage>977</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuron.2017.05.016</pub-id> <pub-id pub-id-type="pmid">28595053</pub-id></citation></ref>
<ref id="B19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Destexhe</surname> <given-names>A.</given-names></name> <name><surname>Marder</surname> <given-names>E.</given-names></name></person-group> (<year>2004</year>). <article-title>Plasticity in single neuron and circuit computations.</article-title> <source><italic>Nature</italic></source> <volume>431</volume> <fpage>789</fpage>&#x2013;<lpage>795</lpage>. <pub-id pub-id-type="doi">10.1038/nature03011</pub-id> <pub-id pub-id-type="pmid">15483600</pub-id></citation></ref>
<ref id="B20"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Diesmann</surname> <given-names>M.</given-names></name> <name><surname>Gewaltig</surname> <given-names>M. O.</given-names></name> <name><surname>Aertsen</surname> <given-names>A.</given-names></name></person-group> (<year>1999</year>). <article-title>Stable propagation of synchronous spiking in cortical neural networks.</article-title> <source><italic>Nature</italic></source> <volume>402</volume> <fpage>529</fpage>&#x2013;<lpage>533</lpage>. <pub-id pub-id-type="doi">10.1038/990101</pub-id> <pub-id pub-id-type="pmid">10591212</pub-id></citation></ref>
<ref id="B21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fortier</surname> <given-names>P. A.</given-names></name> <name><surname>Smith</surname> <given-names>A. M.</given-names></name> <name><surname>Kalaska</surname> <given-names>J. F.</given-names></name></person-group> (<year>1993</year>). <article-title>Comparison of cerebellar and motor cortex activity during reaching: Directional tuning and response variability.</article-title> <source><italic>J. Neurophysiol.</italic></source> <volume>69</volume> <fpage>1136</fpage>&#x2013;<lpage>1149</lpage>.</citation></ref>
<ref id="B22"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Friston</surname> <given-names>K.</given-names></name></person-group> (<year>2010</year>). <article-title>The free-energy principle: A unified brain theory?</article-title> <source><italic>Nat. Rev. Neurosci.</italic></source> <volume>11</volume> <fpage>127</fpage>&#x2013;<lpage>138</lpage>.</citation></ref>
<ref id="B23"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gardner</surname> <given-names>W. A.</given-names></name></person-group> (<year>1992</year>). <article-title>A unifying view of coherence in signal processing.</article-title> <source><italic>Signal Process.</italic></source> <volume>29</volume> <fpage>113</fpage>&#x2013;<lpage>140</lpage>.</citation></ref>
<ref id="B24"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gerstner</surname> <given-names>W.</given-names></name> <name><surname>Naud</surname> <given-names>R.</given-names></name></person-group> (<year>2009</year>). <article-title>How good are neuron models?</article-title> <source><italic>Science</italic></source> <volume>326</volume> <fpage>379</fpage>&#x2013;<lpage>380</lpage>. <pub-id pub-id-type="doi">10.1126/science.1181936</pub-id> <pub-id pub-id-type="pmid">19833951</pub-id></citation></ref>
<ref id="B25"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Geweke</surname> <given-names>J.</given-names></name></person-group> (<year>1982</year>). <article-title>Measurement of linear dependence and feedback between multiple time series.</article-title> <source><italic>J. Am. Stat. Assoc.</italic></source> <volume>77</volume> <fpage>304</fpage>&#x2013;<lpage>313</lpage>. <pub-id pub-id-type="doi">10.1080/01621459.1982.10477803</pub-id></citation></ref>
<ref id="B26"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gilman</surname> <given-names>J. P.</given-names></name> <name><surname>Medalla</surname> <given-names>M.</given-names></name> <name><surname>Luebke</surname> <given-names>J. I.</given-names></name></person-group> (<year>2017</year>). <article-title>Area-specific features of pyramidal neurons-a comparative study in mouse and rhesus monkey.</article-title> <source><italic>Cereb. Cortex</italic></source> <volume>27</volume> <fpage>2078</fpage>&#x2013;<lpage>2094</lpage>. <pub-id pub-id-type="doi">10.1093/cercor/bhw062</pub-id> <pub-id pub-id-type="pmid">26965903</pub-id></citation></ref>
<ref id="B27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Goodman</surname> <given-names>D. F. M.</given-names></name> <name><surname>Brette</surname> <given-names>R.</given-names></name></person-group> (<year>2009</year>). <article-title>The brain simulator.</article-title> <source><italic>Front. Neurosci.</italic></source> <volume>3</volume>:<fpage>192</fpage>&#x2013;<lpage>197</lpage>. <pub-id pub-id-type="doi">10.3389/neuro.01.026.2009</pub-id> <pub-id pub-id-type="pmid">20011141</pub-id></citation></ref>
<ref id="B28"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Granger</surname> <given-names>C. J. W.</given-names></name></person-group> (<year>1969</year>). <article-title>Investigating causal relations by econometric models and cross-spectral methods.</article-title> <source><italic>Econometrica</italic></source> <volume>37</volume> <fpage>424</fpage>&#x2013;<lpage>438</lpage>.</citation></ref>
<ref id="B29"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Grossberg</surname> <given-names>S.</given-names></name></person-group> (<year>1980</year>). <article-title>How does a brain build a cognitive code?</article-title> <source><italic>Psychol. Rev.</italic></source> <volume>87</volume> <fpage>1</fpage>&#x2013;<lpage>51</lpage>. <pub-id pub-id-type="doi">10.1093/mind/xxii.10.603</pub-id></citation></ref>
<ref id="B30"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hodge</surname> <given-names>R. D.</given-names></name> <name><surname>Bakken</surname> <given-names>T. E.</given-names></name> <name><surname>Miller</surname> <given-names>J. A.</given-names></name> <name><surname>Smith</surname> <given-names>K. A.</given-names></name> <name><surname>Barkan</surname> <given-names>E. R.</given-names></name> <name><surname>Graybuck</surname> <given-names>L. T.</given-names></name><etal/></person-group> (<year>2019</year>). <article-title>Conserved cell types with divergent features in human versus mouse cortex.</article-title> <source><italic>Nature</italic></source> <volume>573</volume> <fpage>61</fpage>&#x2013;<lpage>68</lpage>. <pub-id pub-id-type="doi">10.1038/s41586-019-1506-7</pub-id> <pub-id pub-id-type="pmid">31435019</pub-id></citation></ref>
<ref id="B31"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hokkanen</surname> <given-names>H.</given-names></name> <name><surname>Andalibi</surname> <given-names>V.</given-names></name> <name><surname>Vanni</surname> <given-names>S.</given-names></name></person-group> (<year>2019</year>). <article-title>Controlling complexity of cerebral cortex simulations-II: Streamlined microcircuits.</article-title> <source><italic>Neural Comput.</italic></source> <volume>31</volume> <fpage>1066</fpage>&#x2013;<lpage>1084</lpage>. <pub-id pub-id-type="doi">10.1162/neco_a_01188</pub-id></citation></ref>
<ref id="B32"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hornik</surname> <given-names>K.</given-names></name> <name><surname>Stinchcombe</surname> <given-names>M.</given-names></name> <name><surname>White</surname> <given-names>H.</given-names></name></person-group> (<year>1989</year>). <article-title>Multilayer feedforward networks are universal approximators.</article-title> <source><italic>Neural Netw.</italic></source> <volume>2</volume> <fpage>359</fpage>&#x2013;<lpage>366</lpage>. <pub-id pub-id-type="doi">10.1016/0893-6080(89)90020-8</pub-id></citation></ref>
<ref id="B33"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jang</surname> <given-names>H. J.</given-names></name> <name><surname>Chung</surname> <given-names>H.</given-names></name> <name><surname>Rowland</surname> <given-names>J. M.</given-names></name> <name><surname>Richards</surname> <given-names>B. A.</given-names></name> <name><surname>Kohl</surname> <given-names>M. M.</given-names></name> <name><surname>Kwag</surname> <given-names>J.</given-names></name></person-group> (<year>2020</year>). <article-title>Distinct roles of parvalbumin and somatostatin interneurons in gating the synchronization of spike times in the neocortex.</article-title> <source><italic>Sci. Adv.</italic></source> <volume>6</volume>:<fpage>eaay5333</fpage>. <pub-id pub-id-type="doi">10.1126/sciadv.aay5333</pub-id> <pub-id pub-id-type="pmid">32426459</pub-id></citation></ref>
<ref id="B34"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jolivet</surname> <given-names>R.</given-names></name> <name><surname>Lewis</surname> <given-names>T. J.</given-names></name> <name><surname>Gerstner</surname> <given-names>W.</given-names></name></person-group> (<year>2004</year>). <article-title>Generalized integrate-and-fire models of neuronal activity approximate spike trains of a detailed model to a high degree of accuracy.</article-title> <source><italic>J. Neurophysiol.</italic></source> <volume>92</volume> <fpage>959</fpage>&#x2013;<lpage>976</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00190.2004</pub-id> <pub-id pub-id-type="pmid">15277599</pub-id></citation></ref>
<ref id="B35"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lara-Benitez</surname> <given-names>P.</given-names></name> <name><surname>Carranza-Garcia</surname> <given-names>M.</given-names></name> <name><surname>Riquelme</surname> <given-names>J. C.</given-names></name></person-group> (<year>2021</year>). <article-title>An experimental review on deep learning architectures for time series forecasting.</article-title> <source><italic>Int. J. Neural Syst.</italic></source> <volume>31</volume>:<fpage>2130001</fpage>. <pub-id pub-id-type="doi">10.1142/S0129065721300011</pub-id> <pub-id pub-id-type="pmid">33588711</pub-id></citation></ref>
<ref id="B36"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Luebke</surname> <given-names>J. I.</given-names></name> <name><surname>Medalla</surname> <given-names>M.</given-names></name> <name><surname>Amatrudo</surname> <given-names>J. M.</given-names></name> <name><surname>Weaver</surname> <given-names>C. M.</given-names></name> <name><surname>Crimins</surname> <given-names>J. L.</given-names></name> <name><surname>Hunt</surname> <given-names>B.</given-names></name><etal/></person-group> (<year>2015</year>). <article-title>Age-related changes to layer 3 pyramidal cells in the rhesus monkey visual cortex.</article-title> <source><italic>Cereb. Cortex</italic></source> <volume>25</volume> <fpage>1454</fpage>&#x2013;<lpage>1468</lpage>. <pub-id pub-id-type="doi">10.1093/cercor/bht336</pub-id> <pub-id pub-id-type="pmid">24323499</pub-id></citation></ref>
<ref id="B37"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Makridakis</surname> <given-names>S.</given-names></name> <name><surname>Spiliotis</surname> <given-names>E.</given-names></name> <name><surname>Assimakopoulos</surname> <given-names>V.</given-names></name></person-group> (<year>2018</year>). <article-title>Statistical and machine learning forecasting methods: Concerns and ways forward.</article-title> <source><italic>PLoS One</italic></source> <volume>13</volume>:<fpage>e0194889</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0194889</pub-id> <pub-id pub-id-type="pmid">29584784</pub-id></citation></ref>
<ref id="B38"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>McQuarrie</surname> <given-names>A. D. R.</given-names></name> <name><surname>Tsai</surname> <given-names>C.-L.</given-names></name></person-group> (<year>1998</year>). <source><italic>Regression and time series model selection.</italic></source> <publisher-loc>Singapore</publisher-loc>: <publisher-name>World Scientific</publisher-name>.</citation></ref>
<ref id="B39"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mensi</surname> <given-names>S.</given-names></name> <name><surname>Naud</surname> <given-names>R.</given-names></name> <name><surname>Pozzorini</surname> <given-names>C.</given-names></name> <name><surname>Avermann</surname> <given-names>M.</given-names></name> <name><surname>Petersen</surname> <given-names>C. C. H.</given-names></name> <name><surname>Gerstner</surname> <given-names>W.</given-names></name></person-group> (<year>2012</year>). <article-title>Parameter extraction and classification of three cortical neuron types reveals two distinct adaptation mechanisms.</article-title> <source><italic>J. Neurophysiol.</italic></source> <volume>107</volume> <fpage>1756</fpage>&#x2013;<lpage>1775</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00408.2011</pub-id> <pub-id pub-id-type="pmid">22157113</pub-id></citation></ref>
<ref id="B40"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mumford</surname> <given-names>D.</given-names></name></person-group> (<year>1992</year>). <article-title>On the computational architecture of the neocortex - II The role of cortico-cortical loops.</article-title> <source><italic>Biol. Cybern.</italic></source> <volume>66</volume> <fpage>241</fpage>&#x2013;<lpage>251</lpage>. <pub-id pub-id-type="doi">10.1007/BF00198477</pub-id> <pub-id pub-id-type="pmid">1540675</pub-id></citation></ref>
<ref id="B41"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nordlie</surname> <given-names>E.</given-names></name> <name><surname>Gewaltig</surname> <given-names>M. O.</given-names></name> <name><surname>Plesser</surname> <given-names>H. E.</given-names></name></person-group> (<year>2009</year>). <article-title>Towards reproducible descriptions of neuronal network models.</article-title> <source><italic>PLoS Comput. Biol.</italic></source> <volume>5</volume>:<fpage>1000456</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pcbi.1000456</pub-id> <pub-id pub-id-type="pmid">19662159</pub-id></citation></ref>
<ref id="B42"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Panzeri</surname> <given-names>S.</given-names></name> <name><surname>Macke</surname> <given-names>J. H.</given-names></name> <name><surname>Gross</surname> <given-names>J.</given-names></name> <name><surname>Kayser</surname> <given-names>C.</given-names></name></person-group> (<year>2015</year>). <article-title>Neural population coding: Combining insights from microscopic and mass signals.</article-title> <source><italic>Trends Cogn. Sci.</italic></source> <volume>19</volume> <fpage>162</fpage>&#x2013;<lpage>172</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2015.01.002</pub-id> <pub-id pub-id-type="pmid">25670005</pub-id></citation></ref>
<ref id="B43"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pascual-Leone</surname> <given-names>A.</given-names></name> <name><surname>Amedi</surname> <given-names>A.</given-names></name> <name><surname>Fregni</surname> <given-names>F.</given-names></name> <name><surname>Merabet</surname> <given-names>L. B.</given-names></name></person-group> (<year>2005</year>). <article-title>The plastic human brain cortex.</article-title> <source><italic>Annu. Rev. Neurosci.</italic></source> <volume>28</volume> <fpage>377</fpage>&#x2013;<lpage>401</lpage>. <pub-id pub-id-type="doi">10.1146/annurev.neuro.27.070203.144216</pub-id> <pub-id pub-id-type="pmid">16022601</pub-id></citation></ref>
<ref id="B44"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Povysheva</surname> <given-names>N. V.</given-names></name> <name><surname>Zaitsev</surname> <given-names>A. V.</given-names></name> <name><surname>Gonzalez-Burgos</surname> <given-names>G.</given-names></name> <name><surname>Lewis</surname> <given-names>D. A.</given-names></name></person-group> (<year>2013</year>). <article-title>Electrophysiological heterogeneity of fast-spiking interneurons: Chandelier versus basket cells.</article-title> <source><italic>PLoS One</italic></source> <volume>8</volume>:<fpage>e0070553</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0070553</pub-id> <pub-id pub-id-type="pmid">23950961</pub-id></citation></ref>
<ref id="B45"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rao</surname> <given-names>R. P.</given-names></name> <name><surname>Ballard</surname> <given-names>D. H.</given-names></name></person-group> (<year>1999</year>). <article-title>Predictive coding in the visual cortex: A functional interpretation of some extra-classical receptive-field effects.</article-title> <source><italic>Nat. Neurosci.</italic></source> <volume>2</volume> <fpage>79</fpage>&#x2013;<lpage>87</lpage>. <pub-id pub-id-type="doi">10.1038/4580</pub-id> <pub-id pub-id-type="pmid">10195184</pub-id></citation></ref>
<ref id="B46"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rauch</surname> <given-names>A.</given-names></name> <name><surname>La Camera</surname> <given-names>G.</given-names></name> <name><surname>L&#x00FC;scher</surname> <given-names>H. R.</given-names></name> <name><surname>Senn</surname> <given-names>W.</given-names></name> <name><surname>Fusi</surname> <given-names>S.</given-names></name></person-group> (<year>2003</year>). <article-title>Neocortical pyramidal cells respond as integrate-and-fire neurons to <italic>in vivo</italic>-like input currents.</article-title> <source><italic>J. Neurophysiol.</italic></source> <volume>90</volume> <fpage>1598</fpage>&#x2013;<lpage>1612</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00293.2003</pub-id> <pub-id pub-id-type="pmid">12750422</pub-id></citation></ref>
<ref id="B47"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rull&#x00E1;n Bux&#x00F3;</surname> <given-names>C. E.</given-names></name> <name><surname>Pillow</surname> <given-names>J. W.</given-names></name></person-group> (<year>2020</year>). <article-title>Poisson balanced spiking networks.</article-title> <source><italic>PLoS Comput. Biol.</italic></source> <volume>16</volume>:<fpage>e1008261</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pcbi.1008261</pub-id> <pub-id pub-id-type="pmid">33216741</pub-id></citation></ref>
<ref id="B48"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Salmelin</surname> <given-names>R.</given-names></name> <name><surname>Hari</surname> <given-names>R.</given-names></name> <name><surname>Lounasmaa</surname> <given-names>O. V.</given-names></name> <name><surname>Sams</surname> <given-names>M.</given-names></name></person-group> (<year>1994</year>). <article-title>Dynamics of brain activation during picture naming.</article-title> <source><italic>Nature</italic></source> <volume>368</volume> <fpage>463</fpage>&#x2013;<lpage>465</lpage>.</citation></ref>
<ref id="B49"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schreiber</surname> <given-names>T.</given-names></name></person-group> (<year>2000</year>). <article-title>Measuring information transfer.</article-title> <source><italic>Phys. Rev. Lett.</italic></source> <volume>85</volume> <fpage>461</fpage>&#x2013;<lpage>464</lpage>. <pub-id pub-id-type="doi">10.1103/PhysRevLett.85.461</pub-id> <pub-id pub-id-type="pmid">10991308</pub-id></citation></ref>
<ref id="B50"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sclar</surname> <given-names>G.</given-names></name> <name><surname>Maunsell</surname> <given-names>J. H. R.</given-names></name> <name><surname>Lennie</surname> <given-names>P.</given-names></name></person-group> (<year>1990</year>). <article-title>Coding of image contrast in central visual pathways of the macaque monkey.</article-title> <source><italic>Vision Res.</italic></source> <volume>30</volume> <fpage>1</fpage>&#x2013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1016/0042-6989(90)90123-3</pub-id></citation></ref>
<ref id="B51"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shannon</surname> <given-names>C. E.</given-names></name></person-group> (<year>1948</year>). <article-title>A Mathematical theory of communication.</article-title> <source><italic>Bell Syst. Tech. J.</italic></source> <volume>27</volume> <fpage>379</fpage>&#x2013;<lpage>423</lpage>.</citation></ref>
<ref id="B52"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Snodderly</surname> <given-names>D. M.</given-names></name> <name><surname>Gur</surname> <given-names>M.</given-names></name></person-group> (<year>1995</year>). <article-title>Organization of striate cortex of alert, trained monkeys (<italic>Macaca fascicularis</italic>): Ongoing activity, stimulus selectivity, and widths of receptive field activating regions.</article-title> <source><italic>J. Neurophysiol.</italic></source> <volume>74</volume> <fpage>2100</fpage>&#x2013;<lpage>2125</lpage>. <pub-id pub-id-type="doi">10.1152/jn.1995.74.5.2100</pub-id> <pub-id pub-id-type="pmid">8592200</pub-id></citation></ref>
<ref id="B53"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tovee</surname> <given-names>M. J.</given-names></name> <name><surname>Rolls</surname> <given-names>E. T.</given-names></name> <name><surname>Treves</surname> <given-names>A.</given-names></name> <name><surname>Bellis</surname> <given-names>R. P.</given-names></name></person-group> (<year>1993</year>). <article-title>Information encoding and the responses of single neurons in the primate temporal visual cortex.</article-title> <source><italic>J. Neurophysiol.</italic></source> <volume>70</volume> <fpage>640</fpage>&#x2013;<lpage>654</lpage>.</citation></ref>
<ref id="B54"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vinje</surname> <given-names>W. E.</given-names></name> <name><surname>Gallant</surname> <given-names>J. L.</given-names></name></person-group> (<year>2000</year>). <article-title>Sparse coding and decorrelation in primary visual cortex during natural vision.</article-title> <source><italic>Science</italic></source> <volume>287</volume> <fpage>1273</fpage>&#x2013;<lpage>1276</lpage>. <pub-id pub-id-type="doi">10.1126/science.287.5456.1273</pub-id> <pub-id pub-id-type="pmid">10678835</pub-id></citation></ref>
<ref id="B55"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Virtanen</surname> <given-names>P.</given-names></name> <name><surname>Gommers</surname> <given-names>R.</given-names></name> <name><surname>Oliphant</surname> <given-names>T. E.</given-names></name> <name><surname>Haberland</surname> <given-names>M.</given-names></name> <name><surname>Reddy</surname> <given-names>T.</given-names></name> <name><surname>Cournapeau</surname> <given-names>D.</given-names></name><etal/></person-group> (<year>2020</year>). <article-title>SciPy 1.0: Fundamental algorithms for scientific computing in python.</article-title> <source><italic>Nat. Methods</italic></source> <volume>17</volume> <fpage>261</fpage>&#x2013;<lpage>272</lpage>. <pub-id pub-id-type="doi">10.1038/s41592-019-0686-2</pub-id> <pub-id pub-id-type="pmid">32015543</pub-id></citation></ref>
</ref-list>
</back>
</article>
