<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Archiving and Interchange DTD v2.3 20070202//EN" "archivearticle.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="methods-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neuroinform.</journal-id>
<journal-title>Frontiers in Neuroinformatics</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neuroinform.</abbrev-journal-title>
<issn pub-type="epub">1662-5196</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fninf.2024.1156683</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroscience</subject>
<subj-group>
<subject>Methods</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Multiscale co-simulation design pattern for neuroscience applications</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Kusch</surname> <given-names>Lionel</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/812633/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Diaz-Pier</surname> <given-names>Sandra</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/264471/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Klijn</surname> <given-names>Wouter</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1470442/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Sontheimer</surname> <given-names>Kim</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Bernard</surname> <given-names>Christophe</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/55359/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Morrison</surname> <given-names>Abigail</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/13504/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Jirsa</surname> <given-names>Viktor</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c002"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/4334/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Institut de Neurosciences des Syst&#x000E8;mes (INS), UMR1106, Aix-Marseille Universit&#x000E9;</institution>, <addr-line>Marseilles</addr-line>, <country>France</country></aff>
<aff id="aff2"><sup>2</sup><institution>Simulation and Data Lab Neuroscience, J&#x000FC;lich Supercomputing Centre (JSC), Institute for Advanced Simulation, JARA, Forschungszentrum J&#x000FC;lich GmbH</institution>, <addr-line>J&#x000FC;lich</addr-line>, <country>Germany</country></aff>
<aff id="aff3"><sup>3</sup><institution>Forschungszentrum J&#x000FC;lich GmbH, IAS-6/INM-6, JARA</institution>, <addr-line>J&#x000FC;lich</addr-line>, <country>Germany</country></aff>
<aff id="aff4"><sup>4</sup><institution>Computer Science 3 - Software Engineering, RWTH Aachen University</institution>, <addr-line>Aachen</addr-line>, <country>Germany</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Arnd Roth, University College London, United Kingdom</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Mikael Djurfeldt, Royal Institute of Technology, Sweden</p>
<p>David Phillip Nickerson, The University of Auckland, New Zealand</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Lionel Kusch <email>lionel.kusch&#x00040;grenoble-inp.org</email></corresp>
<corresp id="c002">Viktor Jirsa <email>viktor.jirsa&#x00040;univ-amu.fr</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>12</day>
<month>02</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>18</volume>
<elocation-id>1156683</elocation-id>
<history>
<date date-type="received">
<day>01</day>
<month>02</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>19</day>
<month>01</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2024 Kusch, Diaz-Pier, Klijn, Sontheimer, Bernard, Morrison and Jirsa.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Kusch, Diaz-Pier, Klijn, Sontheimer, Bernard, Morrison and Jirsa</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<p>Integration of information across heterogeneous sources creates added scientific value. Interoperability of data, tools and models is, however, difficult to accomplish across spatial and temporal scales. Here we introduce the toolbox Parallel Co-Simulation, which enables the interoperation of simulators operating at different scales. We provide a software science co-design pattern and illustrate its functioning along a neuroscience example, in which individual regions of interest are simulated on the cellular level allowing us to study detailed mechanisms, while the remaining network is efficiently simulated on the population level. A workflow is illustrated for the use case of The Virtual Brain and NEST, in which the CA1 region of the cellular-level hippocampus of the mouse is embedded into a full brain network involving micro and macro electrode recordings. This new tool allows integrating knowledge across scales in the same simulation framework and validating them against multiscale experiments, thereby largely widening the explanatory power of computational models.</p></abstract>
<kwd-group>
<kwd>co-simulation</kwd>
<kwd>multiscale</kwd>
<kwd>brain network model</kwd>
<kwd>spiking neural network</kwd>
<kwd>mouse brain</kwd>
</kwd-group>
<counts>
<fig-count count="5"/>
<table-count count="0"/>
<equation-count count="0"/>
<ref-count count="68"/>
<page-count count="15"/>
<word-count count="11297"/>
</counts>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>1 Introduction</title>
<p>The brain is a complex system that includes billions of cells that interact with each other in a nonlinear manner. As a result, even if we were able to measure what all cells are doing simultaneously, we would not necessarily gain a deeper understanding of how the brain works. It has been previously claimed that emergent properties can be only understood through an integrated approach (Cilliers, <xref ref-type="bibr" rid="B13">2008</xref>), ideally in a common theoretical framework to give meaning to data at all scales (Fr&#x000E9;gnac, <xref ref-type="bibr" rid="B26">2021</xref>). Such a framework using theoretical models can account for nonlinearities and subsequently explain emergent properties (Pillai and Jirsa, <xref ref-type="bibr" rid="B54">2017</xref>; Jirsa and Sheheitli, <xref ref-type="bibr" rid="B36">2022</xref>). Numerous models have been developed to study the interactions of molecules within cells, cell physiology, the activity of cell populations, full brain dynamics and human behavior (Finkelstein et al., <xref ref-type="bibr" rid="B24">2004</xref>; Huys et al., <xref ref-type="bibr" rid="B35">2014</xref>; Einevoll et al., <xref ref-type="bibr" rid="B22">2019</xref>). It is currently impossible to model the brain with all its cellular and molecular constituents due to limitations in resolution, computational resources, or available data from measurements. As a result, even if a given physio/pathological process can be modeled at the macroscopic scale, the lack of microscopic resolution at the molecular scale prevents obtaining mechanistic insight (Meier-Schellersheim et al., <xref ref-type="bibr" rid="B45">2009</xref>). It is, therefore, important to bridge different scales, which is a challenge not unique to neuroscience. 
In material science, the study of composite materials requires the description of molecular interactions of individual composites and a global description for the analysis of the subsequent deformation of the composite plate (Schlick et al., <xref ref-type="bibr" rid="B63">2021</xref>). In biology, to understand the effect of drugs on tumor growth, it is necessary to model the tissue of cells around the tumor, the tumor cells, and the subcellular transduction signaling pathways (Rejniak and Anderson, <xref ref-type="bibr" rid="B57">2011</xref>; Rahman et al., <xref ref-type="bibr" rid="B56">2017</xref>). In neuroscience, synaptic plasticity uses mechanisms of spike timing on the millisecond scale but leads to the formation of long-term memory evolving on the scale of minutes, days and weeks (Durstewitz et al., <xref ref-type="bibr" rid="B21">2011</xref>).</p>
<p>Our current study aims to provide a methodology to address the scientific and technical problems of multiscale co-simulation in the brain. The main difficulty of multiscale simulation is to enable the information exchange between models formulated at different scales. Such communication can be interpreted as a coupling across scales. For example, in the case of tumors, the tissue around the tumors is represented by a continuum model (first scale), which interacts with discrete tumor cells (second scale); while continuous signaling pathways are modeled in cells (third scale). At present, it is not possible to create a common coupling function amongst these three scales and each scale can use a dedicated simulator engine for optimizing the simulation. In the case of tumors, a common approach is to use COMSOL Multiphysics (COMSOL, <xref ref-type="bibr" rid="B14">2019</xref>) for the tissue simulation, and Matlab (MATLAB, <xref ref-type="bibr" rid="B42">2017</xref>) for the simulation at cellular and subcellular scales. Because the interaction of simulator engines is not a commonly supported feature, co-simulation of models at different scales and within a common framework is challenging. Existing solutions for co-simulation in physics (Gomes et al., <xref ref-type="bibr" rid="B29">2018</xref>; Fish et al., <xref ref-type="bibr" rid="B25">2021</xref>) or in biology (Hetherington et al., <xref ref-type="bibr" rid="B33">2007</xref>; Matthews and Marshall-Coln, <xref ref-type="bibr" rid="B43">2021</xref>) cannot be easily adapted in neuroscience due to the specificity of simulators and models. 
There is a large number of scale-specific simulators in neuroscience, e.g., for compartmental neurons: Neuron (Carnevale and Hines, <xref ref-type="bibr" rid="B11">2006</xref>), Arbor (Akar et al., <xref ref-type="bibr" rid="B2">2019</xref>), Genesis (Bower and Beeman, <xref ref-type="bibr" rid="B7">1998</xref>); for point neurons: NEST (Gewaltig and Diesmann, <xref ref-type="bibr" rid="B27">2007</xref>), Brian (Stimberg et al., <xref ref-type="bibr" rid="B66">2019</xref>), ANNarchy (Vitay et al., <xref ref-type="bibr" rid="B67">2015</xref>); for the brain network: The Virtual Brain (TVB) (Sanz Leon et al., <xref ref-type="bibr" rid="B59">2013</xref>), Neurolib (Cakan et al., <xref ref-type="bibr" rid="B10">2023</xref>). Most of these simulators can support multiscale simulation to a limited degree, but they remain specialized and optimized for supporting a specific model type; consequently, the usage of other model types diminishes their optimal performance. The objective of co-simulation is to remove this limitation by exploiting the advantages of each simulator within the same simulation (Goddard et al., <xref ref-type="bibr" rid="B28">2001</xref>; Djurfeldt et al., <xref ref-type="bibr" rid="B19">2010</xref>; Mitchinson et al., <xref ref-type="bibr" rid="B48">2010</xref>; Falotico et al., <xref ref-type="bibr" rid="B23">2017</xref>).</p>
<p>Schirner et al. (<xref ref-type="bibr" rid="B62">2022</xref>) provide an overview of software tools available for TVB in the European digital neuroscience infrastructure EBRAINS. Two toolboxes for co-simulation are introduced in EBRAINS, TVB-Multiscale and Parallel Co-Simulation. The former toolbox focuses on rapid development for scientific use cases, whereas the latter focuses on optimisation of co-simulation performance and applies the co-simulation design pattern presented in this study. An illustrative example of co-simulation of multiscale models using TVB Multiscale co-simulation is virtual deep brain stimulation (Meier et al., <xref ref-type="bibr" rid="B44">2022</xref>; Shaheen et al., <xref ref-type="bibr" rid="B64">2022</xref>).</p>
<p>Here we present the methodology of the Parallel Co-Simulation toolbox and illustrate its use along the example of combined microscopic Local Field Potential (LFP) and neuronal firing recordings, and macroscopic electro-COrticoGraphy (ECOG) in mice (Renz et al., <xref ref-type="bibr" rid="B58">2020</xref>). This example aims to demonstrate computational requirements for interpreting recorded multiscale data using multiscale modeling (D&#x00027;Angelo and Jirsa, <xref ref-type="bibr" rid="B17">2022</xref>). The method is based on a software science co-design pattern (Dudkowski, <xref ref-type="bibr" rid="B20">2009</xref>) that dictates the separation of science and technical attributes, allowing these to be addressed in isolation where possible. This separation is based on transformer modules, which synchronize and connect simulators and include the function for transforming data between scales. A multiscale model is built from experimental data obtained in the mouse brain with ECOG cortical signals and LFP signals in the CA1 region of the hippocampus. We co-simulate the model using the simulators TVB and NEST and demonstrate the performance and limitations of the approach along three concrete examples of multiscale network dynamics. The following sections describe the technical details and the optimisation for co-simulation.</p>
</sec>
<sec sec-type="results" id="s2">
<title>2 Results</title>
<p>The multiscale co-simulation software science co-design pattern formalizes the interactions between parallel simulations at different scales. The data transformation among scales is performed during their transfer among simulators. This design pattern comprises five modules (<xref ref-type="fig" rid="F1">Figure 1A</xref>): one launcher, two simulators, and two transfer modules. Each transfer module contains three components: one interface for receiving data, one interface for sending data and a transformation process. The launcher starts and handles the coordination of simulation parameters. The simulators perform scale-specific simulations. The transfer modules transfer the data from one simulator to another. During the transfer, the transformation process transforms the incoming data for the simulator on the receiver side.</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>Multiscale co-simulation design pattern and example of an application in neuroscience. <bold>(A)</bold> Multiscale co-simulation design pattern between two simulators using transfer modules to transform and transfer data between scales. <bold>(B)</bold> Application of the co-simulation pattern for a neuroscience use case focusing on the CA1 region of a mouse brain. <bold>Left</bold> shows a rendering of the mouse brain from Allen Institute (Lein et al., <xref ref-type="bibr" rid="B41">2007</xref>). Blue spheres mark the centers of mouse brain regions, and the red spheres are a subset of neurons of the CA1. <bold>Right</bold> illustrates the co-simulation data flow between TVB (Sanz Leon et al., <xref ref-type="bibr" rid="B59">2013</xref>) and NEST (Hahne et al., <xref ref-type="bibr" rid="B32">2021</xref>), showing the different functional modules. The four corners&#x00027; plots illustrate the data type exchanged in respective information channels. The transfer modules exchange mean firing rate data with TVB (module on the right) and exchange spike times with NEST (module on the left). Each population has a specific module enabling data transfer between populations in different scales.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fninf-18-1156683-g0001.tif"/>
</fig>
<p>This study applies the multiscale co-simulation design pattern to a virtual experiment workflow between the <italic>in-silico</italic> mouse whole-brain dynamics and the <italic>in-silico</italic> micro-scale network dynamics of the hippocampus CA1 region. The recording sites of the virtual CA1 and virtual mouse brain are located at similar positions (Renz et al., <xref ref-type="bibr" rid="B58">2020</xref>) (see <xref ref-type="fig" rid="F1">Figure 1B</xref>). The Virtual Brain (TVB) (Sanz Leon et al., <xref ref-type="bibr" rid="B59">2013</xref>), an open-source platform, has been used to simulate the mouse whole-brain network activity, while NEST (Hahne et al., <xref ref-type="bibr" rid="B32">2021</xref>), another open-source platform, has been employed for the simulation of the CA1 neuronal network dynamics. This specific application illustrates this novel design pattern&#x00027;s technical limitations and demonstrates the potential for a wider range of applications.</p>
<sec>
<title>2.1 Virtual experiment of hippocampal CA1 embedded in a full mouse brain</title>
<p>The virtual experiment of the mouse brain is composed of a brain network model, regional neuronal network models and electrophysiological sensor models. The whole-brain animal model is a network comprised of nodes and edges, where each node contains a neural mass model to simulate the activity of each region and where edges represent the anatomical connections among the regions. The anatomical connections are defined by track lengths and an adjacency matrix representing the coupling strengths of connections between the regions of the network, the &#x0201C;connectome&#x0201D;, which are extracted from tracer data from the Allen Institute (Oh et al., <xref ref-type="bibr" rid="B53">2014</xref>) (<xref ref-type="fig" rid="F2">Figures 2F</xref>, <xref ref-type="fig" rid="F2">G</xref>). The dynamic activity of each brain region is obtained with the neural mass model described by di Volo et al. (<xref ref-type="bibr" rid="B18">2019</xref>) (see Section 3). The neuroinformatics platform The Virtual Brain (TVB) (Sanz Leon et al., <xref ref-type="bibr" rid="B59">2013</xref>) performs the animal whole-brain simulation by considering both the chosen neural mass model and specific &#x0201C;connectome&#x0201D;.</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>The virtual mouse brain experiment. <bold>(A)</bold> Cross section of the mouse brain with the position of the left implanted electrode. <bold>(B)</bold> Position of the site layout of a polytrode (Neuronexus 32 models from MEAutility library). <bold>(C)</bold> The probe position inside the neural network. The red neurons are pyramidal neurons (Shuman et al., <xref ref-type="bibr" rid="B65">2020</xref>) and the blue neurons are basket cells (Shuman et al., <xref ref-type="bibr" rid="B65">2020</xref>). <bold>(D)</bold> Mouse brain of Allen Institute (Lein et al., <xref ref-type="bibr" rid="B41">2007</xref>) with the position of the two polytrodes and 16 ECOG electrodes. The ECOG electrodes measure the neural field from the surface of the electrode in blue for the left hemisphere and yellow for the right hemisphere. Blue spheres mark the centers of mouse brain regions, and the red spheres are a subset of neurons of the CA1. <bold>(E)</bold> Representation of the connectome of the mouse brain (Oh et al., <xref ref-type="bibr" rid="B53">2014</xref>). The blue dots are brain regions, and the red ones are CA1 regions, whose neurons are simulated with NEST. The gray links highlight the strongest anatomical connections. <bold>(F)</bold> The weights of the anatomical links in graphic E are shown as an adjacency matrix. <bold>(G)</bold> The tract lengths associated with E are shown as an adjacency matrix. The anatomical connections are extracted from tracer data of the Allen Institute (Oh et al., <xref ref-type="bibr" rid="B53">2014</xref>). <bold>(H)</bold> Example of voltage recorded from 10 excitatory and 10 inhibitory neurons. <bold>(I)</bold> Example of adaptation currents recorded from 10 inhibitory and 10 excitatory neurons. <bold>(J)</bold> Example of spike trains recorded from the left CA1. <bold>(K)</bold> Example of Local Field Potential recorded from the poly-electrode generated from the spike trains and neuron morphologies. 
<bold>(L)</bold> Example of recording from the ECOG electrodes of the left hemisphere. <bold>(M)</bold> Example of mean firing rate of excitatory and inhibitory populations for a subset of mouse brain regions.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fninf-18-1156683-g0002.tif"/>
</fig>
<p>The dynamics of the two main brain regions of interest, the left and right hippocampus CA1 (<xref ref-type="fig" rid="F2">Figure 2</xref>), are modeled as a separate neural network composed of point neurons connected with static synapses. Each network comprises one inhibitory, and one excitatory homogeneous population of adaptive exponential integrate and fire neurons (Brette and Gerstner, <xref ref-type="bibr" rid="B9">2005</xref>) (see Section 3). In each microcircuit, the populations of point neurons are taken to be homogeneous; that is, neurons of the same population have the same parameter values. The neuroinformatics platform NEST (Hahne et al., <xref ref-type="bibr" rid="B32">2021</xref>) is able to perform the regional neuronal network simulation using the aforementioned description of the microcircuit of point neurons.</p>
<p>To compare the simulations with empirical data, the virtual experiment contains two models of electrophysiology sensors for probing neural activity. The electrophysiological sensor models are two surface grids of 8-channel electrocorticography arrays and two penetrating multi-electrode arrays of 32 recording sites each. Their positions are illustrated in <xref ref-type="fig" rid="F2">Figure 2A</xref>. <xref ref-type="fig" rid="F2">Figure 2D</xref> shows the position of the polytrodes in the mouse brain, while <xref ref-type="fig" rid="F2">Figures 2A</xref>, <xref ref-type="fig" rid="F2">C</xref> depict the position of the left probes in a cross-section of the left hemisphere and the position of the point of the polytrodes in the population of neurons, respectively. <xref ref-type="fig" rid="F2">Figure 2B</xref> displays the polytrodes with the 32 recording sites. The simulated signal from the ECOG sensor is computed using the model of a point dipole in a homogeneous space as described by Sanz-Leon et al. (<xref ref-type="bibr" rid="B60">2015</xref>) (see Section 3) and the hybridLFPy (Hagen et al., <xref ref-type="bibr" rid="B31">2016</xref>) software is used for computing the signal from the recording site of the implanted probes (see Section 3). The latter software uses morphologies and spatial position of neurons to generate the underlying local field potential (LFP) for given spike trains of point neurons. The morphology of neurons is taken from the presented morphology in Shuman et al. (<xref ref-type="bibr" rid="B65">2020</xref>). The excitatory morphology is based on the pyramidal cell morphology, and inhibitory neurons are based on the basket cell morphology (Shuman et al., <xref ref-type="bibr" rid="B65">2020</xref>).</p>
</sec>
<sec>
<title>2.2 Output signal from the virtual experiment</title>
<p>This section describes the co-simulation results at different scales by describing the possible recordings of physiological signals from the simulation of CA1 embedded in a whole mouse brain. The Discussion section will provide an interpretation of these results to describe the advantages and the limitations of the multiscale co-simulation design pattern. As described in <xref ref-type="fig" rid="F2">Figure 2</xref>, the output modalities of one virtual experiment have directly corresponding measures in the real world such as the local field potential measured at each of the thirty-two sites of each polytrode electrode (<xref ref-type="fig" rid="F2">Figure 2K</xref>) and from the eight electrocorticography channels of each hemisphere (<xref ref-type="fig" rid="F2">Figure 2L</xref>). Moreover, the simulation gives access directly to the membrane voltages of the CA1 neurons (<xref ref-type="fig" rid="F2">Figure 2H</xref>), adaptive current of the CA1 neurons (<xref ref-type="fig" rid="F2">Figure 2I</xref>), spike times (<xref ref-type="fig" rid="F2">Figure 2J</xref>) and the mean firing rate of the different regions of the mouse brain (<xref ref-type="fig" rid="F2">Figure 2M</xref>). To illustrate the variability of the measures and some limitations of the coupling model of different scales, we choose three sets of different parameters for CA1 and neural masses. Each set of parameters represents one of three dynamic regimes of the CA1. These results are separated between micro (<xref ref-type="fig" rid="F3">Figure 3</xref>) and macro (<xref ref-type="fig" rid="F4">Figure 4</xref>) scales, but they are the output of the simulation workflow between TVB and NEST. In particular, <xref ref-type="fig" rid="F3">Figure 3</xref> reports the mean membrane voltage, mean adaptive current, instantaneous firing rate and the signal of 12 central sites from the 32 electrode sites of the specific CA1 network. 
<xref ref-type="fig" rid="F4">Figure 4</xref> displays the results on the whole brain level: the mean firing rate of each brain region, the signal of the 16 electrocorticography channels and the mean firing rate from the spiking neural network.</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Spiking neural network in three different states of the left CA1. The parameterization of the spiking neural network of CA1 is chosen such that the dynamics are in an asynchronous state <bold>(A)</bold>, irregular synchronization state <bold>(B)</bold>, and regular bursting <bold>(C)</bold>. Top-left: Voltage membrane of 20 adaptive exponential leaky and integrator neurons and their mean in a thick line. The red (blue) lines are excitatory (inhibitory) neurons. Middle-left: The adaptation currents of 10 neurons and their mean in a thick line. Bottom-left: Local field potential from the 12 sites in the middle line of the left polytrode. The local field potential is computed from the spike trains of all neurons by the software HybridLFPY (Kuhn et al., <xref ref-type="bibr" rid="B38">2003</xref>). Top-right: Spike trains of 10,000 neurons for 11s. Middle-right: instantaneous firing rate of the excitatory (inhibitory) population above in red (blue). Bottom-right: Spectrogram and power spectrum example of the instantaneous firing rate for 10s.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fninf-18-1156683-g0003.tif"/>
</fig>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p>Three different states of CA1 in mouse brain. The parameterization of the CA1 spiking neural network is defined to obtain an asynchronous state <bold>(A)</bold>, an irregular synchronization state <bold>(B)</bold>, and a regular bursting <bold>(C)</bold>. Top-left: Instantaneous firing rate of spiking neural networks in light red for 11 s. The thick line shows the sliding window mean firing rate. Bottom-left: (bottom-right) Signal from ECOG sensors, the figure represents the recording of the 8 electrodes on the top of the left (right) hemisphere. Right part: Subset of region overview of the mean firing rates of excitatory, in red, and inhibitory, in blue, population from the model of Mean Adaptive Exponential. The two black curves are the mean firing rate of the two populations of excitatory neurons simulated with NEST (Hahne et al., <xref ref-type="bibr" rid="B32">2021</xref>).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fninf-18-1156683-g0004.tif"/>
</fig>
<p>To illustrate the basic dynamic features of the network, we define three operating regimes corresponding to irregular asynchronous and synchronous activity, and regular bursting. <xref ref-type="fig" rid="F3">Figures 3</xref>, <xref ref-type="fig" rid="F4">4</xref> are separated into three different panels, which correspond to the three dynamic regimes and corresponding parameters representative of the different types of dynamics exhibited by spiking neural networks (see Section 3 for the choice of these parameters). Panel A represents an asynchronous (A) state, which is characterized by a constant (flat-line) mean firing rate (see <xref ref-type="fig" rid="F3">Figure 3A</xref> top-right and <xref ref-type="fig" rid="F4">Figure 4A</xref> top-left). Panel B represents an irregular synchronous (IS) state, which reflects a large irregular variation of the mean firing rate (see <xref ref-type="fig" rid="F3">Figure 3B</xref> top-right and <xref ref-type="fig" rid="F4">Figure 4B</xref> top-left). Panel C represents regular bursting (RB) reflecting regular oscillations (see <xref ref-type="fig" rid="F3">Figure 3C</xref> top-right and <xref ref-type="fig" rid="F4">Figure 4C</xref> top-left) and a second dominant high frequency (see <xref ref-type="fig" rid="F3">Figure 3C</xref> bottom-right).</p>
<sec>
<title>2.2.1 Results at microscale</title>
<p>The top left of <xref ref-type="fig" rid="F3">Figures 3A</xref>&#x02013;<xref ref-type="fig" rid="F3">C</xref> show the membrane voltages for ten excitatory neurons (thin red curves) and ten inhibitory neurons (thin blue curves) and mean membrane voltage of these neurons (thick curves). The middle left of <xref ref-type="fig" rid="F3">Figures 3A</xref>&#x02013;<xref ref-type="fig" rid="F3">C</xref> represent the adaptive currents from the same ensemble of neurons (thin curves) and the mean adaptive current of these neurons (thick curves). The third biological observable from the simulation is the Local Field Potential which differs among panels (see bottom left of <xref ref-type="fig" rid="F3">Figures 3A</xref>&#x02013;<xref ref-type="fig" rid="F3">C</xref>). The top right of <xref ref-type="fig" rid="F3">Figures 3A</xref>&#x02013;<xref ref-type="fig" rid="F3">C</xref> display spike raster plots of the excitatory population, in red, and the inhibitory population, in blue, of the left CA1. The spiking activity is homogeneously distributed between neurons and time frames for the A state, while the other two states show co-activation of neurons with different periods. The associated instantaneous firing rate is shown in the middle right of <xref ref-type="fig" rid="F3">Figures 3A</xref>&#x02013;<xref ref-type="fig" rid="F3">C</xref>. The spectral analysis of the instantaneous firing rate displays a peak around 3 Hz for the IS state (bottom left of <xref ref-type="fig" rid="F3">Figure 3B</xref>), no peaks for the A state (bottom left of <xref ref-type="fig" rid="F3">Figure 3A</xref>), and two peaks (around 6 Hz and 160 Hz) for the RB state (bottom left of <xref ref-type="fig" rid="F3">Figure 3C</xref>). 
For the RB state, the frequency of the first peak, 6 Hz, is also present in the mean of the adaptive currents, while the second peak is associated with the burst time, as shown in further detail in <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 1</xref>.</p>
</sec>
<sec>
<title>2.2.2 Results at macroscale</title>
<p>The top left of <xref ref-type="fig" rid="F4">Figures 4A</xref>&#x02013;<xref ref-type="fig" rid="F4">C</xref> display the instantaneous firing rate (light red) of the spiking neural network with the associated transferred mean firing rate of the left region of CA1 (thick red line). The neural network&#x00027;s different states affect the ECOG signals, as shown in the bottom left of <xref ref-type="fig" rid="F4">Figures 4A</xref>&#x02013;<xref ref-type="fig" rid="F4">C</xref>. The mean firing rate of excitatory (blue) and inhibitory (red) populations of each brain region are plotted in the graph on the right part of <xref ref-type="fig" rid="F4">Figures 4A</xref>&#x02013;<xref ref-type="fig" rid="F4">C</xref> and <xref ref-type="supplementary-material" rid="SM1">Supplementary Figures 2</xref>&#x02013;<xref ref-type="supplementary-material" rid="SM1">4</xref>.</p>
</sec>
</sec>
<sec>
<title>2.3 Workflow between NEST and TVB</title>
<p>The previous multiscale example uses the workflow between TVB and NEST for the co-simulation. As an implementation of the design pattern, this workflow comprises five modules: two simulators (TVB and NEST), one launcher and two transfer modules. All these modules are built with the capability to be repurposed or replaced, allowing for adjustments of components of transfer modules or communication protocols (see Discussion). Two additional proofs of concept were implemented to demonstrate the possibility of the reusability of the components. The first example replaces NEST with NEURON, and the second replaces TVB with Neurolib (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 25</xref>). Moreover, without extra development, we get a proof of concept of co-simulation between NEURON and Neurolib.</p>
<p>The simulators perform the actual integration of the dynamics in time and require two properties to be integrated within one optimized and coherent workflow. The first property is time delay equation management, essential for reducing data transfer overhead. The second property is the presence of a high bandwidth Input/Output (I/O) interface that facilitates the efficient exchange of data and parallel execution of the simulators. Since TVB and NEST did not have generic high bandwidth I/O interfaces by default, these had to be implemented for each simulator. Details of how these I/O interfaces were created are reported in <xref ref-type="supplementary-material" rid="SM1">Supplementary File 1</xref>. Briefly, the NEST interface uses the device nodes with a specific back-end, while TVB uses proxy nodes which are the interface with the external software.</p>
<p>The launcher prepares the environment for the simulation and initiates all the other modules, as shown in <xref ref-type="fig" rid="F5">Figure 5A</xref> (see details in the <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 5</xref>). The preparation consists of creating folders for the different modules, the logger files, and the common file with all the parameters of the co-simulation. Creating the parameters file provides the functionality to enforce consistent constraints on the parameters to be shared between the modules, such as ensuring the same integration step in both simulators, which is needed for correct synchronization between modules.</p>
<fig id="F5" position="float">
<label>Figure 5</label>
<caption><p>Architecture and performance of the co-simulation. <bold>(A)</bold> The interaction among the modules and data exchanges during co-simulation execution. The boxes in yellow mark start-up: initialization and configuration, the boxes in red for the termination of the simulation and the boxes in white for the simulation phase. <bold>(B&#x02013;D)</bold> Performance of the workflow is obtained for 1 s of simulated time (see Section 3 for more details). The reference implementation uses 1 MPI process, 6 virtual processes/threads, a synchronization time step of 2.0 ms, and simulates 20,000 neurons. <bold>(B)</bold> The wall clock time of the simulators as a function of the number of neurons. The total time of the co-simulation is represented in yellow. The &#x0201C;wait&#x0201D;, &#x0201C;simulation&#x0201D;, and &#x0201C;IO&#x0201D; times of NEST are represented as red surfaces hatched with, respectively, big circles, small circles and points. The &#x0201C;simulation&#x0201D; and &#x0201C;IO&#x0201D; times of TVB (Sanz Leon et al., <xref ref-type="bibr" rid="B59">2013</xref>) are represented as blue surfaces hatched with, respectively, horizontal lines and oblique lines. <bold>(C)</bold> Simulation time depending on the synchronized time between simulators. The color code is the same as in <bold>(B)</bold>. <bold>(D)</bold> Wall clock time depending on the number of virtual processes used by NEST (Hahne et al., <xref ref-type="bibr" rid="B32">2021</xref>). The green, blue, purple, red curves are associated with different parallelization strategies of NEST, respectively, only multithreading, 2 MPI processes with threads, 4 MPI processes with threads, and only MPI processes. The vertical blue line represents the number of cores of the computer. 
<bold>(E)</bold> The &#x0201C;transform between spikes to rate&#x0201D; and &#x0201C;transform between rates to spikes&#x0201D; blocks are displayed with the different steps for transformation of data between TVB and NEST.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fninf-18-1156683-g0005.tif"/>
</fig>
<p>The transfer modules connect simulators by transferring data between scales and adapting the communication delay throughout the simulation. Each module is comprised of three components: two interfaces and one transformer (see <xref ref-type="fig" rid="F1">Figure 1A</xref>, <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 14</xref>). These components are implemented in different files for reusability and modularity and are tested independently to ensure robustness (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 12</xref>). The interfaces are specific to each simulator, while the transformation can be extended, modified or reused since the transformation function is implemented as an independent process (see <xref ref-type="supplementary-material" rid="SM1">Supplementary File 2</xref>).</p>
<p>The components exchange data using a simple Application Programming Interface (API). The API is based on four functions and assumes that the connections are already established. The functions are &#x0201C;check if ready to get or send data&#x0201D;, &#x0201C;transfer data&#x0201D;, &#x0201C;end the transfer&#x0201D; and &#x0201C;release the connection&#x0201D; (see <xref ref-type="supplementary-material" rid="SM1">Supplementary File 2</xref>, <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 15</xref>). The API is implemented with two different technologies depending on the nature of the parallelisation of the components (multiprocessing or multithreading). In the case of multiprocessing, each component runs in an individual process, and a Message Passing Interface (MPI) is used to transfer data. In the case of multithreading, each component runs in an individual thread in a shared process, and the data is transferred using shared memory. Multithreading uses fewer computational resources (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 20</xref>). The transformation function provides neural mass firing rate values by using a sliding window, shown in <xref ref-type="fig" rid="F5">Figure 5E</xref>. The panel also illustrates the inverse transformation from the mean firing rates to spike trains using a multiple interaction process (Kuhn et al., <xref ref-type="bibr" rid="B38">2003</xref>).</p>
<p>The modular workflow execution is composed of three main blocks: start-up, simulation-loop and termination (see <xref ref-type="fig" rid="F5">Figure 5A</xref> and details in the <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 5</xref>).</p>
<p>The start-up procedure allocates a logger for each component, facilitating debugging of the co-simulation. Subsequently, the modules and their communication channels are configured according to the parameter file. At this stage, several initialisation files are generated with simulation parameters only available after instantiation of the model (e.g. id of NEST devices and MPI port description). The details of the generation of these files are described in Section 3.2.1.</p>
<p>Once the simulation is launched, the simulator time clocks are synchronized using asynchronous message passing: At each multi-simulator synchronization step, the simulator receives input data via an asynchronous message in MPI, after which the next step is simulated. The transfer modules can buffer data for one synchronization step until the receiving simulator is available for receiving. Each simulator requires an initial condition (NEST: initial voltage membrane and adaptation current and TVB: state of the node during the previous seconds) and an initial message. For TVB, this initial message is sent by the transformer processes while, for NEST, it is produced by transforming the initial condition of TVB.</p>
<p>Ultimately, the termination occurs at the end of the simulation by the simulators themselves (see Section 3 for details).</p>
</sec>
<sec>
<title>2.4 Performance</title>
<p>The evaluation of the performance is made against a fictitious workflow with optimal performance, a co-simulation with instantaneous communications between simulators. As all the modules are designed to run in parallel, the co-simulation time for each module is identical and equal to the total running time. The focus is only on the simulator timers because the time of the transformer components is dominated by the waiting time of data (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 5</xref>). The total running time of the simulators is divided into five parts. The &#x0201C;initialisation&#x0201D; time is the time of configuring the simulators and creating connections. The &#x0201C;ending&#x0201D; time is the time of closing the connections, stopping the simulator engine and terminating processes. The &#x0201C;simulation&#x0201D; time is the total time of the internal computation of simulator engines. The &#x0201C;wait&#x0201D; time is the total duration of waiting time for access to the data to transfer by the simulator interface of the transformer module. The &#x0201C;IO&#x0201D; time is the total duration of functions for exchanging data between simulators and the transfer modules minus the &#x0201C;wait&#x0201D; time.</p>
<p>A perfect co-simulator has the runtime of the slowest simulator; thus, &#x0201C;wait&#x0201D; and &#x0201C;IO&#x0201D; times equal zero. From <xref ref-type="fig" rid="F5">Figure 5B</xref> and the <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 17</xref>, the actual implementation is close to ideal when the number of neurons simulated by NEST is lower than 1,000. In this case, TVB is the slower simulator, and NEST spends most of the time waiting for data from TVB.</p>
<p>When the number of simulated neurons is between 1,000 and 20,000 neurons, the &#x0201C;simulation&#x0201D; time of TVB is approximately the same as the sum of the &#x0201C;simulation&#x0201D; time and &#x0201C;IO&#x0201D; time of NEST. In this condition, each simulator is waiting for the transformation of the data among scales.</p>
<p>When the number of simulated neurons is higher than 20,000, NEST is the slowest simulator. In this case, the co-simulation time is determined by the &#x0201C;simulation&#x0201D; time and the &#x0201C;IO&#x0201D; of NEST. The &#x0201C;wait&#x0201D; time is zeros, and the &#x0201C;IO&#x0201D; time is higher than the &#x0201C;simulation&#x0201D; time (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figures 17</xref>, <xref ref-type="supplementary-material" rid="SM1">21</xref>). The two principal causes are that the communication between modules is slower than inside the modules and the increased dimensionality of the input to NEST (Weidel et al., <xref ref-type="bibr" rid="B68">2016</xref>) (the increase of the number of neurons increasing the size of the neural spike data because each neuron in NEST receives an individual spike train). A closer look at the performance shows that the communication spends most of the time sending individual spike trains to NEST (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 24</xref>). However, the data size is related to the model chosen and can be reduced.</p>
<p>As shown by <xref ref-type="fig" rid="F5">Figures 5C</xref>, <xref ref-type="fig" rid="F5">D</xref>, some optimisations can be implemented to reduce the problem of overhead time of communication (Weidel et al., <xref ref-type="bibr" rid="B68">2016</xref>). <xref ref-type="fig" rid="F5">Figure 5C</xref> and <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 18</xref> represents the time delay between brain regions when delayed data is aggregated to reduce the &#x0201C;IO&#x0201D; time and, hence, the co-simulation time. In this case, the simulators are not synchronized at each time step but at n time steps (limited by the model of connection). This aggregation can reduce co-simulation time by a factor of 6 (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figures 18</xref>, <xref ref-type="supplementary-material" rid="SM1">22</xref>). <xref ref-type="fig" rid="F5">Figure 5D</xref> and <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 19</xref> represent a reduction in co-simulation time by reducing the &#x0201C;simulation&#x0201D; time of the longest simulator. The increase in NEST&#x00027;s resources does not modify the &#x0201C;IO&#x0201D; time until the resource is available. Since the tests are running on one computer, increasing resources for NEST increases the &#x0201C;simulation&#x0201D; time of TVB and reduces the &#x0201C;simulation&#x0201D; time of NEST. However, by deploying the workflow on high-performance computing facilities, the latter result does not replicate, and the simulation time gives similar a result with an increase in &#x0201C;IO&#x0201D; and &#x0201C;simulation&#x0201D; time because the communication between nodes is slower (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 23</xref>). 
The second reason for this difference is the usage of multiprocessing for the transfer modules (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 20</xref>). In our implementation in Python, the multithreading is unstable on a supercomputer due to the global interpreter lock of Python (for more details, see Section 3.4.1). Additionally, multiprocessing, compared to multithreading, has the potential to distribute the different components of the transfer modules on different nodes.</p>
</sec>
</sec>
<sec sec-type="materials and methods" id="s3">
<title>3 Materials and methods</title>
<p>The simulation details and models&#x00027; parametrisation are in <xref ref-type="supplementary-material" rid="SM1">Supplementary Table 1</xref>. The format of this table is drawn from the proposition of Nordlie et al. (<xref ref-type="bibr" rid="B52">2009</xref>) for spiking neural networks. This new format includes the description of brain network modeling, the description of the coupling between scales and the description of the measurements of the simulation. This format contains more details than the proposition of Nordlie et al. because it contains all the parameters for the co-simulations.</p>
<p>The following text provides an overview of the models, communication between modules, details of the performance tests and implementation details.</p>
<sec>
<title>3.1 Models</title>
<sec>
<title>3.1.1 CA1 model</title>
<p>The spiking neural network of CA1 comprises two regions (left and right), which contains two populations, 8,000 excitatory neurons and 2000 inhibitory neurons. This network is simulated by NEST (Hahne et al., <xref ref-type="bibr" rid="B32">2021</xref>), a neuro-informatics platform for spiking neural networks. The adaptive exponential integrate and fire neurons (Brette and Gerstner, <xref ref-type="bibr" rid="B9">2005</xref>) are connected by exponential conductance-based synapses with a connection probability of 5% inside the region. The excitatory population establishes normalized weighted connections with the other regions defined by the mouse connectivity atlas. Additionally, we assume that each neuron has the same unique number of synaptic connections from other brain regions; the mouse connectome defines the repartition of these synapses. Transmission delay between regions is defined as the ratio of the distance between the regions and the transmission speed. Calculating these ratios is part of the configuration of The Virtual Brain (TVB) (Sanz Leon et al., <xref ref-type="bibr" rid="B59">2013</xref>) because the data required by TVB is the track lengths between regions and the speed of the transmission. Within a region, the synaptic transmission delay is instantaneous, which is implemented in NEST by setting the delay to the smallest transmission delay supported. In addition, the neurons can receive external noise input modeled as an independent Poisson process in addition to the external stimuli received from other regions through the transfer of mean firing rates as transformed spike trains.</p>
</sec>
<sec>
<title>3.1.2 Mouse brain model</title>
<p>The mouse brain model is simulated using The Virtual Brain (Sanz Leon et al., <xref ref-type="bibr" rid="B59">2013</xref>; Melozzi et al., <xref ref-type="bibr" rid="B46">2017</xref>), a neuro-informatics platform for connectome-based whole-brain network modeling. The &#x0201C;connectome&#x0201D; used here is extracted from Allen Mouse Brain Connectivity Atlas (Oh et al., <xref ref-type="bibr" rid="B53">2014</xref>) in 2017. The large-scale brain network is comprised of linearly coupled neural mass models. Specifically, the model representing each region is a second-order Mean Ad Ex model (di Volo et al., <xref ref-type="bibr" rid="B18">2019</xref>) with adaptation, representing the mean firing rate for an ensemble of one excitatory and one inhibitory neuronal population.</p>
</sec>
<sec>
<title>3.1.3 Electrophysiological monitoring model</title>
<p>The electrophysiological monitoring variables are computed using two models representing the cortical and implanted sensors. The electrocorticography model is a simple forward solution of a dipole at the region level. The electric field recorded by the virtual sensors at the brain level is based on two assumptions: considering the brain as a homogeneous space, and the field is generated only from excitatory populations. With these assumptions, the recorded field is the sum of excitatory population activities, i.e. the mean excitatory firing rate weighted by the distance between the sensors and the region&#x00027;s center (Sanz-Leon et al., <xref ref-type="bibr" rid="B60">2015</xref>). The implanted sensors&#x00027; signals are computed from point-neuron activities using a hybrid scheme for modeling local field potentials (LFP). Specifically, each potential is simulated using hybridLFPy (Hagen et al., <xref ref-type="bibr" rid="B31">2016</xref>), which incorporates the recorded spike from the network and the morphology of the pyramidal and basket cells.</p>
</sec>
<sec>
<title>3.1.4 Choice of three sets of parameters</title>
<p>Three parameter regimes were implemented to simulate well-known characteristic neural network dynamics: irregular synchronous, irregular asynchronous, and regular bursting.</p>
<p>The parameters for Irregular Synchronous state follow the work of di Volo et al. (<xref ref-type="bibr" rid="B18">2019</xref>). The coupling between regions and the noise is tuned manually to the regime of fluctuations of the firing rate in each region. The Asynchronous state was realized by reducing the degree of fluctuations. The result is a reduction of the spike-triggered adaptation of the excitatory neurons, a reduction of the number of connections between the regions, an augmentation of the inhibitory synaptic weights, a reduction of the variance of the noise and the addition of a Poisson generator for the spiking neural network. The Regular Bursting state is obtained when changing the voltage reset of the membrane and the leak of the reversal potential of the excitatory and inhibitory neurons, the spike-triggered adaptation and the time constant of the adaptation current of excitatory neurons. An empirical exploration of the models is done to get a balanced spiking neural network and the desired brain dynamic. The result of this exploration is a reduction of the connection between regions and a reduction of the connection between excitatory and inhibitory neurons, a reduction of the number of connections between brain regions and a reduction of the noise variance.</p>
<p>All the numerical values of the parameters are in <xref ref-type="supplementary-material" rid="SM1">Supplementary Table 1</xref>.</p>
</sec>
</sec>
<sec>
<title>3.2 Communication between modules</title>
<sec>
<title>3.2.1 Initialization of communication</title>
<p>During the initialization of the simulation, the launcher creates a specific folder for each module and an extra folder for the logger file of all components. Subsequently, the launcher creates user-defined relationships between parameters, such as copying one parameter into another to have the same values or the result of linear functions of parameters. All these parameters are saved in a file and organized in sections dedicated to a module or part of the co-simulation. The launcher also saves the initial message sent to TVB.</p>
<p>Once each module is launched, they will create some files in the folders generated by the launcher to initialize the communication. NEST will create two files with the ids of devices for recording and generating spikes, which are used by the transformer modules for sending and receiving spikes to the right devices. Transformer modules will create files containing the MPI port description which are used by NEST and the wrapper of TVB for connecting to them. TVB saves its initial conditions to allow possible reproducibility of the co-simulation.</p>
</sec>
<sec>
<title>3.2.2 Synchronization between modules</title>
<p>The transfer modules synchronize the simulation by managing the access to its internal buffer and receiving status messages from the simulators. The receiver process receives the data and aggregates them in a buffer. Rate data do not need to be buffered when using MPI communication, they are sent or received directly to the transformer process. The data is transferred to the transformation function when the data of the preceding step are transformed and transferred to the sender process. The sender process gets the data after sending the data of the preceding step to the simulator. It can only send the data to the simulator when it is ready. In addition, the simulator needs to await data for the next step of the simulation. Given all these constraints, the transfer module assures correct transport and keeps the components synchronized. If needed the transfer module buffers data for a simulation step. The transfer module can receive and send data concurrently and translation can be performed while waiting for the slowest simulator.</p>
</sec>
</sec>
<sec>
<title>3.3 Performance tests</title>
<p>The performance tests are realized with time recorders integrated at specific places in the code. These times are aggregated durations to evaluate the running time of the co-simulation in each section. This allows evaluating the time of &#x0201C;simulation&#x0201D;, &#x0201C;IO&#x0201D; and &#x0201C;wait&#x0201D; time. Each test is done for 10 trials of 1 second of simulated time for asynchronous configurations with one or two parameters (number of neurons, synchronization step, number of virtual processes of NEST, number of processes dedicated to NEST and number of nodes used by NEST) which vary per test. The results of the trials are averaged to reduce the variability of the measurements. The varied parameters of the tests are the number of spiking neurons, synchronized time between simulators and the configuration of MPI and thread of NEST. <xref ref-type="fig" rid="F5">Figure 5</xref> and <xref ref-type="supplementary-material" rid="SM1">Supplementary Figures 17</xref>&#x02013;<xref ref-type="supplementary-material" rid="SM1">19</xref> show the result of the performance test done on DELL Precision-7540 [Intel Xeon(R) E-2286M CPU 2.40 GHz &#x0002A; 8 cores &#x0002A; 2 threads, 64 GB of Ram with Ubuntu 18.04.5]. The communication between components in the transfer module was performed with the multithreading approach. <xref ref-type="supplementary-material" rid="SM1">Supplementary Figures 21</xref>&#x02013;<xref ref-type="supplementary-material" rid="SM1">23</xref> are generated using the Jusuf system (<ext-link ext-link-type="uri" xlink:href="https://apps.fz-juelich.de/jsc/hps/jusuf/cluster/configuration.html">https://apps.fz-juelich.de/jsc/hps/jusuf/cluster/configuration.html</ext-link>) which is composed of nodes with 2 AMD EPYC 7742 2.25 GHz &#x0002A; 64 cores &#x0002A; 2 threads, 256 (16x16) GB DDR4 with 3,200 MHz, connected by InfiniBand HDR100 (Connect-X6). In this second case, the transfer module uses MPI protocol to communicate between components.</p>
</sec>
<sec>
<title>3.4 Implementation details</title>
<p>The source code of the co-simulation is open-source and contains Python script and C&#x0002B;&#x0002B; files. A singularity and a docker image are also available on singularity-hubs to replicate the figures as in the performance test. The activity diagram (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 5</xref>) describes in detail the interaction between each module and components for this specific virtual experimentation.</p>
<p>The implementation of Input and Output for NEST used the existing simulator&#x00027;s architecture and parallelization strategy. NEST has different back-ends for the input and output data, the creation of a new back-end for the communication of the data was enough for integration in the co-simulation design pattern (for more details see <xref ref-type="supplementary-material" rid="SM1">Supplementary File 1</xref>). For more technical details about the communication with NEST, an activity diagram (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 6</xref>) describes the communication protocol with NEST back-end. For this specific example, the states of the wrapper of NEST and the states of transfer components which communicate with NEST are described respectively by the <xref ref-type="supplementary-material" rid="SM1">Supplementary Figures 7</xref>, <xref ref-type="supplementary-material" rid="SM1">8</xref>.</p>
<p>The implementation of Input and Output for TVB is different because TVB doesn&#x00027;t use MPI for its parallelization and it doesn&#x00027;t have an interface for exchanging data outside of the simulator. The creation of the interface required a modification of the simulator engine during its configuration for integrating the functions to exchange data with the transformer and a wrapper for communication with the transformer modules (for more details see <xref ref-type="supplementary-material" rid="SM1">Supplementary File 1</xref>). For more technical details about the communication with TVB, an activity diagram (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 9</xref>) describes the communication protocol with the TVB wrapper. For this specific example, the states of the wrapper of TVB and the states of transfer components which communicate with the wrapper of TVB are described respectively by the <xref ref-type="supplementary-material" rid="SM1">Supplementary Figures 10</xref>, <xref ref-type="supplementary-material" rid="SM1">11</xref>.</p>
<p>The description of the transfer modules is partially described in <xref ref-type="supplementary-material" rid="SM1">Supplementary File 2</xref> which focuses only on the interface with simulators. In addition to this note, the state of the different components are described in the <xref ref-type="supplementary-material" rid="SM1">Supplementary Figures 8</xref>, <xref ref-type="supplementary-material" rid="SM1">11</xref>, <xref ref-type="supplementary-material" rid="SM1">13</xref>. To better understand different instances and classes in this module, <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 14</xref> describes all the instances and their role and <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 15</xref> describes the composition of the abstract class and the simple API for communication. The communication protocol for data exchange between transfer module components differs depending on whether the parallelization strategy is multithreading or multiprocessing. In the case of multiprocessing, MPI protocol is used for data exchange. The communication protocol differs depending on the data type, as shown by <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 16</xref>. The spike trains are variable and large data (they can go from a few Kilobytes to more than one Megabyte depending on the firing rates). According to it, we choose to use shared memory for transferring data. For the mean rate data, the data size is constant and small (a few Kilobytes depending on the number of regions). According to it, we choose to use the functions Send and Receive of MPI protocol for transferring the data. In the case of multithreading, only a shared buffer is used between threads.</p>
<sec>
<title>3.4.1 Deadlock due to global interpreter of python</title>
<p>In the case of multithreading for internal communication in the transfer modules, the program may be in a deadlock because the interface with a simulator does not receive the information of receiving data. As it is explained in the global interpreter lock documentation, &#x0201C;The GIL (global interpreter lock) can cause I/O-bound threads to be scheduled ahead of CPU-bound threads, and it prevents signals from being delivered&#x0201D; (<ext-link ext-link-type="uri" xlink:href="https://wiki.python.org/moin/GlobalInterpreterLock">https://wiki.python.org/moin/GlobalInterpreterLock</ext-link>). The consequence is that some signals used by MPI are not delivered, which creates a situation where a simulator and a transformer are waiting for an MPI message from the other one, but these messages will never arrive.</p>
</sec>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<title>4 Discussion</title>
<p>The Parallel Co-simulation toolbox presented here provides co-simulation technology linking two simulators operating at two different scales with the only two requirements that the simulator simulates time delay equations and has an interface for sending and receiving data from outside of itself. In our application, the simulator needs to use MPI to send and receive data. This workflow is based on the cyclic coupling topology of modules (Chopard et al., <xref ref-type="bibr" rid="B12">2014</xref>), i.e each module regularly receives new inputs during the simulation. The two scale-specific simulators are interchangeable due to the genericity of the transfer function, as well as the modularity and design of the transfer module (for more characterization of the workflow, see <xref ref-type="supplementary-material" rid="SM1">Supplementary File 3</xref>). The interfaces of the simulators and other modules serve as a software science co-design pattern and can be reused in other studies involving co-simulations.</p>
<p>Our approach separates the theoretical challenge of coupling models at different scales from the technical challenge of coupling the corresponding simulators. The simplicity of the design pattern allows the scientific community to advance their research project without being hindered by technical details. Best practices are advised on carrying out a task or implementing the design pattern. These challenges are not unique to using the Parallel Co-simulation toolbox, but apply to most technical implementations of multiscale modeling software. On the technical side, the design pattern does not provide guidelines for the co-simulation&#x00027;s robustness, management and maintenance, similar to the closely related staged deployment and support software for multiscale simulations developed in EBRAINS (<ext-link ext-link-type="uri" xlink:href="https://juser.fz-juelich.de/record/850819">https://juser.fz-juelich.de/record/850819</ext-link>). On the conceptual side, for proper use of co-simulation technology, a profound understanding of the involved models is necessary to avoid operating the models outside of their valid parameter ranges. For instance, the neural mass model used in this paper cannot capture the fast scale dynamics, especially the fast regimes of regular burst state (RS) (Boustani and Destexhe, <xref ref-type="bibr" rid="B6">2009</xref>). In the neural mass model&#x00027;s derivation, the input firing rate of the neurons is assumed to be an adiabatic process, which is valid in some parameter regimes, but violated for the irregular synchronous state (IS), in which rapid transitions between low and high firing rates occur. As co-simulation requires an understanding of models typically used in at least two different and non-overlapping fields, particular attention should be paid to the responsible use of multiscale models. 
Such caution should also be applied here when interpreting the results of the CA1 model and the full brain network model used in this paper. Numerical errors constitute another issue. As these errors cannot be estimated analytically, the alternative solution is to perform a sensitivity analysis or uncertainty quantification to determine whether or not the simulation result is reliable (Coveney and Highfield, <xref ref-type="bibr" rid="B16">2021</xref>; Coveney et al., <xref ref-type="bibr" rid="B15">2021</xref>).</p>
<p>For the validation of the co-simulation, it is essential to generate data that can be related to real-world observations, as is the case here with the model of the two types of electrodes. A critical issue is the repeatability and reproducibility of the simulations. Repeatability is ensured by managing all the random generators in each simulator and using a single parameter file for the co-simulation setup. For reproducibility, due to the complexity of the network, a table is proposed where the configuration of each simulator is reported with their version and also the description of the transformation modules (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Table 1</xref>). A notable property of this design pattern is the independence of its modules and components. This independence allows unit testing for each of them. Our design pattern also requires the implementation of a minimal reusable simulator interface for interaction between simulators. In a possible second stage, this interface can be adapted to a standard to increase the possibility of interaction with other simulators.</p>
<p>EBRAINS provides two solutions for co-simulation (Schirner et al., <xref ref-type="bibr" rid="B62">2022</xref>) using The Virtual Brain, that is TVB-Multiscale tool and the here described Parallel Co-simulation toolbox. The two tools implement conceptually and technically two different solutions. The TVB Multiscale tool focuses on user convenience, allowing for rapidly prototyping scientific use cases using a single interface to configure all modules in the co-simulation. It is based on serial approaches for the co-simulation, i.e., each module is run one after the other. The Parallel Co-simulation tool, on the other hand, focuses on optimizing performance. The detailed description, benchmarking and validation of the Parallel Co-simulation toolbox is the topic of the current manuscript. Consequently, the TVB-Multiscale tool is slower. Performance tests show that the various modules run in parallel and adapt to the slowest module (see Section 2.4). The waiting time of the slowest modules is quasi-null, which means there is no loss of time in the synchronization of modules. Performance in co-simulation is an important criterion, as the microscopic simulators are typically very high-dimensional and hence computationally costly. The serial approaches can be interesting when a computer does not have at least one CPU core per module because, under this condition, modules need to share resources which can slow the co-simulation. This was demonstrated by the large increase of simulation time when the number of virtual processes for NEST is higher than the number of physical CPU cores. The other important distinguishing feature of the two co-simulation toolboxes is the unique interface for multiscale simulations. Similarly to TVB-Multiscale, multiscale simulators have the advantage of having a unique interface for multiscale simulations. 
This unique interface simplifies the simulation configuration but also reduces the specificity of functionality for each scale, which may be disadvantageous for some situations, such as optimization. For example, in our application, spiking neuron and brain region models require different integrators to avoid numerical errors and enhance efficiency. The CA1 model is a sparsely connected network of thousands of neurons using event communications. The mouse brain is a fully connected network of hundreds of regions based on continuous communications. Consequently, the optimization strategy is different and requires specificity.</p>
<p>Other existing frameworks to deploy and communicate runtime data between simulators comparable to Parallel Co-simulation include the Multi-Simulation Coordinator (MUSIC) (Djurfeldt et al., <xref ref-type="bibr" rid="B19">2010</xref>). By default, MUSIC does not include modules that facilitate translation between scales, which is needed when coupling simulators on different scales of abstraction. An extension of MUSIC has been proposed in Jordan et al. (<xref ref-type="bibr" rid="B37">2019</xref>) which proposes encoders and a decoder for transforming data and adapters for connecting to other systems of communication, such as ZeroMQ (Hintjens, <xref ref-type="bibr" rid="B34">2013</xref>) and ROS (Quigley et al., <xref ref-type="bibr" rid="B55">2009</xref>). The design of this extension has some similarities to our design pattern (Weidel et al., <xref ref-type="bibr" rid="B68">2016</xref>) and allows easy extension to include new methods in the future. However, the main difference with this extension is the parallelization of modules. On a more technical level, a second difference is how MUSIC uses the HPC transport protocol Message Passing Interface (MPI) (Message Passing Interface Forum, <xref ref-type="bibr" rid="B47">2015</xref>). MUSIC takes ownership of the highest level MPI environment (MPI_COMM_WORLD); this can cause challenges when integrating MUSIC with simulators that expect exclusive ownership of this highest level. Our implementation does not touch this highest-level ownership. We use MPI&#x00027;s client-server functionality to connect between simulators, completely evading this challenge. This difference in MPI usage also allows better use of the HPC scheduling mechanisms as each simulator is deployed in isolation, facilitating optimal workload placement on the hardware available. 
MUSIC does support several features currently not implemented in our implementation of the design pattern: multi-rate integration, i.e., different frequencies of sending and receiving data from simulators, generic configuration file and it prevents some simulation errors by using the MPI error system. On the other hand, our implementation of the design pattern allows for easy extension with new simulators and better distribution on HPC systems.</p>
<p>Outside of neuroscience, standards exist for co-simulation, such as High Level Architecture (HLA) (Saunders, <xref ref-type="bibr" rid="B61">2010</xref>) and Functional Mock-up Interface (FMI) (Andreas Junghanns et al., <xref ref-type="bibr" rid="B3">2021</xref>). These standards include error management, multi-rate integration, and data management (Saunders, <xref ref-type="bibr" rid="B61">2010</xref>; Blockwitz et al., <xref ref-type="bibr" rid="B4">2012</xref>). The main difference between MUSIC and our design pattern with these two standards is the communication strategy between simulators. FMI provides a standard for exchanging models and for scheduled execution (Andreas Junghanns et al., <xref ref-type="bibr" rid="B3">2021</xref>). Features of FMI and MUSIC currently not implemented in our design pattern are: Real-time hardware interactions (Moren et al., <xref ref-type="bibr" rid="B49">2015</xref>; Andreas Junghanns et al., <xref ref-type="bibr" rid="B3">2021</xref>). Additionally, FMI supports signal extrapolation for error reduction (Blockwitz et al., <xref ref-type="bibr" rid="B4">2012</xref>), although this could be added in the translation modules central in our design pattern. FMI does not support concurrent execution of the different simulators, although internally, the simulations can be parallelized (Andreas Junghanns et al., <xref ref-type="bibr" rid="B3">2021</xref>) and FMIGo proposes a parallelization implementation of FMI (Lacoursi&#x000E8;re and H&#x000E4;rdin, <xref ref-type="bibr" rid="B39">2017</xref>). HLA is designed for distributed systems and provides a standard for data exchange and time management of simulators (Neema et al., <xref ref-type="bibr" rid="B50">2014</xref>; Gutlein et al., <xref ref-type="bibr" rid="B30">2020</xref>). HLA facilitates the re-usability and interoperability of simulators and models by describing each component&#x00027;s roles and interactions. 
It further formalizes the data exchange and coordination between simulators and follows the publish/subscribe pattern. This standard specifies the definition of information produced or required by simulators. It provides a common data model for the reconciliation of model definitions and interoperability of simulators including during distributed runtime execution. Typically these services are implemented with a centralized communication architecture (Gutlein et al., <xref ref-type="bibr" rid="B30">2020</xref>), sometimes described as a hub and spoke model. Both MUSIC and our design pattern use direct, peer-to-peer communication.</p>
<p>In addition to the simulator interfacing standard, there are standards more focused on coupling models at different scales, such as MUSCLE2 (Borgdorff et al., <xref ref-type="bibr" rid="B5">2014</xref>), Yggdrasil (Lang, <xref ref-type="bibr" rid="B40">2019</xref>) and Vivarium (Agmon et al., <xref ref-type="bibr" rid="B1">2022</xref>). These standards provide solutions to the semantic and syntactic problems induced by multi-scale issues that are not addressed by our design pattern. Our design scheme is model-independent and delegates responsibility for the consistency of multi-scale modeling to users. However, our design pattern can be combined with one of these standards to provide a complete digital platform for multi-scale co-simulation.</p>
<p>In summary, we have presented a new software science co-design pattern of the Parallel Co-simulation tool for coupling simulators with a transformation module. This design pattern provides the first step for developing platforms using transitional scaling models and structuring the future syntactic, semantic and conceptual issues induced by multiscale problems. The optimization for this workflow is based on the communication delay between scales. It is not generalized for all cases but recommended for models with transmission line element method (Braun and Krus, <xref ref-type="bibr" rid="B8">2016</xref>) or waveform relaxation method (Nguyen et al., <xref ref-type="bibr" rid="B51">2007</xref>).</p>
</sec>
<sec sec-type="data-availability" id="s5">
<title>Data availability statement</title>
<p>The datasets presented in this study can be found in online repositories. The names of the repository/repositories and accession number(s) can be found below: <ext-link ext-link-type="uri" xlink:href="http://dx.doi.org/10.5281/zenodo.7259022">http://dx.doi.org/10.5281/zenodo.7259022</ext-link>. The co-simulation between TVB and NEST is freely available under v2 Apache license at <ext-link ext-link-type="uri" xlink:href="https://github.com/multiscale-cosim/TVB-NEST">https://github.com/multiscale-cosim/TVB-NEST</ext-link> on the branch Paper TVB-NEST and Paper TVB-NEST with timer. A Docker container containing the project can be freely downloaded from EBRAINS (<ext-link ext-link-type="uri" xlink:href="https://docker-registry.ebrains.eu/harbor/projects/53/repositories">https://docker-registry.ebrains.eu/harbor/projects/53/repositories</ext-link>).</p>
</sec>
<sec sec-type="author-contributions" id="s6">
<title>Author contributions</title>
<p>Conceptualization: LK, SD-P, and WK. Methodology and investigation: LK, SD-P, WK, and KS. Visualization and writing&#x02014;original draft: LK. Supervision: AM and VJ. Writing&#x02014;review and editing: LK, SD-P, WK, KS, CB, and VJ. All authors contributed to the article and approved the submitted version.</p>
</sec>
</body>
<back>
<sec sec-type="funding-information" id="s7">
<title>Funding</title>
<p>This research has received funding from the European Union&#x00027;s Horizon 2020 Framework Programme for Research and Innovation under the Specific Grant Agreement No. 945539 (Human Brain Project SGA3) and Specific Grant Agreement No. 785907 (Human Brain Project SGA2). We acknowledge the use of Fenix Infrastructure resources, which were partially funded from the European Union&#x00027;s Horizon 2020 research and innovation programme through the ICEI project under the grant agreement No. 800858.</p>
</sec>
<ack><p>The authors would like to thank Mario Lavanga for helpful feedback, Ingles Chavez Rolando for technical support, and numerous colleagues for comments about the texts and figures. Also, the authors gratefully acknowledge the support team of the supercomputer DEEP-EST and JURECA at Forschungszentrum J&#x000FC;lich and Piz Daint at CSCS&#x02014;Swiss National Supercomputing Center.</p>
</ack>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s8">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="s9">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fninf.2024.1156683/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fninf.2024.1156683/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Agmon</surname> <given-names>E.</given-names></name> <name><surname>Spangler</surname> <given-names>R. K.</given-names></name> <name><surname>Skalnik</surname> <given-names>C. J.</given-names></name> <name><surname>Poole</surname> <given-names>W.</given-names></name> <name><surname>Peirce</surname> <given-names>S. M.</given-names></name> <name><surname>Morrison</surname> <given-names>J. H.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Vivarium: an interface and engine for integrative multiscale modeling in computational biology</article-title>. <source>Bioinformatics</source> <volume>38</volume>, <fpage>1972</fpage>&#x02013;<lpage>1979</lpage>. <pub-id pub-id-type="doi">10.1093/bioinformatics/btac049</pub-id><pub-id pub-id-type="pmid">35134830</pub-id></citation></ref>
<ref id="B2">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Akar</surname> <given-names>N. A.</given-names></name> <name><surname>Cumming</surname> <given-names>B.</given-names></name> <name><surname>Karakasis</surname> <given-names>V.</given-names></name> <name><surname>K&#x000FC;sters</surname> <given-names>A.</given-names></name> <name><surname>Klijn</surname> <given-names>W.</given-names></name> <name><surname>Peyser</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>&#x0201C;Arbor a morphologically-detailed neural network simulation library for contemporary high-performance computing architectures,&#x0201D;</article-title> in <source>2019 27th Euromicro International Conference on Parallel, Distributed and Network-Based Processing (PDP)</source>, eds. F. Leporati, G. Danese, E. Torti, and D. D&#x00027;Agostino (<publisher-loc>Pavia</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>274</fpage>&#x02013;<lpage>282</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://ieeexplore.ieee.org/document/8671560/authors&#x00023;authors">https://ieeexplore.ieee.org/document/8671560/authors&#x00023;authors</ext-link></citation>
</ref>
<ref id="B3">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Andreas</surname> <given-names>J.</given-names></name> <name><surname>Gomes</surname> <given-names>C.</given-names></name> <name><surname>Schulze</surname> <given-names>C.</given-names></name> <name><surname>Schuch</surname> <given-names>K.</given-names></name> <name><surname>Pierre</surname> <given-names>B.</given-names></name> <name><surname>Matthias</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>&#x0201C;The functional mock-up interface 3.0 - new features enabling new applications,&#x0201D;</article-title> in <source>Proceedings of 14th Modelica Conference 2021</source>, eds. M. Sj&#x000F6;lund, L. Buffoni, A. Pop, and L. Ochel (<publisher-loc>Link&#x000F6;ping</publisher-loc>: <publisher-name>Modelica Conference</publisher-name>), <fpage>17</fpage>&#x02013;<lpage>26</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://ecp.ep.liu.se/index.php/modelica/article/view/178">https://ecp.ep.liu.se/index.php/modelica/article/view/178</ext-link></citation>
</ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Blockwitz</surname> <given-names>T.</given-names></name> <name><surname>Otter</surname> <given-names>M.</given-names></name> <name><surname>Akesson</surname> <given-names>J.</given-names></name> <name><surname>Arnold</surname> <given-names>M.</given-names></name> <name><surname>Clauss</surname> <given-names>C.</given-names></name> <name><surname>Elmqvist</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>&#x0201C;Functional Mockup Interface 2.0: the standard for tool independent exchange of simulation models,&#x0201D;</article-title> in <source>9th International MODELICA Conference, Munich, Germany</source>, <fpage>173</fpage>&#x02013;<lpage>184</lpage>.</citation>
</ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Borgdorff</surname> <given-names>J.</given-names></name> <name><surname>Mamonski</surname> <given-names>M.</given-names></name> <name><surname>Bosak</surname> <given-names>B.</given-names></name> <name><surname>Kurowski</surname> <given-names>K.</given-names></name> <name><surname>Ben Belgacem</surname> <given-names>M.</given-names></name> <name><surname>Chopard</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Distributed multiscale computing with MUSCLE 2, the multiscale coupling library and environment</article-title>. <source>J. Comput. Sci</source>. <volume>5</volume>, <fpage>719</fpage>&#x02013;<lpage>731</lpage>. <pub-id pub-id-type="doi">10.1016/j.jocs.2014.04.004</pub-id></citation>
</ref>
<ref id="B6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Boustani</surname> <given-names>S. E.</given-names></name> <name><surname>Destexhe</surname> <given-names>A.</given-names></name></person-group> (<year>2009</year>). <article-title>A master equation formalism for macroscopic modeling of asynchronous irregular activity states</article-title>. <source>Neural Comput</source>. <volume>21</volume>, <fpage>46</fpage>&#x02013;<lpage>100</lpage>. <pub-id pub-id-type="doi">10.1162/neco.2009.02-08-710</pub-id><pub-id pub-id-type="pmid">19210171</pub-id></citation></ref>
<ref id="B7">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Bower</surname> <given-names>J. M.</given-names></name> <name><surname>Beeman</surname> <given-names>D.</given-names></name></person-group> (<year>1998</year>). <article-title>&#x0201C;Introduction,&#x0201D;</article-title> in <source>The Book of GENESIS: Exploring Realistic Neural Models with the GEneral NEural SImulation System</source>, eds. J. M. Bower, and D. Beeman (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>3</fpage>&#x02013;<lpage>5</lpage>.</citation>
</ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Braun</surname> <given-names>R.</given-names></name> <name><surname>Krus</surname> <given-names>P.</given-names></name></person-group> (<year>2016</year>). <article-title>Multi-threaded distributed system simulations using the transmission line element method</article-title>. <source>Simulation</source> <volume>92</volume>, <fpage>921</fpage>&#x02013;<lpage>930</lpage>. <pub-id pub-id-type="doi">10.1177/0037549716667243</pub-id></citation>
</ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brette</surname> <given-names>R.</given-names></name> <name><surname>Gerstner</surname> <given-names>W.</given-names></name></person-group> (<year>2005</year>). <article-title>Adaptive exponential integrate-and-fire model as an effective description of neuronal activity</article-title>. <source>J. Neurophysiol</source>. <volume>94</volume>, <fpage>3637</fpage>&#x02013;<lpage>3642</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00686.2005</pub-id><pub-id pub-id-type="pmid">16014787</pub-id></citation></ref>
<ref id="B10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cakan</surname> <given-names>C.</given-names></name> <name><surname>Jajcay</surname> <given-names>N.</given-names></name> <name><surname>Obermayer</surname> <given-names>K.</given-names></name></person-group> (<year>2023</year>). <article-title>neurolib: a simulation framework for whole-brain neural mass modeling</article-title>. <source>Cognit. Comp</source>. <volume>15</volume>, <fpage>1132</fpage>&#x02013;<lpage>1152</lpage>. <pub-id pub-id-type="doi">10.1007/s12559-021-09931-9</pub-id></citation>
</ref>
<ref id="B11">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Carnevale</surname> <given-names>N. T.</given-names></name> <name><surname>Hines</surname> <given-names>M. L.</given-names></name></person-group> (<year>2006</year>). <source>The NEURON Book</source>. <publisher-loc>Cambridge</publisher-loc>: <publisher-name>Cambridge University Press</publisher-name>.</citation>
</ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chopard</surname> <given-names>B.</given-names></name> <name><surname>Borgdorff</surname> <given-names>J.</given-names></name> <name><surname>Hoekstra</surname> <given-names>A. G.</given-names></name></person-group> (<year>2014</year>). <article-title>A framework for multi-scale modelling</article-title>. <source>Philos. Trans. Math. Phys. Eng</source>. 372, 20130378. <pub-id pub-id-type="doi">10.1098/rsta.2013.0378</pub-id><pub-id pub-id-type="pmid">24982249</pub-id></citation></ref>
<ref id="B13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cilliers</surname> <given-names>P.</given-names></name></person-group> (<year>2008</year>). <article-title>&#x0201C;3.1 knowing complex systems: The limits of understanding,&#x0201D;</article-title> in <source>A Vision of Transdisciplinarity: Laying Foundations for a World Knowledge Dialogue</source>, 43.</citation>
</ref>
<ref id="B14">
<citation citation-type="book"><person-group person-group-type="author"><collab>COMSOL</collab></person-group> (<year>2019</year>). <source>COMSOL Multiphysics Reference Manual</source>. <publisher-loc>Burlington MA</publisher-loc>: <publisher-name>The COMSOL Inc</publisher-name>.</citation>
</ref>
<ref id="B15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Coveney</surname> <given-names>P. V.</given-names></name> <name><surname>Groen</surname> <given-names>D.</given-names></name> <name><surname>Hoekstra</surname> <given-names>A. G.</given-names></name></person-group> (<year>2021</year>). <article-title>Reliability and reproducibility in computational science: implementing validation, verification and uncertainty quantification in silico</article-title>. <source>Philos. Trans. Math. Phys. Eng</source>. 379, 20200409. <pub-id pub-id-type="doi">10.1098/rsta.2020.0409</pub-id><pub-id pub-id-type="pmid">33775138</pub-id></citation></ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Coveney</surname> <given-names>P. V.</given-names></name> <name><surname>Highfield</surname> <given-names>R. R.</given-names></name></person-group> (<year>2021</year>). <article-title>When we can trust computers (and when we can&#x00027;t)</article-title>. <source>Philos. Trans. Math. Phys. Eng</source>. 379, 20200067. <pub-id pub-id-type="doi">10.1098/rsta.2020.0067</pub-id></citation>
</ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>D&#x00027;Angelo</surname> <given-names>E.</given-names></name> <name><surname>Jirsa</surname> <given-names>V.</given-names></name></person-group> (<year>2022</year>). <article-title>The quest for multiscale brain modeling</article-title>. <source>Trends Neurosci</source>. <volume>45</volume>, <fpage>777</fpage>&#x02013;<lpage>790</lpage>. <pub-id pub-id-type="doi">10.1016/j.tins.2022.06.007</pub-id><pub-id pub-id-type="pmid">35906100</pub-id></citation></ref>
<ref id="B18">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>di Volo</surname> <given-names>M.</given-names></name> <name><surname>Romagnoni</surname> <given-names>A.</given-names></name> <name><surname>Capone</surname> <given-names>C.</given-names></name> <name><surname>Destexhe</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>Biologically realistic mean-field models of conductance-based networks of spiking neurons with adaptation</article-title>. <source>Neural Comput</source>. <volume>31</volume>, <fpage>653</fpage>&#x02013;<lpage>680</lpage>. <pub-id pub-id-type="doi">10.1162/neco_a_01173</pub-id><pub-id pub-id-type="pmid">30764741</pub-id></citation></ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Djurfeldt</surname> <given-names>M.</given-names></name> <name><surname>Hjorth</surname> <given-names>J.</given-names></name> <name><surname>Eppler</surname> <given-names>J. M.</given-names></name> <name><surname>Dudani</surname> <given-names>N.</given-names></name> <name><surname>Helias</surname> <given-names>M.</given-names></name> <name><surname>Potjans</surname> <given-names>T. C.</given-names></name> <etal/></person-group>. (<year>2010</year>). <article-title>Run-time interoperability between neuronal network simulators based on the MUSIC framework</article-title>. <source>Neuroinformatics</source> <volume>8</volume>, <fpage>43</fpage>&#x02013;<lpage>60</lpage>. <pub-id pub-id-type="doi">10.1007/s12021-010-9064-z</pub-id><pub-id pub-id-type="pmid">20195795</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Dudkowski</surname> <given-names>D.</given-names></name></person-group> (<year>2009</year>). <article-title>&#x0201C;Co-design patterns for embedded network management,&#x0201D;</article-title> in <source>Proceedings of the 2009 Workshop on Re-Architecting the Internet</source> (<publisher-loc>Rome Italy. ACM</publisher-loc>), <fpage>61</fpage>&#x02013;<lpage>66</lpage>.</citation>
</ref>
<ref id="B21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Durstewitz</surname> <given-names>D.</given-names></name> <name><surname>Seamans</surname> <given-names>J. K.</given-names></name> <name><surname>Sejnowski</surname> <given-names>T. J.</given-names></name></person-group> (<year>2011</year>). <article-title>Neurocomputational models of working memory</article-title>. <source>Nat. Neurosci</source>. <volume>3</volume>, <fpage>1184</fpage>&#x02013;<lpage>1191</lpage>. <pub-id pub-id-type="doi">10.1038/81460</pub-id><pub-id pub-id-type="pmid">11127836</pub-id></citation></ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Einevoll</surname> <given-names>G. T.</given-names></name> <name><surname>Destexhe</surname> <given-names>A.</given-names></name> <name><surname>Gr&#x000FC;n</surname> <given-names>S.</given-names></name> <name><surname>Diesmann</surname> <given-names>M.</given-names></name> <name><surname>Jirsa</surname> <given-names>V.</given-names></name> <name><surname>de Kamps</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>The scientific case for brain simulations</article-title>. <source>Neuron</source> <volume>102</volume>, <fpage>735</fpage>&#x02013;<lpage>744</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuron.2019.03.027</pub-id><pub-id pub-id-type="pmid">31121126</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Falotico</surname> <given-names>E.</given-names></name> <name><surname>Vannucci</surname> <given-names>L.</given-names></name> <name><surname>Ambrosano</surname> <given-names>A.</given-names></name> <name><surname>Albanese</surname> <given-names>U.</given-names></name> <name><surname>Ulbrich</surname> <given-names>S.</given-names></name> <name><surname>Vasquez Tieck</surname> <given-names>J. C.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Connecting artificial brains to robots in a comprehensive simulation framework: The neurorobotics platform</article-title>. <source>Front. Neurorobot</source>. 11, 2. <pub-id pub-id-type="doi">10.3389/fnbot.2017.00002</pub-id><pub-id pub-id-type="pmid">28179882</pub-id></citation></ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Finkelstein</surname> <given-names>A.</given-names></name> <name><surname>Hetherington</surname> <given-names>J.</given-names></name> <name><surname>Linzhong</surname> <given-names>L.</given-names></name> <name><surname>Margoninski</surname> <given-names>O.</given-names></name> <name><surname>Saffrey</surname> <given-names>P.</given-names></name> <name><surname>Seymour</surname> <given-names>R.</given-names></name> <name><surname>Warner</surname> <given-names>A.</given-names></name></person-group> (<year>2004</year>). <article-title>Computational challenges of systems biology</article-title>. <source>Computer</source> <volume>37</volume>, <fpage>26</fpage>&#x02013;<lpage>33</lpage>. <pub-id pub-id-type="doi">10.1109/MC.2004.1297236</pub-id></citation>
</ref>
<ref id="B25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fish</surname> <given-names>J.</given-names></name> <name><surname>Wagner</surname> <given-names>G. J.</given-names></name> <name><surname>Keten</surname> <given-names>S.</given-names></name></person-group> (<year>2021</year>). <article-title>Mesoscopic and multiscale modelling in materials</article-title>. <source>Nat. Mater</source>. <volume>20</volume>, <fpage>774</fpage>&#x02013;<lpage>786</lpage>. <pub-id pub-id-type="doi">10.1038/s41563-020-00913-0</pub-id><pub-id pub-id-type="pmid">34045697</pub-id></citation></ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fr&#x000E9;gnac</surname> <given-names>Y.</given-names></name></person-group> (<year>2021</year>). <article-title>How blue is the sky?</article-title> <source>eNeuro</source> <volume>8</volume>, <fpage>2</fpage>. <pub-id pub-id-type="doi">10.1523/ENEURO.0130-21.2021</pub-id></citation>
</ref>
<ref id="B27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gewaltig</surname> <given-names>M.-O.</given-names></name> <name><surname>Diesmann</surname> <given-names>M.</given-names></name></person-group> (<year>2007</year>). <article-title>NEST (NEural simulation tool)</article-title>. <source>Scholarpedia</source> 2, 1430. <pub-id pub-id-type="doi">10.4249/scholarpedia.1430</pub-id></citation>
</ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Goddard</surname> <given-names>N.</given-names></name> <name><surname>Hood</surname> <given-names>G.</given-names></name> <name><surname>Howell</surname> <given-names>F.</given-names></name> <name><surname>Hines</surname> <given-names>M.</given-names></name> <name><surname>De Schutter</surname> <given-names>E.</given-names></name></person-group> (<year>2001</year>). <article-title>NEOSIM: Portable large-scale plug and play modelling</article-title>. <source>Neurocomputing</source> <volume>38&#x02013;40</volume>, <fpage>1657</fpage>&#x02013;<lpage>1661</lpage>. <pub-id pub-id-type="doi">10.1016/S0925-2312(01)00528-8</pub-id></citation>
</ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gomes</surname> <given-names>C.</given-names></name> <name><surname>Thule</surname> <given-names>C.</given-names></name> <name><surname>Broman</surname> <given-names>D.</given-names></name> <name><surname>Larsen</surname> <given-names>P. G.</given-names></name> <name><surname>Vangheluwe</surname> <given-names>H.</given-names></name></person-group> (<year>2018</year>). <article-title>Co-simulation: A survey</article-title>. <source>ACM Comput. Surv</source>. <volume>51</volume>, <fpage>1</fpage>&#x02013;<lpage>33</lpage>. <pub-id pub-id-type="doi">10.1145/3179993</pub-id></citation>
</ref>
<ref id="B30">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>G&#x000FC;tlein</surname> <given-names>M.</given-names></name> <name><surname>Baron</surname> <given-names>W.</given-names></name> <name><surname>Renner</surname> <given-names>C.</given-names></name> <name><surname>Djanatliev</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>&#x0201C;Performance evaluation of HLA RTI implementations,&#x0201D;</article-title> in <source>2020 IEEE/ACM 24th International Symposium on Distributed Simulation and Real Time Applications (DS-RT)</source> (<publisher-loc>Prague, Czech Republic</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x02013;<lpage>8</lpage>.</citation>
</ref>
<ref id="B31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hagen</surname> <given-names>E.</given-names></name> <name><surname>Dahmen</surname> <given-names>D.</given-names></name> <name><surname>Stavrinou</surname> <given-names>M. L.</given-names></name> <name><surname>Lind&#x000E9;n</surname> <given-names>H.</given-names></name> <name><surname>Tetzlaff</surname> <given-names>T.</given-names></name> <name><surname>van Albada</surname> <given-names>S. J.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Hybrid scheme for modeling local field potentials from point-neuron networks</article-title>. <source>Cereb. Cortex</source> <volume>26</volume>, <fpage>4461</fpage>&#x02013;<lpage>4496</lpage>. <pub-id pub-id-type="doi">10.1093/cercor/bhw237</pub-id><pub-id pub-id-type="pmid">27797828</pub-id></citation></ref>
<ref id="B32">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Hahne</surname> <given-names>J.</given-names></name> <name><surname>Diaz</surname> <given-names>S.</given-names></name> <name><surname>Patronis</surname> <given-names>A.</given-names></name> <name><surname>Schenck</surname> <given-names>W.</given-names></name> <name><surname>Peyser</surname> <given-names>A.</given-names></name> <name><surname>Graber</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2021</year>). <source>NEST 3.0</source>. Zenodo. Available online at: <ext-link ext-link-type="uri" xlink:href="https://zenodo.org/record/4739103">https://zenodo.org/record/4739103</ext-link></citation>
</ref>
<ref id="B33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hetherington</surname> <given-names>J.</given-names></name> <name><surname>Bogle</surname> <given-names>I. D. L.</given-names></name> <name><surname>Saffrey</surname> <given-names>P.</given-names></name> <name><surname>Margoninski</surname> <given-names>O.</given-names></name> <name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Rey</surname> <given-names>M. V.</given-names></name> <etal/></person-group>. (<year>2007</year>). <article-title>Addressing the challenges of multiscale model management in systems biology</article-title>. <source>Comp. Chem. Eng</source>. <volume>31</volume>, <fpage>962</fpage>&#x02013;<lpage>979</lpage>. <pub-id pub-id-type="doi">10.1016/j.compchemeng.2006.10.004</pub-id></citation>
</ref>
<ref id="B34">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Hintjens</surname> <given-names>P.</given-names></name></person-group> (<year>2013</year>). <source>ZeroMQ: Messaging for Many Applications</source>. <publisher-loc>Sebastopol, CA</publisher-loc>: <publisher-name>O&#x00027;Reilly Media, Inc</publisher-name>.</citation>
</ref>
<ref id="B35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huys</surname> <given-names>R.</given-names></name> <name><surname>Perdikis</surname> <given-names>D.</given-names></name> <name><surname>Jirsa</surname> <given-names>V. K.</given-names></name></person-group> (<year>2014</year>). <article-title>Functional architectures and structured flows on manifolds: a dynamical framework for motor behavior</article-title>. <source>Psychol. Rev</source>. <volume>121</volume>, <fpage>302</fpage>&#x02013;<lpage>336</lpage>. <pub-id pub-id-type="doi">10.1037/a0037014</pub-id><pub-id pub-id-type="pmid">25090422</pub-id></citation></ref>
<ref id="B36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jirsa</surname> <given-names>V.</given-names></name> <name><surname>Sheheitli</surname> <given-names>H.</given-names></name></person-group> (<year>2022</year>). <article-title>Entropy, free energy, symmetry and dynamics in the brain</article-title>. <source>J. Phys.: Complex</source>. 3, 015007. <pub-id pub-id-type="doi">10.1088/2632-072X/ac4bec</pub-id></citation>
</ref>
<ref id="B37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jordan</surname> <given-names>J.</given-names></name> <name><surname>Weidel</surname> <given-names>P.</given-names></name> <name><surname>Morrison</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>A closed-loop toolchain for neural network simulations of learning autonomous agents</article-title>. <source>Front. Comput. Neurosci</source>. 13, 46. <pub-id pub-id-type="doi">10.3389/fncom.2019.00046</pub-id><pub-id pub-id-type="pmid">31427939</pub-id></citation></ref>
<ref id="B38">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kuhn</surname> <given-names>A.</given-names></name> <name><surname>Aertsen</surname> <given-names>A.</given-names></name> <name><surname>Rotter</surname> <given-names>S.</given-names></name></person-group> (<year>2003</year>). <article-title>Higher-order statistics of input ensembles and the response of simple model neurons</article-title>. <source>Neural Comput</source>. <volume>15</volume>, <fpage>67</fpage>&#x02013;<lpage>101</lpage>. <pub-id pub-id-type="doi">10.1162/089976603321043702</pub-id><pub-id pub-id-type="pmid">12590820</pub-id></citation></ref>
<ref id="B39">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Lacoursi&#x000E8;re</surname> <given-names>C.</given-names></name> <name><surname>H&#x000E4;rdin</surname> <given-names>T.</given-names></name></person-group> (<year>2017</year>). <article-title>&#x0201C;FMI Go! A simulation runtime environment with a client server architecture over multiple protocols,&#x0201D;</article-title> in <source>12th International Modelica Conference, May 15-17, 2017, Prague, Czech Republic</source> (<publisher-loc>Prague</publisher-loc>: <publisher-name>Modelica Conference</publisher-name>), <fpage>653</fpage>&#x02013;<lpage>662</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://2017.international.conference.modelica.org/proceedings/html/material.html">https://2017.international.conference.modelica.org/proceedings/html/material.html</ext-link></citation>
</ref>
<ref id="B40">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lang</surname> <given-names>M.</given-names></name></person-group> (<year>2019</year>). <article-title>yggdrasil: a Python package for integrating computational models across languages and scales</article-title>. <source>in silico Plants</source> 1, diz001. <pub-id pub-id-type="doi">10.1093/insilicoplants/diz001</pub-id></citation>
</ref>
<ref id="B41">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lein</surname> <given-names>E. S.</given-names></name> <name><surname>Hawrylycz</surname> <given-names>M. J.</given-names></name> <name><surname>Ao</surname> <given-names>N.</given-names></name> <name><surname>Ayres</surname> <given-names>M.</given-names></name> <name><surname>Bensinger</surname> <given-names>A.</given-names></name> <name><surname>Bernard</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2007</year>). <article-title>Genome-wide atlas of gene expression in the adult mouse brain</article-title>. <source>Nature</source> <volume>445</volume>, <fpage>168</fpage>&#x02013;<lpage>176</lpage>. <pub-id pub-id-type="doi">10.1038/nature05453</pub-id><pub-id pub-id-type="pmid">17151600</pub-id></citation></ref>
<ref id="B42">
<citation citation-type="book"><person-group person-group-type="author"><collab>MATLAB</collab></person-group> (<year>2017</year>). <source>version 9.2.0 (R2017a)</source>. <publisher-loc>Natick, MA</publisher-loc>: <publisher-name>The MathWorks Inc</publisher-name>.</citation>
</ref>
<ref id="B43">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Matthews</surname> <given-names>M. L.</given-names></name> <name><surname>Marshall-Col&#x000F3;n</surname> <given-names>A.</given-names></name></person-group> (<year>2021</year>). <article-title>Multiscale plant modeling: from genome to phenome and beyond</article-title>. <source>Emerg. Topics Life Sci</source>. <volume>5</volume>, <fpage>231</fpage>&#x02013;<lpage>237</lpage>. <pub-id pub-id-type="doi">10.1042/ETLS20200276</pub-id><pub-id pub-id-type="pmid">33543231</pub-id></citation></ref>
<ref id="B44">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Meier</surname> <given-names>J. M.</given-names></name> <name><surname>Perdikis</surname> <given-names>D.</given-names></name> <name><surname>Blickensd&#x000F6;rfer</surname> <given-names>A.</given-names></name> <name><surname>Stefanovski</surname> <given-names>L.</given-names></name> <name><surname>Liu</surname> <given-names>Q.</given-names></name> <name><surname>Maith</surname> <given-names>O.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Virtual deep brain stimulation: multiscale co-simulation of a spiking basal ganglia model and a whole-brain mean-field model with the virtual brain</article-title>. <source>Exp. Neurol</source>. <volume>354</volume>:<fpage>114111</fpage>. <pub-id pub-id-type="doi">10.1016/j.expneurol.2022.114111</pub-id><pub-id pub-id-type="pmid">35569510</pub-id></citation></ref>
<ref id="B45">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Meier-Schellersheim</surname> <given-names>M.</given-names></name> <name><surname>Fraser</surname> <given-names>I. D. C.</given-names></name> <name><surname>Klauschen</surname> <given-names>F.</given-names></name></person-group> (<year>2009</year>). <article-title>Multiscale modeling for biologists</article-title>. <source>Wiley Interdiscip. Rev.: Syst. Biol. Med</source>. <volume>1</volume>, <fpage>4</fpage>&#x02013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1002/wsbm.33</pub-id></citation>
</ref>
<ref id="B46">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Melozzi</surname> <given-names>F.</given-names></name> <name><surname>Woodman</surname> <given-names>M. M.</given-names></name> <name><surname>Jirsa</surname> <given-names>V. K.</given-names></name> <name><surname>Bernard</surname> <given-names>C.</given-names></name></person-group> (<year>2017</year>). <article-title>The virtual mouse brain: A computational neuroinformatics platform to study whole mouse brain dynamics</article-title>. <source>eNeuro</source> 4, ENEURO.0111-17.2017. <pub-id pub-id-type="doi">10.1523/ENEURO.0111-17.2017</pub-id><pub-id pub-id-type="pmid">28664183</pub-id></citation></ref>
<ref id="B47">
<citation citation-type="web"><person-group person-group-type="author"><collab>Message Passing Interface Forum</collab></person-group> (<year>2015</year>). <source>MPI: A Message-Passing Interface Standard Version 3.1</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.mpi-forum.org/">https://www.mpi-forum.org/</ext-link></citation>
</ref>
<ref id="B48">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mitchinson</surname> <given-names>B.</given-names></name> <name><surname>Chan</surname> <given-names>T.-S.</given-names></name> <name><surname>Chambers</surname> <given-names>J.</given-names></name> <name><surname>Pearson</surname> <given-names>M.</given-names></name> <name><surname>Humphries</surname> <given-names>M.</given-names></name> <name><surname>Fox</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2010</year>). <article-title>BRAHMS: Novel middleware for integrated systems computation</article-title>. <source>Adv. Eng. Inform</source>. <volume>24</volume>, <fpage>49</fpage>&#x02013;<lpage>61</lpage>. <pub-id pub-id-type="doi">10.1016/j.aei.2009.08.002</pub-id></citation>
</ref>
<ref id="B49">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mor&#x000E9;n</surname> <given-names>J.</given-names></name> <name><surname>Sugimoto</surname> <given-names>N.</given-names></name> <name><surname>Doya</surname> <given-names>K.</given-names></name></person-group> (<year>2015</year>). <article-title>Real-time utilization of system-scale neuroscience models</article-title>. <source>Brain Neural Netw</source>. <volume>22</volume>, <fpage>125</fpage>&#x02013;<lpage>132</lpage>. <pub-id pub-id-type="doi">10.3902/jnns.22.125</pub-id></citation>
</ref>
<ref id="B50">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Neema</surname> <given-names>H.</given-names></name> <name><surname>Gohl</surname> <given-names>J.</given-names></name> <name><surname>Lattmann</surname> <given-names>Z.</given-names></name> <name><surname>Sztipanovits</surname> <given-names>J.</given-names></name> <name><surname>Karsai</surname> <given-names>G.</given-names></name> <name><surname>Neema</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>&#x0201C;Model-based integration platform for FMI co-simulation and heterogeneous simulations of cyber-physical systems,&#x0201D;</article-title> in <source>10th International Modelica Conference, March 10-12, 2014, Lund, Sweden</source>, <fpage>235</fpage>&#x02013;<lpage>245</lpage>.</citation>
</ref>
<ref id="B51">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nguyen</surname> <given-names>V. H.</given-names></name> <name><surname>Besanger</surname> <given-names>Y.</given-names></name> <name><surname>Tran</surname> <given-names>Q. T.</given-names></name> <name><surname>Nguyen</surname> <given-names>T. L.</given-names></name></person-group> (<year>2017</year>). <article-title>On conceptual structuration and coupling methods of co-simulation frameworks in cyber-physical energy system validation</article-title>. <source>Energies</source> <volume>10</volume>, <fpage>1977</fpage>. <pub-id pub-id-type="doi">10.3390/en10121977</pub-id></citation>
</ref>
<ref id="B52">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nordlie</surname> <given-names>E.</given-names></name> <name><surname>Gewaltig</surname> <given-names>M.-O.</given-names></name> <name><surname>Plesser</surname> <given-names>H. E.</given-names></name></person-group> (<year>2009</year>). <article-title>Towards reproducible descriptions of neuronal network models</article-title>. <source>PLoS Comput. Biol</source>. 5, e1000456. <pub-id pub-id-type="doi">10.1371/journal.pcbi.1000456</pub-id><pub-id pub-id-type="pmid">19662159</pub-id></citation></ref>
<ref id="B53">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Oh</surname> <given-names>S. W.</given-names></name> <name><surname>Harris</surname> <given-names>J. A.</given-names></name> <name><surname>Ng</surname> <given-names>L.</given-names></name> <name><surname>Winslow</surname> <given-names>B.</given-names></name> <name><surname>Cain</surname> <given-names>N.</given-names></name> <name><surname>Mihalas</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>A mesoscale connectome of the mouse brain</article-title>. <source>Nature</source> <volume>508</volume>, <fpage>207</fpage>&#x02013;<lpage>214</lpage>. <pub-id pub-id-type="doi">10.1038/nature13186</pub-id><pub-id pub-id-type="pmid">24695228</pub-id></citation></ref>
<ref id="B54">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pillai</surname> <given-names>A. S.</given-names></name> <name><surname>Jirsa</surname> <given-names>V. K.</given-names></name></person-group> (<year>2017</year>). <article-title>Symmetry breaking in space-time hierarchies shapes brain dynamics and behavior</article-title>. <source>Neuron</source> <volume>94</volume>, <fpage>1010</fpage>&#x02013;<lpage>1026</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuron.2017.05.013</pub-id><pub-id pub-id-type="pmid">28595045</pub-id></citation></ref>
<ref id="B55">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Quigley</surname> <given-names>M.</given-names></name> <name><surname>Conley</surname> <given-names>K.</given-names></name> <name><surname>Gerkey</surname> <given-names>B.</given-names></name> <name><surname>Faust</surname> <given-names>J.</given-names></name> <name><surname>Foote</surname> <given-names>T.</given-names></name> <name><surname>Leibs</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2009</year>). <article-title>&#x0201C;ROS: an open-source Robot Operating System,&#x0201D;</article-title> in <source>ICRA Workshop on Open Source Software</source>. (<publisher-loc>Kobe</publisher-loc>: <publisher-name>ICRA</publisher-name>), <fpage>5</fpage>.</citation>
</ref>
<ref id="B56">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rahman</surname> <given-names>M. M.</given-names></name> <name><surname>Feng</surname> <given-names>Y.</given-names></name> <name><surname>Yankeelov</surname> <given-names>T. E.</given-names></name> <name><surname>Oden</surname> <given-names>J. T.</given-names></name></person-group> (<year>2017</year>). <article-title>A fully coupled space time multiscale modeling framework for predicting tumor growth</article-title>. <source>Comput. Methods Appl. Mech. Eng</source>. <volume>320</volume>, <fpage>261</fpage>&#x02013;<lpage>286</lpage>. <pub-id pub-id-type="doi">10.1016/j.cma.2017.03.021</pub-id><pub-id pub-id-type="pmid">29158608</pub-id></citation></ref>
<ref id="B57">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rejniak</surname> <given-names>K. A.</given-names></name> <name><surname>Anderson</surname> <given-names>A. R. A.</given-names></name></person-group> (<year>2011</year>). <article-title>Hybrid models of tumor growth</article-title>. <source>Wiley Interdiscip. Rev. Syst. Biol. Med</source>. <volume>3</volume>, <fpage>115</fpage>&#x02013;<lpage>125</lpage>. <pub-id pub-id-type="doi">10.1002/wsbm.102</pub-id></citation>
</ref>
<ref id="B58">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Renz</surname> <given-names>A. F.</given-names></name> <name><surname>Lee</surname> <given-names>J.</given-names></name> <name><surname>Tybrandt</surname> <given-names>K.</given-names></name> <name><surname>Brzezinski</surname> <given-names>M.</given-names></name> <name><surname>Lorenzo</surname> <given-names>D. A.</given-names></name> <name><surname>Cheraka</surname> <given-names>M. C.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Opto-e-dura: a soft, stretchable ECoG array for multimodal, multiscale neuroscience</article-title>. <source>Adv. Healthc. Mat</source>. 9, 2000814. <pub-id pub-id-type="doi">10.1002/adhm.202000814</pub-id><pub-id pub-id-type="pmid">32691992</pub-id></citation></ref>
<ref id="B59">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sanz Leon</surname> <given-names>P.</given-names></name> <name><surname>Knock</surname> <given-names>S. A.</given-names></name> <name><surname>Woodman</surname> <given-names>M. M.</given-names></name> <name><surname>Domide</surname> <given-names>L.</given-names></name> <name><surname>Mersmann</surname> <given-names>J.</given-names></name> <name><surname>McIntosh</surname> <given-names>A. R.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>The virtual brain: a simulator of primate brain network dynamics</article-title>. <source>Front. Neuroinform</source>. 7, 10. <pub-id pub-id-type="doi">10.3389/fninf.2013.00010</pub-id></citation>
</ref>
<ref id="B60">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sanz-Leon</surname> <given-names>P.</given-names></name> <name><surname>Knock</surname> <given-names>S. A.</given-names></name> <name><surname>Spiegler</surname> <given-names>A.</given-names></name> <name><surname>Jirsa</surname> <given-names>V. K.</given-names></name></person-group> (<year>2015</year>). <article-title>Mathematical framework for large-scale brain network modeling in the virtual brain</article-title>. <source>Neuroimage</source> <volume>111</volume>, <fpage>385</fpage>&#x02013;<lpage>430</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2015.01.002</pub-id><pub-id pub-id-type="pmid">25592995</pub-id></citation></ref>
<ref id="B61">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Saunders</surname> <given-names>R.</given-names></name></person-group> (<year>2010</year>). <source>IEEE Standard for Modeling and Simulation (M &#x00026; S) High Level Architecture (HLA) Framework and Rules</source>. IEEE. Available online at: <ext-link ext-link-type="uri" xlink:href="https://ieeexplore.ieee.org/document/5557731">https://ieeexplore.ieee.org/document/5557731</ext-link></citation>
</ref>
<ref id="B62">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schirner</surname> <given-names>M.</given-names></name> <name><surname>Domide</surname> <given-names>L.</given-names></name> <name><surname>Perdikis</surname> <given-names>D.</given-names></name> <name><surname>Triebkorn</surname> <given-names>P.</given-names></name> <name><surname>Stefanovski</surname> <given-names>L.</given-names></name> <name><surname>Pai</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Brain simulation as a cloud service: the virtual brain on EBRAINS</article-title>. <source>Neuroimage</source> <volume>251</volume>, <fpage>118973</fpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2022.118973</pub-id><pub-id pub-id-type="pmid">35131433</pub-id></citation></ref>
<ref id="B63">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schlick</surname> <given-names>T.</given-names></name> <name><surname>Portillo-Ledesma</surname> <given-names>S.</given-names></name> <name><surname>Blaszczyk</surname> <given-names>M.</given-names></name> <name><surname>Dalessandro</surname> <given-names>L.</given-names></name> <name><surname>Ghosh</surname> <given-names>S.</given-names></name> <name><surname>Hackl</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>A multiscale vision-illustrative applications from biology to engineering</article-title>. <source>Int. J. Mult. Comp. Eng</source>. <volume>19</volume>, <fpage>39</fpage>&#x02013;<lpage>73</lpage>. <pub-id pub-id-type="doi">10.1615/IntJMultCompEng.2021039845</pub-id><pub-id pub-id-type="pmid">35330633</pub-id></citation></ref>
<ref id="B64">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shaheen</surname> <given-names>H.</given-names></name> <name><surname>Pal</surname> <given-names>S.</given-names></name> <name><surname>Melnik</surname> <given-names>R.</given-names></name></person-group> (<year>2022</year>). <article-title>Multiscale co-simulation of deep brain stimulation with brain networks in neurodegenerative disorders</article-title>. <source>Brain Multiphysics</source> <volume>3</volume>, <fpage>100058</fpage>. <pub-id pub-id-type="doi">10.1016/j.brain.2022.100058</pub-id></citation>
</ref>
<ref id="B65">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shuman</surname> <given-names>T.</given-names></name> <name><surname>Aharoni</surname> <given-names>D.</given-names></name> <name><surname>Cai</surname> <given-names>D. J.</given-names></name> <name><surname>Lee</surname> <given-names>C. R.</given-names></name> <name><surname>Chavlis</surname> <given-names>S.</given-names></name> <name><surname>Page-Harley</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Breakdown of spatial coding and interneuron synchronization in epileptic mice</article-title>. <source>Nat. Neurosci</source>. <volume>23</volume>, <fpage>229</fpage>&#x02013;<lpage>238</lpage>. <pub-id pub-id-type="doi">10.1038/s41593-019-0559-0</pub-id><pub-id pub-id-type="pmid">31907437</pub-id></citation></ref>
<ref id="B66">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Stimberg</surname> <given-names>M.</given-names></name> <name><surname>Brette</surname> <given-names>R.</given-names></name> <name><surname>Goodman</surname> <given-names>D. F.</given-names></name></person-group> (<year>2019</year>). <article-title>Brian 2, an intuitive and efficient neural simulator</article-title>. <source>Elife</source> <volume>8</volume>, <fpage>e47314</fpage>. <pub-id pub-id-type="doi">10.7554/eLife.47314</pub-id><pub-id pub-id-type="pmid">31429824</pub-id></citation></ref>
<ref id="B67">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vitay</surname> <given-names>J.</given-names></name> <name><surname>Dinkelbach</surname> <given-names>H.</given-names></name> <name><surname>Hamker</surname> <given-names>F.</given-names></name></person-group> (<year>2015</year>). <article-title>ANNarchy: a code generation approach to neural simulations on parallel hardware</article-title>. <source>Front. Neuroinform</source>. 9, 19. <pub-id pub-id-type="doi">10.3389/fninf.2015.00019</pub-id><pub-id pub-id-type="pmid">26283957</pub-id></citation></ref>
<ref id="B68">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Weidel</surname> <given-names>P.</given-names></name> <name><surname>Djurfeldt</surname> <given-names>M.</given-names></name> <name><surname>Duarte</surname> <given-names>R. C.</given-names></name> <name><surname>Morrison</surname> <given-names>A.</given-names></name></person-group> (<year>2016</year>). <article-title>Closed loop interactions between spiking neural network and robotic simulators based on MUSIC and ROS</article-title>. <source>Front. Neuroinform</source>. 10, 31. <pub-id pub-id-type="doi">10.3389/fninf.2016.00031</pub-id><pub-id pub-id-type="pmid">27536234</pub-id></citation></ref>
</ref-list>
</back>
</article>