<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Robot. AI</journal-id>
<journal-title>Frontiers in Robotics and AI</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Robot. AI</abbrev-journal-title>
<issn pub-type="epub">2296-9144</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">529872</article-id>
<article-id pub-id-type="doi">10.3389/frobt.2021.529872</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Robotics and AI</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Robustness of Bio-Inspired Visual Systems for Collision Prediction in Critical Robot Traffic</article-title>
<alt-title alt-title-type="left-running-head">Fu et&#x20;al.</alt-title>
<alt-title alt-title-type="right-running-head">Collision Prediction in Robot Traffic</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Fu</surname>
<given-names>Qinbing</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/808441/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Sun</surname>
<given-names>Xuelong</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1330088/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Liu</surname>
<given-names>Tian</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1330084/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Hu</surname>
<given-names>Cheng</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1250201/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Yue</surname>
<given-names>Shigang</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/135057/overview"/>
</contrib>
</contrib-group>
<aff id="aff1">
<label>
<sup>1</sup>
</label>Machine Life and Intelligence Research Centre, School of Mechanical and Electrical Engineering, Guangzhou University, <addr-line>Guangzhou</addr-line>, <country>China</country>
</aff>
<aff id="aff2">
<label>
<sup>2</sup>
</label>School of Computer Science, University of Lincoln, <addr-line>Lincoln</addr-line>, <country>United Kingdom</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/471689/overview">Subhadeep Chakraborty</ext-link>, The University of Tennessee, Knoxville, United&#x20;States</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/997560/overview">Pengcheng Liu</ext-link>, University of York, United&#x20;Kingdom</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1286267/overview">Jindong Tan</ext-link>, The University of Tennessee, Knoxville, United&#x20;States</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Qinbing Fu, <email>qifu@lincoln.ac.uk</email>; Shigang Yue, <email>syue@lincoln.ac.uk</email>
</corresp>
<fn fn-type="equal" id="fn1">
<label>
<sup>&#x2020;</sup>
</label>
<p>These authors share first authorship</p>
</fn>
<fn fn-type="other">
<p>This article was submitted to Smart Sensor Networks and Autonomy, a section of the journal Frontiers in Robotics and&#x20;AI</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>06</day>
<month>08</month>
<year>2021</year>
</pub-date>
<pub-date pub-type="collection">
<year>2021</year>
</pub-date>
<volume>8</volume>
<elocation-id>529872</elocation-id>
<history>
<date date-type="received">
<day>27</day>
<month>01</month>
<year>2020</year>
</date>
<date date-type="accepted">
<day>19</day>
<month>07</month>
<year>2021</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2021 Fu, Sun, Liu, Hu and Yue.</copyright-statement>
<copyright-year>2021</copyright-year>
<copyright-holder>Fu, Sun, Liu, Hu and Yue</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these&#x20;terms.</p>
</license>
</permissions>
<abstract>
<p>Collision prevention poses a major research and development obstacle for intelligent robots and vehicles. This paper investigates the robustness of two state-of-the-art neural network models inspired by the locust&#x2019;s LGMD-1 and LGMD-2 visual pathways as fast and low-energy collision alert systems in critical scenarios. Although both the neural circuits have been studied and modelled intensively, their capability and robustness against real-time critical traffic scenarios where real-physical crashes will happen have never been systematically investigated due to difficulty and high price in replicating risky traffic with many crash occurrences. To close this gap, we apply a recently published robotic platform to test the LGMDs inspired visual systems in physical implementation of critical traffic scenarios at low cost and high flexibility. The proposed visual systems are applied as the only collision sensing modality in each micro-mobile robot to conduct avoidance by abrupt braking. The simulated traffic resembles on-road sections including the intersection and highway scenes wherein the roadmaps are rendered by coloured, artificial pheromones upon a wide LCD screen acting as the ground of an arena. The robots, with light sensors at the bottom, can recognise the lanes and signals and tightly follow paths. The emphasis herein is laid on corroborating the robustness of LGMDs neural systems model in different dynamic robot scenes to timely alert potential crashes. This study well complements previous experimentation on such bio-inspired computations for collision prediction in more critical physical scenarios, and for the first time demonstrates the robustness of LGMDs inspired visual systems in critical traffic towards a reliable collision alert system under constrained computation power. This paper also exhibits a novel, tractable, and affordable robotic approach to evaluate online visual systems in dynamic scenes.</p>
</abstract>
<kwd-group>
<kwd>bio-inspired computation</kwd>
<kwd>collision prediction</kwd>
<kwd>robust visual systems</kwd>
<kwd>LGMDs</kwd>
<kwd>micro-robot</kwd>
<kwd>critical robot traffic</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>The World Health Organisation (WHO) reported that every year, approximately 1.35 million people worldwide die in road traffic, an increase of 0.11 million over only 5&#xa0;years ago (<xref ref-type="bibr" rid="B48">WHO, 2018</xref>). Collision prevention is an old, but active topic in research communities since it is still obstructing the development of intelligent robots and vehicles. For example, the internet of vehicles (IoV) systems and technologies are confronting huge challenges from traffic accidents where the emergent strategies from deep learning (<xref ref-type="bibr" rid="B5">Chang et&#x20;al., 2019</xref>) and cloud communication (<xref ref-type="bibr" rid="B53">Zhou et&#x20;al., 2020</xref>) are improving the IoV&#x2019;s reliability. The unmanned aerial vehicles (UAVs) industries are also reflecting on how to enhance the capability of obstacle detection and avoidance especially when flying through unstructured, dynamic scenes (<xref ref-type="bibr" rid="B1">Albaker and Rahim, 2009</xref>). On-road crashes usually occur randomly and are difficult to predict and trace. The typical accident-prone places include intersections, road junctions and highways, etc., where collision prevention is difficult to tackle (<xref ref-type="bibr" rid="B30">Mukhtar et&#x20;al., 2015</xref>).</p>
<p>Therefore, a critically important task is the development of collision avoidance systems with ultimate reliability, which nevertheless is faced with huge challenges (<xref ref-type="bibr" rid="B51">Zehang Sun et&#x20;al., 2006</xref>; <xref ref-type="bibr" rid="B42">Sivaraman and Trivedi, 2013</xref>; <xref ref-type="bibr" rid="B30">Mukhtar et&#x20;al., 2015</xref>). The cutting-edge techniques for collision prediction include global positioning system (GPS), active (Radar, Laser, Lidar), and passive (acoustic and optical) sensor strategies, as well as combinations of these with sensor-based algorithms (<xref ref-type="bibr" rid="B51">Zehang Sun et&#x20;al., 2006</xref>; <xref ref-type="bibr" rid="B30">Mukhtar et&#x20;al., 2015</xref>). More specifically, the GPS has been used for predicting real time trajectory of vehicles for collision risk estimation (<xref ref-type="bibr" rid="B2">Ammoun and Nashashibi, 2009</xref>). The vision-based techniques have been implemented in passive sensors, i.e.,&#x20;the different kinds of cameras (<xref ref-type="bibr" rid="B45">Sun et&#x20;al., 2004</xref>; <xref ref-type="bibr" rid="B30">Mukhtar et&#x20;al., 2015</xref>). Compared to the active sensors like the Radar, the main advantages of visual ones are lower price and wider coverage of detection range up to 360&#xb0;, which can provide much richer description about the vehicle&#x2019;s surroundings including motion analysis (<xref ref-type="bibr" rid="B36">Sabzevari and Scaramuzza, 2016</xref>). Compared to the IoV and GPS techniques, the optical methods are not restricted by surrounding infrastructures (<xref ref-type="bibr" rid="B30">Mukhtar et&#x20;al., 2015</xref>). However, the visual approaches bring about pronounced challenges upon fast implementation in real time, and accurate extraction of colliding features from the dynamic visual world mixed with many distractors. 
A reliable, real-time, energy-efficient collision alert visual system has not yet been demonstrated so&#x20;far.</p>
<p>Fortunately, nature has been providing us with a lot of inspirations to construct collision sensing visual systems. Robust and efficient collision prediction system is ubiquitous amongst the vast majority of sighted animals. As a source of inspiration, the insects&#x2019; dynamic vision systems have been explored as powerful paradigms for collision detection and avoidance with numerous applications in machine vision, as reviewed in (<xref ref-type="bibr" rid="B11">Franceschini, 2014</xref>; <xref ref-type="bibr" rid="B39">Serres and Ruffier, 2017</xref>; <xref ref-type="bibr" rid="B15">Fu et&#x20;al., 2018a</xref>, <xref ref-type="bibr" rid="B13">Fu et&#x20;al., 2019b</xref>). As a prominent example, locusts can migrate for a long distance in dense swarms containing hundreds to thousands of individuals free of collision (<xref ref-type="bibr" rid="B26">Kennedy, 1951</xref>). In the locust&#x2019;s visual pathways, two lobula giant movement detectors (LGMDs), i.e.,&#x20;the LGMD-1 and the LGMD-2, have been gradually identified and recognised to play crucial roles of collision perception, both of which respond most strongly to approaching objects signalling a direct collision course over other categories of visual movements like translating, receding, etc. (<xref ref-type="bibr" rid="B31">O&#x2019;Shea and Williams, 1974</xref>; <xref ref-type="bibr" rid="B32">O&#x2019;Shea and Rowell, 1976</xref>; <xref ref-type="bibr" rid="B33">Rind and Bramwell, 1996</xref>; <xref ref-type="bibr" rid="B40">Simmons and Rind, 1997</xref>; <xref ref-type="bibr" rid="B34">Rind and Simmons, 1999</xref>; <xref ref-type="bibr" rid="B21">Gabbiani et&#x20;al., 2002</xref>, <xref ref-type="bibr" rid="B20">Gabbiani et&#x20;al., 2004</xref>; <xref ref-type="bibr" rid="B10">Fotowat and Gabbiani, 2011</xref>; <xref ref-type="bibr" rid="B35">Rind et&#x20;al., 2016</xref>; <xref ref-type="bibr" rid="B49">Yakubowski et&#x20;al., 2016</xref>). 
More precisely, the LGMD releases bursts of energy whenever a locust is on a collision course with its cohorts or a predator bird. This energy, conveyed by neural pulses, leads to evasive actions like jumping from the ground while standing, or sliding while flying (<xref ref-type="bibr" rid="B41">Simmons et&#x20;al., 2010</xref>). Surprisingly, the whole process from visual processing to behavioural response takes less than 50 milliseconds (<xref ref-type="bibr" rid="B41">Simmons et&#x20;al., 2010</xref>; <xref ref-type="bibr" rid="B46">Sztarker and Rind, 2014</xref>). Therefore, building artificial visual systems that possess similar robustness and timeliness to the locust&#x2019;s LGMDs can undoubtedly benefit collision avoidance systems in intelligent robots and vehicles.</p>
<p>Learning from the locust&#x2019;s LGMDs visual pathways and circuits, there have been many modelling studies to investigate either the LGMD-1 or the LGMD-2 against various visual scenes including online, wheeled mobile robots (<xref ref-type="bibr" rid="B4">Blanchard et&#x20;al., 2000</xref>; <xref ref-type="bibr" rid="B50">Yue and Rind, 2005</xref>; <xref ref-type="bibr" rid="B3">Badia et&#x20;al., 2010</xref>; <xref ref-type="bibr" rid="B19">Fu et&#x20;al., 2016</xref>; <xref ref-type="bibr" rid="B14">Fu et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B16">Fu et&#x20;al., 2018b</xref>; <xref ref-type="bibr" rid="B24">Isakhani et&#x20;al., 2018</xref>; <xref ref-type="bibr" rid="B13">Fu et&#x20;al., 2019b</xref>), walking robot (<xref ref-type="bibr" rid="B7">Cizek et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B6">Cizek and Faigl, 2019</xref>), UAVs (<xref ref-type="bibr" rid="B38">Salt et&#x20;al., 2017</xref>, <xref ref-type="bibr" rid="B37">Salt et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B52">Zhao et&#x20;al., 2019</xref>), and off-line car driving scenarios, e.g. (<xref ref-type="bibr" rid="B25">Keil et&#x20;al., 2004</xref>; <xref ref-type="bibr" rid="B43">Stafford et&#x20;al., 2007</xref>; <xref ref-type="bibr" rid="B28">Krejan and Trost, 2011</xref>; <xref ref-type="bibr" rid="B22">Hartbauer, 2017</xref>; <xref ref-type="bibr" rid="B12">Fu et&#x20;al., 2019a</xref>, <xref ref-type="bibr" rid="B17">Fu et&#x20;al., 2020a</xref>). These studies have demonstrated the effectiveness of LGMDs models as quick visual collision detectors for machine vision applications. However, due to high risk and price to replicate extremely dangerous traffic scenarios including many severe crashes, a vacancy is still there to investigate the capability and robustness of LGMDs models for addressing collision challenges from more critical scenarios where many physical collisions will happen. 
Regarding the off-line testing approach, there is currently no comprehensive database covering real crash situations from vehicle-mounted cameras.</p>
<p>To fill these gaps, the recently published robotic platform named &#x201c;<italic>ColCOS</italic>&#x3a6;&#x201d; (<xref ref-type="bibr" rid="B44">Sun et&#x20;al., 2019</xref>) can enrich the existing experimenting &#x201c;toolbox&#x201d; in the context. The platform can be used to physically implement different multi-robot traffic mimicking real world on-road collision challenges for investigating the proposed visual systems in a practical, affordable manner. More precisely, an artificial pheromones module herein works effectively to optically render different roadmaps involving lanes and signals upon a wide LCD screen acting as the ground of an arena for robots which can pick up these cues with light sensors at bottom. Accordingly, the robots can tightly follow the paths in navigation. Moreover, to focus on investigating the proposed LGMDs inspired visual systems, we apply very basic switch control to separate the states between normal navigation (going forward) and collision avoidance (abrupt braking). Here the more complex motion strategies resembling either the locust&#x2019;s evasive behaviours or the ground vehicle&#x2019;s actions are outside the scope of this&#x20;study.</p>
<p>Therefore, the main contributions of this paper can be summarised as follows:<list list-type="simple">
<list-item>
<p>&#x2022; This research corroborates the robustness of LGMDs neuronal systems model to timely alert potential crashes in dynamic multi-robot scenes. To sharpen up the acuity of LGMDs inspired visual systems in collision sensing, an original hybrid LGMD-1 and LGMD-2 neural networks model is proposed with non-linear mapping from network outputs to alert firing rate, which works effectively.</p>
</list-item>
<list-item>
<p>&#x2022; This research complements previous experimentation on the proposed bio-inspired computation approach to collision prediction in more critical, real-physical scenarios.</p>
</list-item>
<list-item>
<p>&#x2022; This paper exhibits an innovative, tractable, and affordable robotic approach to evaluate online visual systems in different dynamic scenes.</p>
</list-item>
</list>
</p>
<p>The rest of this article is structured as follows. <xref ref-type="sec" rid="s2">Section 2</xref> elaborates on the biological background, the formulation of the proposed model, and the robotic platform. <xref ref-type="sec" rid="s3">Section 3</xref> presents the experimental settings on different types of robot traffic systems. <xref ref-type="sec" rid="s4">Section 4</xref> elucidates the results with analysis. <xref ref-type="sec" rid="s5">Section 5</xref> discusses limitations and future work. <xref ref-type="sec" rid="s6">Section 6</xref> concludes this article.</p>
</sec>
<sec id="s2">
<title>2 Methods and Materials</title>
<sec id="s2-1">
<title>2.1 Biological Inspiration</title>
<p>Within this subsection, we firstly introduce the bio-inspiration, i.e.,&#x20;characterisation of the locust LGMD-1 and LGMD-2 visual neurons for the proposed computational modelling and experimenting. <xref ref-type="fig" rid="F1">Figure&#x20;1</xref> illustrates the schematic neural structures: the two neurons are physically close to each other. In general, they have been discovered amongst a group of LGMDs in the locust&#x2019;s visual brain, a place called &#x201c;lobula area&#x201d; (<xref ref-type="bibr" rid="B31">O&#x2019;Shea and Williams, 1974</xref>; <xref ref-type="bibr" rid="B32">O&#x2019;Shea and Rowell, 1976</xref>). The LGMD-1 was first identified as a movement detector, and gradually recognised as a looming (approaching) objects detector, which responds most strongly to direct, rapid approaching objects rather than any other kinds of movements (<xref ref-type="bibr" rid="B33">Rind and Bramwell, 1996</xref>). In the same place, the LGMD-2 was also identified as a looming objects detector but with different selectivity to the LGMD-1, that is, the LGMD-2 is only sensitive to darker objects that approach against a relatively brighter background; whilst the LGMD-1 can detect either lighter or darker approaching objects (<xref ref-type="bibr" rid="B40">Simmons and Rind, 1997</xref>; <xref ref-type="bibr" rid="B46">Sztarker and Rind, 2014</xref>).</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Schematic illustration of the LGMD-1 and the LGMD-2 neuromorphology. Visual stimuli are received by the pre-synaptic dendrite structures of both neurons. The feed-forward inhibition (FFI) pathway connects the LGMD-1. The DCMD (descending contra-lateral movement detector) is a one-to-one post-synaptic target neuron to the LGMD-1 conveying spikes to motion control neural system. The post-synaptic partner neuron to the LGMD-2 yet remains unknown.</p>
</caption>
<graphic xlink:href="frobt-08-529872-g001.tif"/>
</fig>
<p>More precisely, both the looming perception visual neurons show increasing firing rates before the moving object reaches a particular angular size in the field of vision. They are rigorously inhibited at the end of objects approaching, the start of objects receding, and during transient luminance change over a large field of view. Against translating movements at constant speed, they are only activated very briefly. Most importantly, through our previous modelling and bio-robotic research (<xref ref-type="bibr" rid="B19">Fu et&#x20;al., 2016</xref>; <xref ref-type="bibr" rid="B16">Fu et&#x20;al., 2018b</xref>; <xref ref-type="bibr" rid="B13">Fu et&#x20;al., 2019b</xref>), we have found that though with different selectivity, both the neurons could respond strongly to movements with increasing motion intensity, such as fast approaching and accelerating translating objects. Accordingly, all these specific characteristics make the LGMD-1 and LGMD-2 unique neuronal systems to model for addressing collision challenges for intelligent robots and vehicles.</p>
</sec>
<sec id="s2-2">
<title>2.2 Model Description</title>
<p>The collision selectivity, which indicates the neuron should respond most strongly to approaching objects over other kinds of movements, is a key feature to be realised in such looming perception neural network models separating their functionality from other categories of motion sensitive neural models (<xref ref-type="bibr" rid="B13">Fu et&#x20;al., 2019b</xref>). Through hundreds of millions of years of evolution, the locust&#x2019;s LGMDs have been tuned with perfect collision selectivity, whereas the current computational models have not. In this regard, we have proposed a few effective methods to implement the different selectivity between the two LGMDs, and to sharpen up the selectivity via the modelling of bio-plausible ON/OFF channels (<xref ref-type="bibr" rid="B19">Fu et&#x20;al., 2016</xref>; <xref ref-type="bibr" rid="B14">Fu et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B13">Fu et&#x20;al., 2019b</xref>), and neural mechanisms like spike frequency adaptation (<xref ref-type="bibr" rid="B14">Fu et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B16">Fu et&#x20;al., 2018b</xref>), and adaptive inhibition (<xref ref-type="bibr" rid="B12">Fu et&#x20;al., 2019a</xref>; <xref ref-type="bibr" rid="B18">Fu et&#x20;al., 2020b</xref>). However, the collision selectivity of current models still needs to be enhanced especially in complex and dynamic visual scenes.</p>
<p>Moreover, through previous studies, we have found the LGMD-2&#x2019;s specific selectivity can complement the LGMD-1&#x2019;s when detecting darker approaching objects, since the LGMD-1 is shortly activated by the recession of a darker object whereas the LGMD-2 is not (<xref ref-type="bibr" rid="B13">Fu et&#x20;al., 2019b</xref>). This well matches the situations faced by ground mobile robots and vehicles since most on-road objects are relatively darker than their backgrounds, particularly in daytime navigation. An interesting question thus arises as to whether the two neuronal systems can coordinate in sculpting the dark looming selectivity. Accordingly, building upon the two state-of-the-art LGMDs neural network models (<xref ref-type="bibr" rid="B16">Fu et&#x20;al., 2018b</xref>; <xref ref-type="bibr" rid="B13">Fu et&#x20;al., 2019b</xref>), we propose a hybrid visual neural networks model combining the functionality of both the LGMD-1 and the LGMD-2, and investigate the robustness in dynamic visual scenes. Compared to related networks, the proposed network features a non-linear unit for the product of spikes elicited by the LGMD-1 and the LGMD-2 neurons to generate the hybrid firing rate. This works effectively to sharpen up the selectivity to darker approaching objects over other motion patterns like recession. Consequently, a potential collision is detected only when both the LGMDs neurons are highly activated in the context. <xref ref-type="fig" rid="F2">Figure&#x20;2</xref> illustrates the schematic structure of the hybrid visual neural networks. The nomenclature is given in <xref ref-type="table" rid="T1">Table&#x20;1</xref>.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Schematic illustration of the proposed feed-forward collision prediction visual neural networks. There are three layers pre-synaptic to the two neurons, the photoreceptor (P), lamina (LA) and medulla (ME) layers. The pre-synaptic neural networks of LGMD-1 and LGMD-2 share the same visual processing in the first two, P and LA layers. The processing yet differs in the third ME layer for the purpose of separating their different selectivity. The ME layer consists of ON/OFF channels wherein the ON channels are rigorously suppressed in the LGMD-2&#x2019;s circuit (dashed lines). The delayed information is formed by convolving surrounding non-delayed signals in space. The FFI is an individual inhibition pathway to merely the LGMD-1. The PM is a mediation pathway to the medulla layer of the LGMD-2. The two LGMDs pool their pre-synaptic signals respectively to generate spikes that are passed to their post-synaptic neurons. Notably, the non-linearly mapped, hybrid firing rate is the network output deciding the corresponding collision avoidance response.</p>
</caption>
<graphic xlink:href="frobt-08-529872-g002.tif"/>
</fig>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Nomenclature in the visual neural networks.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Acronym and full-name</th>
<th align="left"/>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">LGMD</td>
<td align="left">Lobula giant movement detector</td>
</tr>
<tr>
<td align="left">DCMD</td>
<td align="left">Descending contra-lateral movement detector</td>
</tr>
<tr>
<td align="left">FFI</td>
<td align="left">Feed-forward inhibition</td>
</tr>
<tr>
<td align="left">P</td>
<td align="left">Photoreceptor</td>
</tr>
<tr>
<td align="left">LA/ME</td>
<td align="left">Lamina/medulla neuron</td>
</tr>
<tr>
<td align="left">PM</td>
<td align="left">Photoreceptor mediation</td>
</tr>
<tr>
<td align="left">TD</td>
<td align="left">Time delay unit</td>
</tr>
<tr>
<td align="left">E/I</td>
<td align="left">Excitation/inhibition</td>
</tr>
<tr>
<td align="left">S/G</td>
<td align="left">Summation/grouping</td>
</tr>
</tbody>
</table>
</table-wrap>
<sec id="s2-2-1">
<title>2.2.1 Photoreceptors Layer</title>
<p>As shown in <xref ref-type="fig" rid="F2">Figure&#x20;2</xref>, the LGMD-1 and the LGMD-2 possess the same visual processing in the first two pre-synaptic layers. The first layer is composed of photoreceptors arranged in a matrix sensing time-varying, single-channel luminance (grey-scale in our case). The photoreceptors compute temporal derivative of every pixel to get motion information. Let <inline-formula id="inf1">
<mml:math id="m1">
<mml:mi>L</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi mathvariant="double-struck">R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula> denote the input image streams, where <italic>x</italic>, <italic>y</italic>, and <italic>t</italic> are spatial and temporal positions, respectively. The current motion can be retrieved by<disp-formula id="e1">
<mml:math id="m2">
<mml:mi>P</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>L</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>L</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2b;</mml:mo>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:munderover>
<mml:msub>
<mml:mrow>
<mml:mi>a</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mi>P</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:mtext>where</mml:mtext>
<mml:mspace width="0.3333em"/>
<mml:msub>
<mml:mrow>
<mml:mi>a</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2b;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
<mml:mo>.</mml:mo>
</mml:math>
<label>(1)</label>
</disp-formula>The motion persistence is constituted by <italic>n</italic>
<sub>
<italic>p</italic>
</sub> frames.</p>
<p>In addition, the P-layer also indicates the whole-field luminance change with respect to time. This indicator is applied as the FFI in the LGMD-1 neural network, which can be obtained by averaging the overall luminance change at time <italic>t</italic>. That is,<disp-formula id="e2">
<mml:math id="m3">
<mml:mi>F</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>y</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:mo stretchy="false">&#x7c;</mml:mo>
<mml:mi>P</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo stretchy="false">&#x7c;</mml:mo>
<mml:mo>&#x22c5;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>C</mml:mi>
<mml:mo>&#x22c5;</mml:mo>
<mml:mi>R</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
<mml:mo>,</mml:mo>
</mml:math>
<label>(2)</label>
</disp-formula>where <italic>C</italic> and <italic>R</italic> denote columns and rows of the visual field in pixels. In addition to that, the FFI goes through a time delay unit (see TD in <xref ref-type="fig" rid="F2">Figure&#x20;2</xref>), defined as<disp-formula id="e3">
<mml:math id="m4">
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>F</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mi>F</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2b;</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mi>F</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>/</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>f</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>.</mml:mo>
</mml:math>
<label>(3)</label>
</disp-formula>
<italic>&#x3c4;</italic>
<sub>
<italic>f</italic>
</sub> stands for a time constant and <italic>&#x3c4;</italic>
<sub>
<italic>i</italic>
</sub> is the time interval between consecutive frames of digital signals, both in milliseconds. Notably, here the FFI can directly shut down the LGMD-1 neuron if it overshoots <italic>T</italic>
<sub>
<italic>ffi</italic>
</sub>, i.e.,&#x20;a predefined threshold.</p>
<p>While modelling the LGMD-2, we propose a temporal tuning mechanism, the PM in <xref ref-type="fig" rid="F2">Figure&#x20;2</xref>, to adjust local inhibitions in the medulla layer of the LGMD-2. The computations of PM conform to <xref ref-type="disp-formula" rid="e2">Eqs. 2</xref>, <xref ref-type="disp-formula" rid="e3">3</xref>, which are not restated&#x20;here.</p>
</sec>
<sec id="s2-2-2">
<title>2.2.2 Lamina Layer</title>
<p>Motion information inevitably induces luminance increment or decrement over time. As shown in <xref ref-type="fig" rid="F2">Figure&#x20;2</xref>, the second lamina layer separates the relayed signals into parallel ON and OFF channels at each node. More precisely, the luminance increment flows into the ON channel, whilst the decrement streams to the OFF channel with a sign-inverting operation. Both the channels retain positive inputs. That is,<disp-formula id="e4">
<mml:math id="m5">
<mml:mtable class="aligned">
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:msub>
<mml:mrow>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2b;</mml:mo>
</mml:mrow>
</mml:msup>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:msub>
<mml:mrow>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
</mml:mrow>
</mml:msup>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
<mml:mo>.</mml:mo>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:math>
<label>(4)</label>
</disp-formula>[<italic>x</italic>]<sup>&#x2b;</sup> and [<italic>x</italic>]<sup>&#x2212;</sup> denote max(0, <italic>x</italic>) and min(<italic>x</italic>, 0), respectively. In addition, a small fraction (<italic>&#x3b1;</italic>
<sub>2</sub>) of previous signal is allowed to pass through.</p>
</sec>
<sec id="s2-2-3">
<title>2.2.3 Medulla Layer</title>
<p>The medulla layer is the place where different collision selectivity between LGMD-1 and LGMD-2 is shaped. The visual processing thus differs in this layer. First, in the LGMD-1&#x2019;s medulla, the delayed information varies in different polarity pathways. More precisely, in the ON channels, the local inhibition (<italic>I</italic>
<sub>
<italic>on1</italic>
</sub>) is formed by convolving surrounding delayed excitations (<italic>E</italic>
<sub>
<italic>on1</italic>
</sub>). The whole spatiotemporal computation can be defined as the following:<disp-formula id="e5">
<mml:math id="m6">
<mml:msub>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:mo>&#x3d;</mml:mo>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:msub>
<mml:mrow>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:math>
<label>(5)</label>
</disp-formula>
<disp-formula id="e6">
<mml:math id="m7">
<mml:msub>
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:mo>&#x3d;</mml:mo>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2b;</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:msub>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>/</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:math>
<label>(6)</label>
</disp-formula>
<disp-formula id="e7">
<mml:math id="m8">
<mml:msub>
<mml:mrow>
<mml:mi>I</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:msub>
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:msub>
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>r</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>.</mml:mo>
</mml:math>
<label>(7)</label>
</disp-formula>
<italic>&#x3c4;</italic>
<sub>1</sub> stands for the latency of the excitatory signal. <italic>r</italic> indicates the radius of the convolving area. [<italic>W</italic>
<sub>1</sub>] denotes the convolution kernel in the LGMD-1 that meets the following matrix:<disp-formula id="e8">
<mml:math id="m9">
<mml:msub>
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mtable class="matrix">
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>8</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>4</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>8</mml:mn>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>4</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>0</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>4</mml:mn>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>8</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>4</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>8</mml:mn>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:math>
<label>(8)</label>
</disp-formula>
</p>
<p>In the LGMD-1&#x2019;s OFF channels, the delay is nevertheless put forth on the inhibitory signal; the excitation is thus formed by convolving delayed lateral inhibitions. That is,<disp-formula id="e9">
<mml:math id="m10">
<mml:msub>
<mml:mrow>
<mml:mi>I</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:math>
<label>(9)</label>
</disp-formula>
<disp-formula id="e10">
<mml:math id="m11">
<mml:msub>
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mi>I</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:mo>&#x3d;</mml:mo>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>4</mml:mn>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>I</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:mo>&#x2b;</mml:mo>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>4</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:msub>
<mml:mrow>
<mml:mi>I</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>4</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:mo>&#x3d;</mml:mo>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>/</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:math>
<label>(10)</label>
</disp-formula>
<disp-formula id="e11">
<mml:math id="m12">
<mml:msub>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:msub>
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mi>I</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x22c5;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>r</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>.</mml:mo>
</mml:math>
<label>(11)</label>
</disp-formula>Here the convolution kernel [<italic>W</italic>
<sub>2</sub>] is set equally to <italic>W</italic>
<sub>1</sub> in <xref ref-type="disp-formula" rid="e8">Eq.&#x20;8</xref>.</p>
<p>Second, in the LGMD-2&#x2019;s medulla, much stronger local inhibitions are put forth in all the ON channels, forming a biased-ON pathway in order to achieve its specific selectivity to only darker objects (see dashed lines in <xref ref-type="fig" rid="F2">Figure&#x20;2</xref>). More specifically, the generation of local excitation (<italic>E</italic>
<sub>
<italic>on2</italic>
</sub>) and inhibition (<italic>I</italic>
<sub>
<italic>on2</italic>
</sub>) in the LGMD-2&#x2019;s ON channels conforms to the LGMD-1 yet with a different latency <italic>&#x3c4;</italic>
<sub>3</sub>. To implement the &#x2018;bias&#x2019;, the convolution kernel matrix [<italic>W</italic>
<sub>3</sub>] is increased with self-inhibition as<disp-formula id="e12">
<mml:math id="m13">
<mml:msub>
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mtable class="matrix">
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>4</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>2</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>4</mml:mn>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>2</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>2</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>2</mml:mn>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>4</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>2</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>4</mml:mn>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:math>
<label>(12)</label>
</disp-formula>
</p>
<p>In the LGMD-2&#x2019;s OFF pathway, the neural computation is defined as<disp-formula id="e13">
<mml:math id="m14">
<mml:msub>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>2</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:math>
<label>(13)</label>
</disp-formula>
<disp-formula id="e14">
<mml:math id="m15">
<mml:msub>
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>2</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>5</mml:mn>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>2</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2b;</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>5</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:msub>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>2</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>5</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>/</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>4</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:math>
<label>(14)</label>
</disp-formula>
<disp-formula id="e15">
<mml:math id="m16">
<mml:msub>
<mml:mrow>
<mml:mi>I</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>2</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:msub>
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>2</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:msub>
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>4</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>r</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>.</mml:mo>
</mml:math>
<label>(15)</label>
</disp-formula>
</p>
<p>[<italic>W</italic>
<sub>4</sub>] fits the following matrix:<disp-formula id="e16">
<mml:math id="m17">
<mml:msub>
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>4</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mtable class="matrix">
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>8</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>4</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>8</mml:mn>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>4</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>4</mml:mn>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>8</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>4</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mn>8</mml:mn>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:math>
<label>(16)</label>
</disp-formula>
</p>
<p>As illustrated in <xref ref-type="fig" rid="F2">Figure&#x20;2</xref>, following the generation of local ON/OFF excitation and inhibition, there are local summation units in both medulla layers. For the LGMD-1, the calculation is as follows:<disp-formula id="e17">
<mml:math id="m18">
<mml:mtable class="aligned">
<mml:mtr>
<mml:mtd columnalign="right">
<mml:msub>
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="left">
<mml:msup>
<mml:mrow>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>w</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x22c5;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>I</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2b;</mml:mo>
</mml:mrow>
</mml:msup>
<mml:mo>,</mml:mo>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right">
<mml:msub>
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="left">
<mml:msup>
<mml:mrow>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>w</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x22c5;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>I</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2b;</mml:mo>
</mml:mrow>
</mml:msup>
<mml:mo>.</mml:mo>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:math>
<label>(17)</label>
</disp-formula>
<italic>w</italic>
<sub>1</sub> and <italic>w</italic>
<sub>2</sub> are the local biases. Note that only the positive S unit signals will pass through to the subsequent circuit. Compared to the LGMD-1, the two local biases in the LGMD-2 are time-varying, adjusted by the PM pathway. That is,<disp-formula id="e18">
<mml:math id="m19">
<mml:msub>
<mml:mrow>
<mml:mi>w</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mtext>max</mml:mtext>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>,</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mi>M</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>T</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">ffi</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:msub>
<mml:mrow>
<mml:mi>w</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>4</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mtext>max</mml:mtext>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>0.5</mml:mn>
<mml:mo>,</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mi>M</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>T</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">ffi</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:math>
<label>(18)</label>
</disp-formula>
<disp-formula id="e19">
<mml:math id="m20">
<mml:mtable class="aligned">
<mml:mtr>
<mml:mtd columnalign="right">
<mml:msub>
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>2</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="left">
<mml:msup>
<mml:mrow>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>2</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>w</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x22c5;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>I</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>2</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2b;</mml:mo>
</mml:mrow>
</mml:msup>
<mml:mo>,</mml:mo>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right">
<mml:msub>
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>2</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="left">
<mml:msup>
<mml:mrow>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>2</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>w</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>4</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x22c5;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>I</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
<mml:mi>2</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2b;</mml:mo>
</mml:mrow>
</mml:msup>
<mml:mo>.</mml:mo>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:math>
<label>(19)</label>
</disp-formula>
</p>
<p>In both the LGMDs, the polarity summation cells interact with each other in a supra-linear manner as<disp-formula id="e20">
<mml:math id="m21">
<mml:mi>S</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:msub>
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi mathvariant="italic">ff</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>.</mml:mo>
</mml:math>
<label>(20)</label>
</disp-formula>
</p>
<p>Cascaded after the S unit, a grouping unit is introduced to reduce isolated motion and enhance the extraction of expanding edges of colliding objects in cluttered backgrounds. This is implemented with a passing coefficient matrix [<italic>Ce</italic>] determined by another convolving process with an equally weighted kernel [<italic>W</italic>
<sub>
<italic>g</italic>
</sub>]. That is,<disp-formula id="e21">
<mml:math id="m22">
<mml:mi>C</mml:mi>
<mml:mi>e</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:mi>S</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x22c5;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>g</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>r</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:math>
<label>(21)</label>
</disp-formula>
<disp-formula id="e22">
<mml:math id="m23">
<mml:msub>
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>g</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>9</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:mspace width="0.3333em"/>
<mml:mo>&#xd7;</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mtable class="matrix">
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mn>1</mml:mn>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:math>
<label>(22)</label>
</disp-formula>
<disp-formula id="e23">
<mml:math id="m24">
<mml:mi>G</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>S</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mi>C</mml:mi>
<mml:mi>e</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mi>&#x3c9;</mml:mi>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:mtext>where</mml:mtext>
<mml:mspace width="0.3333em"/>
<mml:mi>&#x3c9;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mtext>max</mml:mtext>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mi>C</mml:mi>
<mml:mi>e</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:msubsup>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x3c9;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msubsup>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="normal">&#x394;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>.</mml:mo>
</mml:math>
<label>(23)</label>
</disp-formula>
<italic>&#x3c9;</italic> is a scale parameter computed at every discrete time step. <italic>C</italic>
<sub>
<italic>&#x3c9;</italic>
</sub> is a constant. &#x394;<sub>
<italic>C</italic>
</sub> stands for a small real number. Here only the non-negative grouped signals can get through.</p>
</sec>
<sec id="s2-2-4">
<title>2.2.4&#x20;LGMD-1 and LGMD-2 Neurons</title>
<p>After the signal processing of pre-synaptic neural networks, the LGMD-1 and LGMD-2 neurons integrate corresponding local excitations from the medulla layer to generate membrane potentials. Here we apply a sigmoid transformation as the neuron activation function. The whole process can be defined as<disp-formula id="e24">
<mml:math id="m25">
<mml:mi>k</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>y</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:mrow>
<mml:mi>G</mml:mi>
</mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:mi>K</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2b;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>k</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x22c5;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>C</mml:mi>
<mml:mo>&#x22c5;</mml:mo>
<mml:mi>R</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
<mml:mo>,</mml:mo>
</mml:math>
<label>(24)</label>
</disp-formula>
</p>
<p>Subsequently, a spike frequency adaptation mechanism is applied to sculpt the neural response to moving objects threatening collision. The computation is defined as follows:<disp-formula id="e25">
<mml:math id="m26">
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mi>K</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="{" close="">
<mml:mrow>
<mml:mtable class="aligned">
<mml:mtr>
<mml:mtd columnalign="left">
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>6</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mi>K</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>K</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>K</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:mtext>if</mml:mtext>
<mml:mspace width="0.3333em"/>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>K</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>K</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2264;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="left">
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>6</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mi>K</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mspace width="1em"/>
<mml:mtext>otherwise</mml:mtext>
<mml:mo>,</mml:mo>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:math>
<label>(25)</label>
</disp-formula>
<disp-formula id="e26">
<mml:math id="m27">
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>6</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>/</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:math>
<label>(26)</label>
</disp-formula>where <italic>&#x3b1;</italic>
<sub>6</sub> is a coefficient indicating the adaptation rate to visual movements calculated by the time constant <italic>&#x3c4;</italic>
<sub>
<italic>s</italic>
</sub>. Generally speaking, such a mechanism reduces the neuronal firing rate to stimuli with constant or decreasing intensity, e.g., objects that recede or translate at a constant speed, while it has little effect on accelerating motion with increasing intensity, such as approaching objects.</p>
</sec>
<sec id="s2-2-5">
<title>2.2.5 Hybrid Spiking</title>
<p>As the time interval between frames of digital signals is much longer than the reaction time of real visual neurons, we map the membrane potentials exponentially to spikes by an integer-valued function. That is,<disp-formula id="e27">
<mml:math id="m28">
<mml:msup>
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>e</mml:mi>
</mml:mrow>
</mml:msup>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>7</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x22c5;</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mi>K</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>T</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:math>
<label>(27)</label>
</disp-formula>where <italic>T</italic>
<sub>
<italic>spi</italic>
</sub> denotes the spiking threshold and <italic>&#x3b1;</italic>
<sub>7</sub> is a scale coefficient affecting the firing rate, i.e.,&#x20;raising it will bring about more spikes within a specified time window.</p>
<p>As illustrated in <xref ref-type="fig" rid="F2">Figure&#x20;2</xref>, the elicited spikes are conveyed to their post-synaptic target neurons. In contrast to previous modelling of single-neuron computation of either the LGMD-1 or the LGMD-2, we herein put forward a non-linear hybrid spiking mechanism aiming at improving the selectivity to darker objects that only threaten direct collision by suppressing the response to other categories of visual stimuli. As a result, the specific selectivity of the LGMD-2 well complements the LGMD-1&#x2019;s, where the hybrid spiking frequency will be amplified only when both neurons are activated. The computation is defined as<disp-formula id="e28">
<mml:math id="m29">
<mml:msubsup>
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>e</mml:mi>
</mml:mrow>
</mml:msubsup>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="{" close="">
<mml:mrow>
<mml:mtable class="aligned">
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:msubsup>
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>e</mml:mi>
</mml:mrow>
</mml:msubsup>
<mml:mo>,</mml:mo>
<mml:mspace width="1em"/>
<mml:mtext>if</mml:mtext>
<mml:mspace width="0.3333em"/>
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mi>F</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2265;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>T</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">ffi</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:msubsup>
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>e</mml:mi>
</mml:mrow>
</mml:msubsup>
<mml:mo>&#xd7;</mml:mo>
<mml:msubsup>
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>e</mml:mi>
</mml:mrow>
</mml:msubsup>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:mtext>otherwise</mml:mtext>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:math>
<label>(28)</label>
</disp-formula>
</p>
<p>Finally, the detection of potential collision threat can be indicated by<disp-formula id="e29">
<mml:math id="m30">
<mml:mi>C</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>l</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="{" close="">
<mml:mrow>
<mml:mtable class="aligned">
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mtext>True</mml:mtext>
<mml:mo>,</mml:mo>
<mml:mtext>if</mml:mtext>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:msubsup>
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>e</mml:mi>
</mml:mrow>
</mml:msubsup>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>1000</mml:mn>
<mml:mo>/</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x22c5;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2265;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>T</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mtext>False</mml:mtext>
<mml:mo>,</mml:mo>
<mml:mspace width="1em"/>
<mml:mtext>otherwise</mml:mtext>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:math>
<label>(29)</label>
</disp-formula>
where <italic>n</italic>
<sub>
<italic>t</italic>
</sub> denotes a short time window in frames. <italic>T</italic>
<sub>
<italic>col</italic>
</sub> stands for the collision warning threshold.</p>
</sec>
<sec id="s2-2-6">
<title>2.2.6 Setting Network Parameters</title>
<p>
<xref ref-type="table" rid="T2">Table&#x20;2</xref> elucidates the parameters. In this study, we set up the parameters of neural networks depending on 1) prior knowledge from neuroscience (<xref ref-type="bibr" rid="B33">Rind and Bramwell, 1996</xref>; <xref ref-type="bibr" rid="B40">Simmons and Rind, 1997</xref>; <xref ref-type="bibr" rid="B35">Rind et&#x20;al., 2016</xref>), 2) previous experience in modelling and experimenting with the LGMD-1 and the LGMD-2 neuron models (<xref ref-type="bibr" rid="B16">Fu et&#x20;al., 2018b</xref>; <xref ref-type="bibr" rid="B13">Fu et&#x20;al., 2019b</xref>), 3) considerations of fast implementation with optimisation as embedded vision systems for online visual processing (<xref ref-type="bibr" rid="B23">Hu et&#x20;al., 2018</xref>). More concretely, the convolutional matrices <italic>W</italic>
<sub>1</sub>, <italic>W</italic>
<sub>2</sub>, <italic>W</italic>
<sub>3</sub>, and <italic>W</italic>
<sub>4</sub> are not only based on previous biological and computational studies, but also optimised by &#x201c;bitwise operation&#x201d; on the embedded system. There are currently no feedback pathways or learning steps involved in the proposed hybrid neural networks. The parameters given in <xref ref-type="table" rid="T2">Table&#x20;2</xref> have been systematically investigated in our previous bio-robotic studies with optimisation (<xref ref-type="bibr" rid="B16">Fu et&#x20;al., 2018b</xref>; <xref ref-type="bibr" rid="B13">Fu et&#x20;al., 2019b</xref>; <xref ref-type="bibr" rid="B14">Fu et&#x20;al., 2017</xref>). In addition, the very limited computational resources of the micro-robot preclude the use of online learning algorithms. Therefore, aligned with previous settings, the emphasis herein is laid on investigating the integration of both LGMDs inspired visual systems in robotic implementation of dynamic visual scenes.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Setting network parameters.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Parameter</th>
<th align="center">Description</th>
<th align="center">Value</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">
<italic>n</italic>
<sub>
<italic>p</italic>
</sub>
</td>
<td align="left">Number of persistent frames</td>
<td align="left">0</td>
</tr>
<tr>
<td align="left">{<italic>C</italic>, <italic>R</italic>}</td>
<td align="left">Columns, rows of the robot visual field</td>
<td align="left">{99, 72}</td>
</tr>
<tr>
<td align="left">
<italic>&#x3c4;</italic>
<sub>
<italic>i</italic>
</sub>
</td>
<td align="left">Time interval in digital signal</td>
<td align="left">1,000/30</td>
</tr>
<tr>
<td align="left">
<italic>&#x3c4;</italic>
<sub>
<italic>f</italic>
</sub>
</td>
<td align="left">Time constant in FFI-TD</td>
<td align="left">90</td>
</tr>
<tr>
<td align="left">
<italic>&#x3b1;</italic>
<sub>2</sub>
</td>
<td align="left">Small coefficient in LA</td>
<td align="left">0.1</td>
</tr>
<tr>
<td align="left">
<italic>r</italic>
</td>
<td align="left">Radius of convolution kernel</td>
<td align="left">1</td>
</tr>
<tr>
<td align="left">
<italic>&#x3c4;</italic>
<sub>1</sub>
</td>
<td align="left">Delay in LGMD-1 ON channels</td>
<td align="left">30 in nearest cells, 60 diagonal</td>
</tr>
<tr>
<td align="left">
<italic>&#x3c4;</italic>
<sub>2</sub>
</td>
<td align="left">Delay in LGMD-1 OFF channels</td>
<td align="left">30 in nearest cells, 60 diagonal</td>
</tr>
<tr>
<td align="left">
<italic>&#x3c4;</italic>
<sub>3</sub>
</td>
<td align="left">Delay in LGMD-2 ON channels</td>
<td align="left">15 in centre, 30 nearest, 45 diagonal</td>
</tr>
<tr>
<td align="left">
<italic>&#x3c4;</italic>
<sub>4</sub>
</td>
<td align="left">Delay in LGMD-2 OFF channels</td>
<td align="left">60 in centre, 120 nearest, 180 diagonal</td>
</tr>
<tr>
<td align="left">{<italic>w</italic>
<sub>1</sub>, <italic>w</italic>
<sub>2</sub>}</td>
<td align="left">Local inhibition biases in LGMD-1</td>
<td align="left">{0.3, 0.6}</td>
</tr>
<tr>
<td align="left">
<italic>C</italic>
<sub>
<italic>&#x3c9;</italic>
</sub>
</td>
<td align="left">Constant in G units</td>
<td align="left">4</td>
</tr>
<tr>
<td align="left">&#x394;<sub>
<italic>C</italic>
</sub>
</td>
<td align="left">Small real number in G units</td>
<td align="left">0.01</td>
</tr>
<tr>
<td align="left">
<italic>&#x3c4;</italic>
<sub>
<italic>s</italic>
</sub>
</td>
<td align="left">Time constant in spike frequency adaptation</td>
<td align="left">500 &#x223c; 1,000</td>
</tr>
<tr>
<td align="left">
<italic>T</italic>
<sub>
<italic>ffi</italic>
</sub>
</td>
<td align="left">Local threshold in activation of FFI</td>
<td align="left">10</td>
</tr>
<tr>
<td align="left">
<italic>&#x3b1;</italic>
<sub>7</sub>
</td>
<td align="left">Scale parameter in spiking mechanism</td>
<td align="left">3 &#x223c; 6</td>
</tr>
<tr>
<td align="left">
<italic>T</italic>
<sub>
<italic>spi</italic>
</sub>
</td>
<td align="left">Spiking threshold</td>
<td align="left">0.7</td>
</tr>
<tr>
<td align="left">
<italic>T</italic>
<sub>
<italic>col</italic>
</sub>
</td>
<td align="left">Collision warning threshold</td>
<td align="left">40</td>
</tr>
<tr>
<td align="left">
<italic>n</italic>
<sub>
<italic>t</italic>
</sub>
</td>
<td align="left">Time window to update spike frequency</td>
<td align="left">10</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s2-3">
<title>2.3 Robotic Platform</title>
<p>Within this subsection, we introduce the robotic platform, called <italic>ColCOS</italic>&#x3a6; (<xref ref-type="bibr" rid="B44">Sun et&#x20;al., 2019</xref>), used to simulate different traffic scenarios in this research. As shown in <xref ref-type="fig" rid="F3">Figure&#x20;3</xref>, the platform mainly consists of artificial multi-pheromone module, and autonomous micro-mobile robots.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Overview of the robotic platform consisting of multiple-pheromone module and micro-mobile robots. The pheromone module is composed of a camera system connecting a computer and a TV arena. The micro-mobile robot comprises a visual sense board implementing the proposed visual systems, and a motion board for route following and emergency braking. Four colour sensors are marked in the bottom view of the robot used for sensing optically rendered pheromone cues displayed upon the LCD screen. The ID-pattern on top of robot is used to run a real time localisation system.</p>
</caption>
<graphic xlink:href="frobt-08-529872-g003.tif"/>
</fig>
<sec id="s2-3-1">
<title>2.3.1 Artificial Pheromones Module</title>
<p>Firstly, the multiple pheromones module was originally developed to conduct the swarm robotic experiments mimicking behaviours of social insects with interactions between multiple pheromones representing different biochemical substances (<xref ref-type="bibr" rid="B44">Sun et&#x20;al., 2019</xref>). More specifically, as illustrated in <xref ref-type="fig" rid="F3">Figure&#x20;3</xref>, the module consists of a camera system, a computer, and an arena with an LCD screen acting as the ground. The computer runs a pattern recognition algorithm in real time (<xref ref-type="bibr" rid="B27">Krajn&#xed;k et&#x20;al., 2014</xref>), which can track and localise many ID-specific patterns with images at 1,920 (pixels in width) &#xd7; 1,080 (pixels in height) from the top-down facing camera, simultaneously, so as to record coordinates of robots with respect to time. In addition, the computer can render the virtual pheromone components, optically, represented by colour tracks or spots on the LCD screen indicating meaningful fields for mobile robots. Here the virtual pheromones are applied to render road maps and signals in the context. As shown in <xref ref-type="fig" rid="F4">Figure&#x20;4</xref>, since the nature of the pheromone field displayed on the LCD screen is a colour image, different traffic paradigms can be formed in which the roads are drawn by white tracks with boundaries, and the traffic lights and signals are embodied by green/red colour spots with appropriate size on the roads. Accordingly, different traffic sections like intersections, junctions, and even more complex road networks can be established with scalability. <xref ref-type="fig" rid="F4">Figure&#x20;4</xref> shows some examples in our experiments. Together with periphery patterned walls (urban skyline), the arena is well constructed for our specific goal of simulating robotic traffic to test the proposed bio-inspired visual systems.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Using virtual pheromones to mimic roads and traffic lights: arrows indicate steering directions of robots. The roads are unidirectional.</p>
</caption>
<graphic xlink:href="frobt-08-529872-g004.tif"/>
</fig>
</sec>
<sec id="s2-3-2">
<title>2.3.2&#x20;Micro-Mobile Robot</title>
<p>As illustrated in <xref ref-type="fig" rid="F3">Figure&#x20;3</xref>, the autonomous mobile robot used in this study is called <italic>Colias-IV</italic> (<xref ref-type="bibr" rid="B23">Hu et&#x20;al., 2018</xref>), which includes mainly two components that provide different functions, namely the <italic>Colias</italic> Basic Unit (CBU), and the <italic>Colias</italic> Sensing Unit (CSU).</p>
<p>The CBU serves preliminary robot features such as motion, power management and some basic sensing like the bumper infra-red (IR) sensors in <xref ref-type="fig" rid="F3">Figure&#x20;3</xref>. The more detailed configuration can be found in our recent paper (<xref ref-type="bibr" rid="B23">Hu et&#x20;al., 2018</xref>), which is not reiterated here. Specifically for the proposed tasks, the CBU is assembled with four colour sensors with high sensitivity on its bottom (see <xref ref-type="fig" rid="F3">Figure&#x20;3</xref>). When the robot is running in the arena, these sensors can pick up optical pheromone information on the LCD screen, and then the robot behaviours are adjusted accordingly.</p>
<p>A key factor herein is tightly following the paths. We propose a control strategy that the two-side colour sensors are applied to bind the robot trajectory on the roads, as explained in <xref ref-type="fig" rid="F5">Figure&#x20;5</xref>. Moreover, the front and rear light sensors play roles of recognising traffic signals including red (stop) and green (go) lights in the city traffic system, as well as accelerating and decelerating fields in the highway traffic system. More concrete control logic will be presented in the following <xref ref-type="sec" rid="s3">Section&#x20;3</xref>.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Robot route following strategy: keeping two-side colour sensors between road boundaries.</p>
</caption>
<graphic xlink:href="frobt-08-529872-g005.tif"/>
</fig>
<p>The proposed LGMDs inspired visual systems are implemented for online visual processing. Here the CSU supports this where an ARM Cortex-M4F core micro controller is deployed as the main processor to handle intensive image processing. A monocular camera system with a low voltage CMOS image sensor OV7670 module is utilised in the CSU. With compact size, the camera is capable of operating up to 30 frames per second (fps) in VGA mode with output support for various colour formats. The horizontal viewing angle is approximately 70&#xb0;. As a trade-off between processing efficiency and image quality, the resolution is configured at 72 &#xd7; 99 pixels on 30 fps, with output format of 8-bit YUV422. Since the LGMDs only process grey-scale images, the camera setting fits it well, i.e.,&#x20;the proposed image format separates each pixel&#x2019;s colour channels from the brightness channel; thus no additional colour transformation is required. More details of the CSU can be found in (<xref ref-type="bibr" rid="B23">Hu et&#x20;al., 2018</xref>). Importantly, when assessing the proposed LGMDs inspired visual systems, the optical sensor is applied as the only collision detector.</p>
<p>Furthermore, the micro-robot can communicate with a host computer via a Bluetooth device connecting the CSU (<xref ref-type="bibr" rid="B23">Hu et&#x20;al., 2018</xref>). Here we use it for retrieving the hybrid spiking frequency. With limited processing memory space and transmission ability, a current drawback of the robot is that it cannot send back real-time image views accompanied by motion.</p>
</sec>
</sec>
</sec>
<sec id="s3">
<title>3 Setting Experiments</title>
<p>Within this section, we introduce the experimental settings on multi-robot traffic scenarios. Generally speaking, the proposed LGMDs inspired visual systems are tested in two types of roadmaps: the city ring roads, and the highway. With regard to the primary goal of this research to corroborate the LGMDs&#x2019; robustness in critical robot traffic, the roadmaps are designed with accident-prone sections resembling real world circumstances where crash often happens, e.g., the intersection challenge (<xref ref-type="bibr" rid="B8">Colombo and Vecchio, 2012</xref>). In addition, we also carry out comparative experiments on different densities of moving agents in the arena, and two collision sensing strategies between the bio-inspired vision and the assembled IR bumper.</p>
<p>Regarding the avoidance, the robot brakes abruptly once detecting potential crash and then resumes moving forward after a short break. Since we herein focus on corroborating the robustness of visual systems, the mechanical control for collision avoidance is out of the scope. Notably, the evasive behaviour matches neither the locust&#x2019;s jumping/hiding, nor the many on-road situations of ground vehicles. It is also worth emphasising that there are no human interventions in the autonomous running of multiple mobile robots, except for incidents in which a robot fails in route following. Each kind of robot traffic lasts for 1&#xa0;hour. <xref ref-type="fig" rid="F6">Figures 6</xref>, <xref ref-type="fig" rid="F7">7</xref> show the experimental settings from the top-down view. <xref ref-type="fig" rid="F8">Figure&#x20;8</xref> displays some arena inside views in experiments. <xref ref-type="other" rid="alg1">Algorithm 1</xref> and <xref ref-type="other" rid="alg2">Algorithm 2</xref> articulate the agent control strategies in the two kinds of traffic systems, respectively.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>Illustration of two kinds of city ring road maps (with and without signals) from the top-down camera&#x2019;s view including lanes, intersections, robot vehicles and red-green switching lights control at every crossroad. All the robots navigate unidirectionally.</p>
</caption>
<graphic xlink:href="frobt-08-529872-g006.tif"/>
</fig>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>Illustration of highway traffic system including two lanes with distinct speed settings. The red signals indicate a deceleration zone as the entrance of low-speed lane. The green spot is an acceleration signal as the exit of low-speed lane. The switcher changes every 6&#xa0;s. Both the two lanes are unidirectional.</p>
</caption>
<graphic xlink:href="frobt-08-529872-g007.tif"/>
</fig>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption>
<p>Illustration of arena inside views. The surroundings are decorated with urban skyline patterns.</p>
</caption>
<graphic xlink:href="frobt-08-529872-g008.tif"/>
</fig>
<p>Here we also elucidate the relations between proposed visual systems model and control strategy. First, the model is treated as an internal component of the robot for real-time collision sensing. As presented in <xref ref-type="other" rid="alg1">Algorithm 1</xref> and <xref ref-type="other" rid="alg2">Algorithm 2</xref>, the model is solely responsible for detecting potential collision; once a danger is alerted (<xref ref-type="disp-formula" rid="e29">Eq. 29</xref>), a corresponding avoidance command is sent to the motion control of robot which is prioritised over any other control logic conducted by environment. Second, the pheromone module herein is applied only to render the external &#x201c;environment&#x201d; for multi-robots, in order to construct the roads and signals that are followed and recognised by the robots. Compared to other pheromone based swarm robotic studies, e.g., (<xref ref-type="bibr" rid="B44">Sun et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B29">Liu et&#x20;al., 2020</xref>), the pheromones here are not released by engaged robots. More specific traffic set-ups are introduced in the following subsections.</p>
<sec id="s3-1">
<title>3.1 Setting the City Traffic System</title>
<p>Firstly, the city robot traffic consists of the roadmaps without traffic lights control, and mixed by red (stop) and green (go) spot signals. Both roadmaps include straight and curving roads, and many intersections (see <xref ref-type="fig" rid="F6">Figure&#x20;6</xref>). In addition, all the roads are unidirectional loops. As introduced previously, the robot navigation obeys the optical routes where the two boundaries confine its trajectory. In addition to that, the traffic lights also play roles of robot motion control. Algorithm 1 presents this kind of control logic. More concretely, if either the front or rear light sensor detects the red light, the robot will stop for a while until the light switches to green. The robot behaviour is set to go forward as default until potential crash or red light detected. The red and green lights switch every several seconds, constantly. Most importantly, all the robot agents prioritise the reaction to collision alert over traffic lights control.</p>
<p>In the city traffic without lights control, we also investigate the density of mobile robots at two different populations (see <xref ref-type="fig" rid="F6">Figure&#x20;6</xref>). The visual scene undoubtedly becomes more complex and dynamic with more robot agents participating in the&#x20;traffic system. In case of scenarios with lights control, we also set up the traffic that consists of unruly agents in half population to break the red-light control. Therefore, the intersections turn out to be the most dangerous zones resembling real-world intersection challenge (<xref ref-type="bibr" rid="B8">Colombo and Vecchio, 2012</xref>).</p>
<p>
<statement content-type="algorithm" id="alg1">
<label>
<bold>Algorithm 1:</bold>
</label>
<p>Agent Control Strategy in the City Traffic System.<list list-type="simple">
<list-item>
<p>1&#x20;<bold>while</bold> <italic>Power on</italic>&#x20;<bold>do</bold>
</p>
</list-item>
<list-item>
<p>2 &#x2003;Set initial forward speed randomly between 10 &#x223c; 14&#xa0;cm/s;</p>
</list-item>
<list-item>
<p>3 &#x2003;<bold>if</bold> <italic>Two-side colour sensors are between road boundaries</italic>&#x20;<bold>then</bold>
</p>
</list-item>
<list-item>
<p>4 &#x2003;&#x2003;Follow route and go forward;</p>
</list-item>
<list-item>
<p>5 &#x2003;&#x2003;<bold>if <italic>Potential collision sensed</italic>&#x20;then</bold>
</p>
</list-item>
<list-item>
<p>6 &#x2003;&#x2003;&#x2003;Brake abruptly then stop for approximately 2&#xa0;s;</p>
</list-item>
<list-item>
<p>7 &#x2003;&#x2003;<bold>else</bold>
</p>
</list-item>
<list-item>
<p>8 &#x2003;&#x2003;&#x2003;<bold>If</bold> red light detected by front or rear light sensor&#x20;<bold>then</bold>
</p>
</list-item>
<list-item>
<p>9 &#x2003;&#x2003;&#x2003;&#x2003;Halt the movement until green light detected;</p>
</list-item>
<list-item>
<p>10 &#x2003;&#x2003;&#x2003;<bold>else</bold>
</p>
</list-item>
<list-item>
<p>11 &#x2003;&#x2003;&#x2003;&#x2003;Move forward and follow path;</p>
</list-item>
<list-item>
<p>12 &#x2003;<bold>else</bold>
</p>
</list-item>
<list-item>
<p>13 &#x2003;&#x2003;Agent is derailed in collision or route following;</p>
</list-item>
<list-item>
<p>14 &#x2003;&#x2003;Agent is manually replaced on the&#x20;path</p>
</list-item>
<list-item>
<p>15&#x20;<bold>end</bold>
</p>
</list-item>
</list>
</p>
</statement>
</p>
</sec>
<sec id="s3-2">
<title>3.2 Setting the Highway Traffic System</title>
<p>Compared to the city traffic system that consists of many intersections as critical zones to challenge the proposed LGMDs inspired visual systems, the highway traffic system includes two lanes, i.e.,&#x20;low-speed and high-speed ring roads in loop, a junction where two lanes merge, a shunting mechanism to regularly separate robot vehicles into different lanes, and two light signals as the acceleration and deceleration indicators for agents, as illustrated in <xref ref-type="fig" rid="F7">Figure&#x20;7</xref>. As a result, here the road junction and high speed are two leading factors of collision. <xref ref-type="other" rid="alg2">Algorithm 2</xref> presents this type of control logic. Both the two lanes are also configured as uni-directional with a shunting mechanism to separate robots with equal opportunities to follow either lane. To change the robot&#x2019;s speed, two types of signals are rendered by pheromones at the entrance and exit of low-speed lane, respectively (see <xref ref-type="fig" rid="F7">Figure&#x20;7</xref>). Accordingly, the robot accelerates to enter the high-speed lane whilst decelerates preceding the low-speed&#x20;lane.</p>
<p>
<statement content-type="algorithm" id="alg2">
<label>
<bold>Algorithm 2:</bold>
</label>
<p>Agent Control Strategy in the Highway Traffic System.<list list-type="simple">
<list-item>
<p>1 Navigation begins at the entrance of low-speed&#x20;lane</p>
</list-item>
<list-item>
<p>2 Initial forward speed is randomly set between 10 &#x223c; 14&#xa0;<italic>cm</italic>/<italic>s</italic>
</p>
</list-item>
<list-item>
<p>3&#x20;<bold>While</bold> <italic>Power on</italic>&#x20;<bold>do</bold>
</p>
</list-item>
<list-item>
<p>4 &#x2003;<bold>If</bold> <italic>Two-side colour sensors are between road boundaries</italic>&#x20;<bold>then</bold>
</p>
</list-item>
<list-item>
<p>5 &#x2003;&#x2003;Follow route and go forward</p>
</list-item>
<list-item>
<p>6 &#x2003;&#x2003;<bold>If</bold> potential collision sensed&#x20;<bold>then</bold>
</p>
</list-item>
<list-item>
<p>7 &#x2003;&#x2003;&#x2003;Brake abruptly then stop for approximately 2&#xa0;s;</p>
</list-item>
<list-item>
<p>8 &#x2003;&#x2003;&#x2003; resume going forward and following&#x20;route</p>
</list-item>
<list-item>
<p>9 &#x2003;&#x2003;<bold>else</bold>
</p>
</list-item>
<list-item>
<p>__</p>
</list-item>
<list-item>
<p>10 &#x2003;&#x2003;<bold>If</bold> <italic>Acceleration signal detected by front or rear light sensor</italic>&#x20;<bold>then</bold>
</p>
</list-item>
<list-item>
<p>11 &#x2003;&#x2003;&#x2003;Entrance of high-speed lane reached;</p>
</list-item>
<list-item>
<p>12 &#x2003;&#x2003;&#x2003;Agent forward speed increases to around 21&#xa0;<italic>cm</italic>/<italic>s</italic> within 2&#xa0;s</p>
</list-item>
<list-item>
<p>13 &#x2003;&#x2003;<bold>else</bold>
</p>
</list-item>
<list-item>
<p>__</p>
</list-item>
<list-item>
<p>14 &#x2003;&#x2003;<bold>If</bold> <italic>Deceleration signal detected by front or rear light sensor</italic>&#x20;<bold>then</bold>
</p>
</list-item>
<list-item>
<p>15 &#x2003;&#x2003;&#x2003;Entrance of low-speed lane reached</p>
</list-item>
<list-item>
<p>16 &#x2003;&#x2003;&#x2003;Agent forward speed decreases back to the origin within 2&#xa0;s</p>
</list-item>
<list-item>
<p>17 &#x2003;&#x2003;<bold>else</bold>
</p>
</list-item>
<list-item>
<p>__</p>
</list-item>
<list-item>
<p>18 &#x2003;<bold>else</bold>
</p>
</list-item>
<list-item>
<p>19 &#x2003;&#x2003;Agent is derailed in collision or route following</p>
</list-item>
<list-item>
<p>20 &#x2003;&#x2003;Agent is manually replaced on the&#x20;path</p>
</list-item>
<list-item>
<p>21&#x20;<bold>end</bold>
</p>
</list-item>
</list>
</p>
</statement>
</p>
</sec>
</sec>
<sec id="s4">
<title>4 Results and Analysis</title>
<p>Within this section, we present the experimental results with analysis. Firstly, we demonstrate typical events of robot-to-robot interaction in the traffic systems, and visual systems output, i.e.,&#x20;the spike frequency of the hybrid LGMDs neural networks in the three types of investigated robot traffic scenarios. Secondly, the statistical results are given with event density maps. Lastly, we compare the proposed bio-inspired vision with another physical collision sensor in critical robot traffic. A video demo to illustrate our experiments is given in <xref ref-type="sec" rid="s11">Supplementary Video</xref>.</p>
<sec id="s4-1">
<title>4.1 Metrics</title>
<p>Regarding the statistical results, the overall collision avoidance rate (CAR) herein is used to evaluate the interactions between robot vehicles via the aforementioned localisation system, which is calculated by the following equations:<disp-formula id="e30">
<mml:math id="m31">
<mml:mi>C</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>R</mml:mi>
<mml:mspace width="0.17em"/>
<mml:mo>&#x3d;</mml:mo>
<mml:mspace width="0.17em"/>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>a</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:mtext>where</mml:mtext>
<mml:mspace width="0.3333em"/>
<mml:msub>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mspace width="0.17em"/>
<mml:mo>&#x3d;</mml:mo>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:mi>E</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:msub>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>a</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:mo>&#x3d;</mml:mo>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:munderover accentunder="false" accent="false">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:mi>c</mml:mi>
<mml:mi>a</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>.</mml:mo>
</mml:math>
<label>(30)</label>
</disp-formula>
</p>
<p>
<italic>E</italic> and <italic>ca</italic> stand for the total robot-to-robot events and the collision avoidance with respect to time, respectively. <italic>T</italic> indicates the total running time of the localisation system in experiments. In this work, stop of the agent indicates a robot-to-robot event, thus:<disp-formula id="e31">
<mml:math id="m32">
<mml:mi>E</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="{" close="">
<mml:mrow>
<mml:mtable class="aligned">
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mn>1</mml:mn>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:mtext>if</mml:mtext>
<mml:mspace width="0.3333em"/>
<mml:mtext>agent stops</mml:mtext>
<mml:mspace width="0.3333em"/>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mn>0</mml:mn>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:mtext>otherwise</mml:mtext>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:math>
<label>(31)</label>
</disp-formula>With regard to the multi-robot localisation system (see <xref ref-type="fig" rid="F3">Figure&#x20;3</xref>), an accomplishment of collision avoidance should satisfy the following criterion:<disp-formula id="e32">
<mml:math id="m33">
<mml:mi>c</mml:mi>
<mml:mi>a</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:mo>&#x3d;</mml:mo>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:mfenced open="{" close="">
<mml:mrow>
<mml:mtable class="aligned">
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mn>1</mml:mn>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:mtext>if</mml:mtext>
<mml:mspace width="0.3333em"/>
<mml:mtext>agent stops</mml:mtext>
<mml:mspace width="0.3333em"/>
<mml:mtext>and</mml:mtext>
<mml:mspace width="0.3333em"/>
<mml:msub>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>q</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2265;</mml:mo>
<mml:mi>&#x3b3;</mml:mi>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mn>0</mml:mn>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:mtext>otherwise</mml:mtext>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:math>
<label>(32)</label>
</disp-formula>
<disp-formula id="e33">
<mml:math id="m34">
<mml:mtext>where</mml:mtext>
<mml:mspace width="0.3333em"/>
<mml:msub>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>q</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>q</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
<mml:mo>&#x2b;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>q</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:msqrt>
<mml:mo>.</mml:mo>
</mml:math>
<label>(33)</label>
</disp-formula>
<italic>d</italic> is the Euclidean distance between robot <italic>p</italic> at position (<italic>x</italic>
<sub>
<italic>p</italic>
</sub>, <italic>y</italic>
<sub>
<italic>p</italic>
</sub>) and robot <italic>q</italic> at position (<italic>x</italic>
<sub>
<italic>q</italic>
</sub>, <italic>y</italic>
<sub>
<italic>q</italic>
</sub>) in the two-dimensional image plane, and <italic>p</italic>, <italic>q</italic> denote the time-varying coordinates of every two mobile robots given time <italic>t</italic>. <italic>&#x3b3;</italic> &#x3d; 20 (in pixels) is the predefined distance threshold to decide a successful collision avoidance in the critical robot traffic. Moreover, since the intersections and junctions are the most challenging zones for the robots that resemble the real world on-road situations, we also compare the safe passing rates (<bold>PR</bold>) on the intersections and junctions (<italic>PR</italic>
<sub>1</sub>), as well as other road sections including the straight and curving roads (<italic>PR</italic>
<sub>2</sub>). The calculations are comparable to the CAR with regional information as follows:<disp-formula id="e34">
<mml:math id="m35">
<mml:mi>P</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>a</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>E</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>r</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>o</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:mi>P</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>a</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>E</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>r</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>o</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
<mml:mo>,</mml:mo>
</mml:math>
<label>(34)</label>
</disp-formula>
<disp-formula id="e35">
<mml:math id="m36">
<mml:mtext>where</mml:mtext>
<mml:mspace width="0.3333em"/>
<mml:mi>E</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>r</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>o</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:mo>&#x2b;</mml:mo>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:mi>E</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>r</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>o</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:mo>&#x3d;</mml:mo>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:mn>1</mml:mn>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:msub>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>a</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>a</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msub>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:mo>&#x3d;</mml:mo>
<mml:mspace width="0.17em"/>
<mml:mspace width="0.17em"/>
<mml:msub>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>a</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>.</mml:mo>
</mml:math>
<label>(35)</label>
</disp-formula>
<italic>EPro</italic>
<sub>1</sub> and <italic>EPro</italic>
<sub>2</sub> denote the probability for critical events of interactions between engaged robots at the intersections/junctions and the other road sections, respectively.</p>
</sec>
<sec id="s4-2">
<title>4.2 Robot-to-Robot Interactions</title>
<p>To illustrate how the autonomous micro-robots behave in the simulated traffic systems guided by the collision prediction visual systems, some typical robot-to-robot interactions are depicted in <xref ref-type="fig" rid="F9">Figure&#x20;9</xref>. It appears that the avoidance behaviours are most likely aroused at some critical moments, for example, two robots meeting at the junction (see <xref ref-type="fig" rid="F9">Figure&#x20;9C</xref>), queueing effect by robots on the same lane, yet at different speeds (see <xref ref-type="fig" rid="F9">Figure&#x20;9D</xref>). In other normal situations (see <xref ref-type="fig" rid="F9">Figures 9A,B</xref>), the robots navigate smoothly without collision avoidance. Interestingly, when the robot on curving road is facing a nearby-lane oncoming agent, there is usually an alert for a potential crash that well matches the real world driving behaviour (<xref ref-type="bibr" rid="B42">Sivaraman and Trivedi, 2013</xref>) (see <xref ref-type="fig" rid="F9">Figure&#x20;9E</xref>). In the city traffic system, intersections are the most challenging places for robots to predict imminent crashes. When two vehicles meet at an intersection, the urgent crossing of one agent in a very short distance could excite both the LGMDs to fire together for a positive alert (see <xref ref-type="fig" rid="F9">Figure&#x20;9F</xref>). In this regard, the mimicked red-green light signals can help to alleviate the risk at intersections to a large extent like the real world on-road situations. Here we nevertheless query whether the proposed visual systems, on their own, can cope with such dangerous circumstances without the traffic signal control. The comparative experiments will be carried out in the following subsection.</p>
<fig id="F9" position="float">
<label>FIGURE 9</label>
<caption>
<p>Illustrations of typical traffic phases of robot-to-robot interactions. Each phase is shown by three subsequent snapshots. The trajectories are depicted in colour lines each representing an ID-identified agent. The sites of crash avoidance are marked by green circles.</p>
</caption>
<graphic xlink:href="frobt-08-529872-g009.tif"/>
</fig>
</sec>
<sec id="s4-3">
<title>4.3 Neural Network Response</title>
<p>To articulate the responses of LGMDs hybrid neural networks in different robot traffic scenes, <xref ref-type="fig" rid="F10">Figure&#x20;10</xref> illustrates three sets of model output, that is, the hybrid firing rate. Considering the introduced two types of traffic scenarios, we remotely collected the data from a robot agent interacting with others. It can be seen from the results that a large number of collision alerts have been signalled by the LGMDs model in the embedded vision of tested robot during navigation. In addition, the neural networks respond much more constantly in the highway traffic system that consists of high-speed robot vehicles and a junction.</p>
<fig id="F10" position="float">
<label>FIGURE 10</label>
<caption>
<p>The outputs of proposed hybrid neural network model, i.e.,&#x20;the spike rate from a robot agent interacting within three multi-agent traffic systems, each lasting for 8&#xa0;min: the horizontal dashed line indicates the alert level of firing&#x20;rate.</p>
</caption>
<graphic xlink:href="frobt-08-529872-g010.tif"/>
</fig>
</sec>
<sec id="s4-4">
<title>4.4 Performance in Critical Robot Traffic</title>
<p>This subsection reports on the performance of the proposed collision prediction visual systems under constrained computation power against different robot traffic challenges. The overall CAR is given in <xref ref-type="table" rid="T3">Table&#x20;3</xref>. The comparative results on specific PR are given in <xref ref-type="table" rid="T4">Table&#x20;4</xref>.</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>CAR in multi-robot traffic.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Traffic system type</th>
<th align="center">Total events</th>
<th align="center">Crash</th>
<th align="center">CAR (%)</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">City traffic: No signals (5 agents)</td>
<td align="char" char=".">425</td>
<td align="char" char=".">81</td>
<td align="char" char=".">80.94</td>
</tr>
<tr>
<td align="left">City traffic: No signals (10 agents)</td>
<td align="char" char=".">1,239</td>
<td align="char" char=".">248</td>
<td align="char" char=".">79.98</td>
</tr>
<tr>
<td align="left">City traffic: Red-green lights (5 agents)</td>
<td align="char" char=".">545</td>
<td align="char" char=".">39</td>
<td align="char" char=".">
<bold>92.84</bold>
</td>
</tr>
<tr>
<td align="left">City traffic: Red-green lights (2/5 unruly agents)</td>
<td align="char" char=".">737</td>
<td align="char" char=".">129</td>
<td align="char" char=".">82.50</td>
</tr>
<tr>
<td align="left">Highway traffic (5 agents)</td>
<td align="char" char=".">1,199</td>
<td align="char" char=".">240</td>
<td align="char" char=".">79.98</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T4" position="float">
<label>TABLE 4</label>
<caption>
<p>PR in multi-robot traffic.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th colspan="5" align="left">Intersection and junction: EPro<sub>1</sub> and PR<sub>1</sub>, other sections: EPro<sub>2</sub> and PR<sub>2</sub>
</th>
</tr>
<tr>
<th align="left">Traffic system type</th>
<th align="center">EPro<sub>1</sub> (%)</th>
<th align="center">PR<sub>1</sub> (%)</th>
<th align="center">EPro<sub>2</sub> (%)</th>
<th align="center">PR<sub>2</sub> (%)</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">City traffic: No signals (5 agents)</td>
<td align="char" char=".">53.65</td>
<td align="char" char=".">71.49</td>
<td align="char" char=".">46.35</td>
<td align="char" char=".">91.88</td>
</tr>
<tr>
<td align="left">City traffic: No signals (10 agents)</td>
<td align="char" char=".">55.21</td>
<td align="char" char=".">73.54</td>
<td align="char" char=".">44.79</td>
<td align="char" char=".">87.93</td>
</tr>
<tr>
<td align="left">City traffic: Traffic lights (5 agents)</td>
<td align="char" char=".">
<bold>60.0</bold>
</td>
<td align="char" char=".">
<bold>91.74</bold>
</td>
<td align="char" char=".">40.0</td>
<td align="char" char=".">
<bold>94.50</bold>
</td>
</tr>
<tr>
<td align="left">City traffic: Traffic lights (2 unruly in 5)</td>
<td align="char" char=".">59.43</td>
<td align="char" char=".">77.85</td>
<td align="char" char=".">40.57</td>
<td align="char" char=".">89.30</td>
</tr>
<tr>
<td align="left">Highway traffic (5 agents)</td>
<td align="char" char=".">43.20</td>
<td align="char" char=".">75.29</td>
<td align="char" char=".">
<bold>56.80</bold>
</td>
<td align="char" char=".">83.55</td>
</tr>
</tbody>
</table>
</table-wrap>
<sec id="s4-4-1">
<title>4.4.1 Performance in the City Traffic System</title>
<p>In the city traffic system, we carry out systematic and comparative experiments involving several cases. In the first case, the robot traffic has no signal controls at intersections. We also look deeper into the density effect on collision prediction performance. <xref ref-type="fig" rid="F11">Figure&#x20;11</xref> illustrates the event and density maps of all micro-robot agents engaging in the ring road traffic for 1-h implementation.</p>
<fig id="F11" position="float">
<label>FIGURE 11</label>
<caption>
<p>Illustration of event and density maps from top-down view of camera including avoidance and crash in the city traffic system at two comparative populations of robot vehicles without light signals at intersections. <bold>(A, B)</bold> Event maps: red and green circles indicate the positions of crash and avoidance events between robot vehicles, respectively. <bold>(C&#x2013;F)</bold> Density maps: X-Y plane denotes the image coordinates.</p>
</caption>
<graphic xlink:href="frobt-08-529872-g011.tif"/>
</fig>
<p>In the second case, the red and green switching traffic lights are used as the auxiliary signals for robot flows control at intersections. An interesting episode is plotted in this case by mixing unruly robot agents not obeying the law of traffic signals, i.e.,&#x20;red to stop and green to go, that mimic the drivers who always break the traffic rules at intersections leading to immense on-road safety issue. <xref ref-type="fig" rid="F12">Figure&#x20;12</xref> illustrates the corresponding results at this&#x20;point.</p>
<fig id="F12" position="float">
<label>FIGURE 12</label>
<caption>
<p>Illustration of event and density maps in the city traffic system (5 robot vehicles) with signals control and engaged unruly agents at intersections.</p>
</caption>
<graphic xlink:href="frobt-08-529872-g012.tif"/>
</fig>
<p>Together with the statistical results in <xref ref-type="table" rid="T3">Tables 3</xref>, <xref ref-type="table" rid="T4">4</xref>, we have the following observations on the experiments of the city traffic system:<list list-type="simple">
<list-item>
<p>1) <xref ref-type="table" rid="T4">Table&#x20;4</xref> shows that more than half of the critical events take place at intersections in all the imitations of city ring road traffic (see EPro<sub>1</sub> in <xref ref-type="table" rid="T4">Table&#x20;4</xref>), indicating that our robot traffic could reflect real world road challenges (<xref ref-type="bibr" rid="B8">Colombo and Vecchio, 2012</xref>).</p>
</list-item>
<list-item>
<p>2) Compared to the performance at intersections, the PR is considerably higher on the straight and curving roads (all above 80%). To be more intuitive, <xref ref-type="fig" rid="F11">Figures 11</xref>, <xref ref-type="fig" rid="F12">12</xref> also demonstrate that crashes most frequently occur at intersections with relatively lower PR, as reflected by the higher crash densities there; on the other hand, the PR is notably higher in other road sections, corresponding to higher avoidance densities.</p>
</list-item>
<list-item>
<p>3) The overall CAR peaks in the case of the city traffic system with lights and without unruly agents (92.84%). Compared to that, the CAR reaches its lowest value when red-green signals are lacking to relieve the traffic flows at intersections (80.94%).</p>
</list-item>
<list-item>
<p>4) In terms of density comparison, there is merely a tiny difference in both the CAR and PR between the two investigated populations, which reveals that the proposed visual systems perform robustly for collision prediction even in a more dynamic environment.</p>
</list-item>
</list>
</p>
<p>Generally speaking, the proposed bio-inspired hybrid neural networks work effectively and consistently on collision prediction in the city traffic system, although the intersections still pose challenges on timely detection-and-avoidance using the visual approach as the only modality. However, we believe this can be improved by increasing the view angle of the optical sensor, as the current view of the frontal camera can only reach approximately 70&#xb0;. The risk of intersection could also be alleviated by a sensor fusion strategy, or other algorithms in intelligent transportation systems (<xref ref-type="bibr" rid="B8">Colombo and Vecchio, 2012</xref>). With discrepancies amongst the forward velocities of multi-robots (refer to the setting in <xref ref-type="other" rid="alg1">Algorithm 1</xref>), the robot vehicles well demonstrate a queueing effect guided by the collision prediction systems. On the straight and curving roads, the LGMDs inspired visual systems perform more robustly and consistently on collision alert in comparison with the intersections. Additionally, the robot density in the traffic system does not greatly affect the overall performance of the visual systems, which implies that the proposed bio-inspired computation is robust and flexible to more dynamic visual scenes.</p>
</sec>
<sec id="s4-4-2">
<title>4.4.2 Performance in the Highway Traffic System</title>
<p>In the critical highway traffic system, two lanes separate the speed of robots into two ranges, as presented in <xref ref-type="sec" rid="s3">Section 3</xref>. The overall CAR and PR are given in <xref ref-type="table" rid="T3">Tables 3</xref>, <xref ref-type="table" rid="T4">4</xref>. In addition, <xref ref-type="fig" rid="F13">Figure&#x20;13</xref> illustrates the results with event and density maps. Here the most noticeable observation is that, in comparison with the city traffic system barring the 10-agents case, more than twice as many critical events take place in the highway traffic within the 1-h implementation. <xref ref-type="table" rid="T4">Table&#x20;4</xref> clearly shows that nearly half (43.2%) of the critical events concentrate at the junction where the high-speed and low-speed robot flows merge. In spite of that, the overall CAR remains fairly high, which is consistent with the city traffic system without signals control (79.98%); the PR at either the junction (75.29%) or the other road sections (83.55%) is slightly lower than the city traffic results. In general, the LGMDs inspired visual systems are robust to cope with collision prediction in a high-speed, dynamic visual scene, in the micro-robot under constrained computation&#x20;cost.</p>
<fig id="F13" position="float">
<label>FIGURE 13</label>
<caption>
<p>Illustration of event and density maps in the highway traffic system.</p>
</caption>
<graphic xlink:href="frobt-08-529872-g013.tif"/>
</fig>
<p>On the other hand, we also find challenges through the experiments. The event density maps in <xref ref-type="fig" rid="F13">Figure&#x20;13</xref> demonstrate that it is still difficult to address the crash avoidance problems at the junction where the low-speed robot vehicles are accelerating to merge into the high-speed flow. At this point, the robots are required to form a queue to pass the junction free of collision. The similar situations happen at the deceleration zone where the high-speed vehicles are shunted to queue into the low-speed flow. In addition to that, compared to the city traffic results, the PR in other sections is relatively lower, i.e.,&#x20;more crashes between robots occur on the high-speed curving road (see <xref ref-type="fig" rid="F13">Figure&#x20;13C</xref>).</p>
</sec>
</sec>
<sec id="s4-5">
<title>4.5 Sensor Comparison</title>
<p>Through the previous experiments, we have shown the effectiveness and robustness of LGMDs inspired visual systems for timely collision prediction in critical robot traffic. The energy efficiency has also been verified via the successful implementation on the micro-robot under extremely constrained computation power. As an alternative optical approach to collision detection, the proposed bio-inspired computation could be scalable across various platforms and scenarios. In the last type of experiments, we also compare this visual approach with another classic physical sensor strategy&#x2013;the IR bumper sensors used extensively in robotics for collision sensing and avoidance.</p>
<p>The micro-robot possesses three IR sensors as short-range obstacle sensing technique. <xref ref-type="fig" rid="F14">Figure&#x20;14</xref> compares the detection range between the two physical sensor strategies. It appears that the combination of three bumper sensors has wider coverage in space up to approximately 90&#xb0; than the monocular camera which could reach only 70&#xb0;. On the other hand, the optical sensor has much longer sensing distance with respect to the advantage of optical methods. In this kind of experiments, the robot vehicle applies the same braking avoidance behaviour guided by the bumpers. The other experimental setting is in accordance with the earlier experiments. Each type of traffic system implementation lasts for 1&#xa0;hour, the same duration.</p>
<fig id="F14" position="float">
<label>FIGURE 14</label>
<caption>
<p>Schematic comparison on sensing range of two physical sensor strategies between the combination of three IR bumper sensors and the frontal monocular camera.</p>
</caption>
<graphic xlink:href="frobt-08-529872-g014.tif"/>
</fig>
<p>
<xref ref-type="table" rid="T5">Table&#x20;5</xref> lists the CAR of the IR based technique in the two traffic systems. Though with wider coverage in front, here the CAR of the IR based technique is much lower in both types of traffic system (66.81% in city traffic without lights and 48.30% in highway), compared with the performance of LGMDs inspired visual systems (80.94% in city traffic without lights and 79.98% in highway). Compared to the proposed optical approach, the short-range technique can not fulfil the timely crash prediction in the critical robot traffic. The short distance between interactive robots brings about a smaller number of critical events within an identical time window. Besides that, the CAR is even lower in the highway scenario, which points out that the IR short-range detector is unsuitable for predicting high-speed approaching objects in a timely manner; whilst the proposed approach can signal an impending crash much earlier. With more abundant features extracted and filtered from the dynamic visual scene, the hybrid LGMDs inspired visual systems are more robust in collision prediction.</p>
<table-wrap id="T5" position="float">
<label>TABLE 5</label>
<caption>
<p>CAR of IR sensors in multi-robot traffic.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Traffic system type</th>
<th align="center">Total events</th>
<th align="center">Avoidance</th>
<th align="center">CAR (%)</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">City traffic: No signals (5 agents)</td>
<td align="char" char=".">226</td>
<td align="char" char=".">151</td>
<td align="char" char=".">66.81</td>
</tr>
<tr>
<td align="left">Highway traffic (5 agents)</td>
<td align="char" char=".">176</td>
<td align="char" char=".">85</td>
<td align="char" char=".">48.30</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s5">
<title>5 Discussion</title>
<p>In this section, we discuss further on observed problems through the experiments, and point out corresponding future works. Firstly, we have seen some limitations of the proposed approach for quick collision detection in the context of robot traffic. Some critical conditions are still challenging the proposed LGMDs inspired visual systems. In the city traffic system particularly without signals control, crashes generally take place at intersections (see <xref ref-type="fig" rid="F11">Figure&#x20;11</xref>). During the experiments, we have observed that there is a possibility that two robot vehicles are arriving at the intersection, simultaneously. The current approach as frontal collision sensing technique can not well cope with such a problem. On the other hand, crashes are significantly reduced if the robots reached intersection in succession, such as the example shown in <xref ref-type="fig" rid="F9">Figure&#x20;9F</xref>; the successful avoidance density is fairly high near the intersections in the city traffic system, as shown in <xref ref-type="fig" rid="F11">Figures 11</xref>, <xref ref-type="fig" rid="F12">12</xref>. The proposed approach can predict a danger by nearby object crossing the field of vision, very robustly and timely. In this research, a possible restriction is the limited view angle of the monocular camera system in the micro-robot. Therefore, we will develop binocular and panoramic camera systems for future scientific&#x20;study.</p>
<p>In the highway traffic system, we have noticed that the very high speed movement, i.e.,&#x20;robot velocity over 20&#xa0;cm/s in the context, is another problem to the LGMDs inspired visual systems embedded in the micro-robot vision. In our previous studies on LGMD-1 (<xref ref-type="bibr" rid="B16">Fu et&#x20;al., 2018b</xref>) and LGMD-2 (<xref ref-type="bibr" rid="B13">Fu et&#x20;al., 2019b</xref>), we have figured out that the LGMDs models demonstrate speed response to an approaching object, i.e.,&#x20;the neural networks deliver stronger output against a faster approaching object at higher angular velocity. The speed response and looming selectivity of LGMDs models are achieved by the competition between excitation and two kinds of inhibitions&#x2013;the lateral inhibition, and the FFI. Most importantly, the former inhibition works effectively to sculpt such selectivity when objects are expanding on the field of vision before reaching a particular angular size. Otherwise, the FFI (or PM in the LGMD-2) mechanisms could immediately suppress the LGMDs at some critical moments like the end of approaching, the start of receding. Accordingly, the proposed visual systems in the high-speed moving robot have always confronted such a difficult situation. This gives a reasonable explanation of the higher crash density near the junction where the two lanes merge (see <xref ref-type="fig" rid="F13">Figure&#x20;13</xref>). In other words, the high-speed agent could not appropriately predict a crash with the emerged low-speed agent at the junction. In this regard, future effort is in demand to enable the visual systems to well cope with ultra-fast approaching movements.</p>
<p>Regarding the control strategy, as it is not the focus of this research, we have applied very basic switch control between two states, i.e.,&#x20;move and stop, in order to enable the robots to tightly follow paths in all kinds of traffic systems. As a result, the potential crash avoidance is led by abrupt braking which can not fulfil the very complicated, real world emergency actions of vehicles. For example, the deceleration earlier to urgent stop has not been involved in the control of micro-robots. We will incorporate in the robotic motion system more advanced control method, e.g., the fuzzy control, to enrich the robot avoidance reaction corresponding to more realistic behaviours (<xref ref-type="bibr" rid="B42">Sivaraman and Trivedi, 2013</xref>).</p>
<p>Secondly, in comparison with previous robot arena experiments on the LGMDs inspired visual systems, in which the robot motion was not confined by specific trajectories (<xref ref-type="bibr" rid="B14">Fu et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B13">Fu et&#x20;al., 2019b</xref>), this study strictly binds the robot motion in navigation (see <xref ref-type="fig" rid="F5">Figure&#x20;5</xref>, <xref ref-type="other" rid="alg1">Algorithms 1</xref>, <xref ref-type="other" rid="alg2">2</xref>). The prioritised goal of robot motion is to tightly follow the paths desired by the robot traffic implementation. However, the current motion strategy has the flaw that the robots usually experience yaw rotations in route following. This sometimes results in false positives of collision alert. We will explore new methods in the LGMDs neuronal system model to habituate such visual movements, and also improve the robot route following strategy.</p>
<p>Last but not least, the robot vehicles currently are not fully autonomous in traffic systems. Despite human interventions in&#x20;merely specific conditions during experiments (e.g., the robot fails&#x20;in route following or collides with other agents), the human-robot interactions have still influenced the robot traffic implementation, e.g., manually replacing the robot on routes after crash. Accordingly, the different robot traffic systems need to be verified with respect to (<xref ref-type="bibr" rid="B9">Fisher et&#x20;al., 2021</xref>). The safety and functional correctness of the robot traffic reflecting some real world scenes also need to be further validated according to (<xref ref-type="bibr" rid="B47">Webster et&#x20;al., 2020</xref>).</p>
</sec>
<sec id="s6">
<title>6 Concluding Remarks</title>
<p>This paper has presented a novel study on investigating a bio-inspired computation approach to collision prediction in dynamic robot traffic reflecting some real world on-road challenges. To fill the scientific study gap on evaluating online artificial visual systems in dangerous scenarios where physical crashes are prone to happen, we have applied a recently published robotic platform to construct traffic systems including the city roadmap with many intersections, and the highway with junctions. To sharpen up the acuity of collision detection visual systems to darker objects approaching over other categories of movements like recession, we have integrated two LGMDs neuronal models, i.e.,&#x20;the LGMD-1 and LGMD-2 neural networks, as a hybrid model outputting alert firing rate. A potential collision is predicted only when both the LGMDs are highly activated. To focus on investigating the proposed collision prediction visual systems, we have applied the simple bang-bang control to allow the robot to tightly follow paths and brake abruptly corresponding to the avoidance action. The arena experiments have verified the robustness of the proposed approach to timely collision alert for engaged robot vehicles in the traffic systems. This research has complemented the previous experimentation on such bio-inspired visual systems in more critical real-physical scenarios, under extremely constrained computation power. This also has provided an alternative, energy-efficient technique to current collision alert systems. The proposed visual systems can be transformed into neuromorphic sensing paradigms which could be prevalent for future autonomous machines.</p>
</sec>
</body>
<back>
<sec id="s7">
<title>Data Availability Statement</title>
<p>All datasets generated for this study are included in the article/<xref ref-type="sec" rid="s11">Supplementary Video</xref>.</p>
</sec>
<sec id="s8">
<title>Author Contributions</title>
<p>QF, XS, TL contributed as the joint first authors. CH configured the micro-robot. All authors contributed to the article and approved the submitted version.</p>
</sec>
<sec id="s9">
<title>Funding</title>
<p>This research has received funding from the European Union&#x2019;s Horizon 2020 research and innovation programme under the&#x20;Marie Sklodowska-Curie Grant Agreement No 778602&#x20;ULTRACEPT, the National Natural Science Foundation of&#x20;China under the Grant No 12031003, and the China Postdoctoral Science Foundation Grant 2020M682651.</p>
</sec>
<sec sec-type="COI-statement" id="s10">
<title>Conflict of Interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s11" sec-type="disclaimer">
<title>Publisher&#x2019;s Note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s12">
<title>Supplementary Material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/frobt.2021.529872/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/frobt.2021.529872/full&#x23;supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="Video1.mp4" id="SM1" mimetype="application/mp4" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Albaker</surname>
<given-names>B. M.</given-names>
</name>
<name>
<surname>Rahim</surname>
<given-names>N. A.</given-names>
</name>
</person-group> (<year>2009</year>). &#x201c;<article-title>A Survey of Collision Avoidance Approaches for Unmanned Aerial Vehicles</article-title>,&#x201d; in <conf-name>2009 International Conference for Technical Postgraduates</conf-name>, <conf-loc>Kuala Lumpur, Malaysia</conf-loc> <publisher-name>IEEE</publisher-name>. <pub-id pub-id-type="doi">10.1109/techpos.2009.5412074</pub-id> </citation>
</ref>
<ref id="B2">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Ammoun</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Nashashibi</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2009</year>). &#x201c;<article-title>Real Time Trajectory Prediction for Collision Risk Estimation between Vehicles</article-title>,&#x201d; in <conf-name>IEEE 5th International Conference on Intelligent Computer Communication and Processing</conf-name>, <conf-loc>Cluj-Napoca, Romania</conf-loc> <publisher-name>IEEE</publisher-name>. <pub-id pub-id-type="doi">10.1109/iccp.2009.5284727</pub-id> </citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Berm&#xfa;dez i Badia</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Bernardet</surname>
<given-names>U.</given-names>
</name>
<name>
<surname>Verschure</surname>
<given-names>P. F. M. J.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Non-linear Neuronal Responses as an Emergent Property of Afferent Networks: A Case Study of the Locust Lobula Giant Movement Detector</article-title>. <source>Plos Comput. Biol.</source> <volume>6</volume>, <fpage>e1000701</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pcbi.1000701</pub-id> </citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Blanchard</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Rind</surname>
<given-names>F. C.</given-names>
</name>
<name>
<surname>Verschure</surname>
<given-names>P. F. M. J.</given-names>
</name>
</person-group> (<year>2000</year>). <article-title>Collision Avoidance Using a Model of the Locust Lgmd Neuron</article-title>. <source>Robotics Autonomous Syst.</source> <volume>30</volume>, <fpage>17</fpage>&#x2013;<lpage>38</lpage>. <pub-id pub-id-type="doi">10.1016/s0921-8890(99)00063-9</pub-id> </citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chang</surname>
<given-names>W.-J.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>L.-B.</given-names>
</name>
<name>
<surname>Su</surname>
<given-names>K.-Y.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Deepcrash: A Deep Learning-Based Internet of Vehicles System for Head-On and Single-Vehicle Accident Detection With Emergency Notification</article-title>. <source>IEEE Access.</source> <volume>7</volume>, <fpage>148163</fpage>&#x2013;<lpage>148175</lpage>. <pub-id pub-id-type="doi">10.1109/access.2019.2946468</pub-id> </citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cizek</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Faigl</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Self-Supervised Learning of the Biologically-Inspired Obstacle Avoidance of Hexapod Walking Robot</article-title>. <source>Bioinspiration &#x26; Biomimetics.</source> <volume>14</volume>, <fpage>046002</fpage>. <pub-id pub-id-type="doi">10.1088/1748-3190/ab1a9c</pub-id> </citation>
</ref>
<ref id="B7">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Cizek</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Milicka</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Faigl</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Neural Based Obstacle Avoidance with CPG Controlled Hexapod Walking Robot</article-title>,&#x201d; in <conf-name>Proceedings of the 2017 IEEE International Joint Conference on Neural Networks (IJCNN)</conf-name>, <conf-loc>Anchorage, AK, USA</conf-loc> (<publisher-name>IEEE</publisher-name>), <fpage>650</fpage>&#x2013;<lpage>656</lpage>. <pub-id pub-id-type="doi">10.1109/ijcnn.2017.7965914</pub-id> </citation>
</ref>
<ref id="B8">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Colombo</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Vecchio</surname>
<given-names>D. D.</given-names>
</name>
</person-group> (<year>2012</year>). &#x201c;<article-title>Efficient Algorithms for Collision Avoidance at Intersections</article-title>,&#x201d; in <conf-name>Proceedings of the 15th ACM International Conference on Hybrid Systems</conf-name>, <conf-loc>Beijing China</conf-loc> (<publisher-name>Computation and Control</publisher-name>), <fpage>145</fpage>&#x2013;<lpage>154</lpage>. <pub-id pub-id-type="doi">10.1145/2185632.2185656</pub-id> </citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fisher</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Mascardi</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Rozier</surname>
<given-names>K. Y.</given-names>
</name>
<name>
<surname>Schlingloff</surname>
<given-names>B.-H.</given-names>
</name>
<name>
<surname>Winikoff</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Yorke-Smith</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Towards a Framework for Certification of Reliable Autonomous Systems</article-title>. <source>Auton. Agent Multi-Agent Syst.</source> <volume>35</volume> (<issue>8</issue>). <pub-id pub-id-type="doi">10.1007/s10458-020-09487-2</pub-id> </citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fotowat</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Gabbiani</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Collision Detection as a Model for Sensory-Motor Integration</article-title>. <source>Annu. Rev. Neurosci.</source> <volume>34</volume>, <fpage>1</fpage>&#x2013;<lpage>19</lpage>. <pub-id pub-id-type="doi">10.1146/annurev-neuro-061010-113632</pub-id> </citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Franceschini</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Small Brains, Smart Machines: From Fly Vision to Robot Vision and Back Again</article-title>. <source>Proc. IEEE.</source> <volume>102</volume>, <fpage>751</fpage>&#x2013;<lpage>781</lpage>. <pub-id pub-id-type="doi">10.1109/jproc.2014.2312916</pub-id> </citation>
</ref>
<ref id="B12">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Fu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Bellotto</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Claire Rind</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Yue</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2019a</year>). &#x201c;<article-title>A Visual Neural Network for Robust Collision Perception in Vehicle Driving Scenarios</article-title>,&#x201d; in <conf-name>Artificial Intelligence Applications and Innovations</conf-name>, <conf-loc>Crete, Greece</conf-loc> (<publisher-name>Springer International Publishing</publisher-name>), <fpage>67</fpage>&#x2013;<lpage>79</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-19823-7_5</pub-id> </citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Yue</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2019b</year>). <article-title>Towards Computational Models and Applications of Insect Visual Systems for Motion Perception: A Review</article-title>. <source>Artif. Life.</source> <volume>25</volume>, <fpage>263</fpage>&#x2013;<lpage>311</lpage>. <pub-id pub-id-type="doi">10.1162/artl_a_00297</pub-id> </citation>
</ref>
<ref id="B14">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Fu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Yue</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Collision Selective LGMDs Neuron Models Research Benefits From a Vision-Based Autonomous Micro Robot</article-title>,&#x201d; in <conf-name>Proceedings of the 2017 IEEE/RSJ international conference on intelligent robots and systems (IROS)</conf-name>, <conf-loc>Vancouver, Canada</conf-loc> (<publisher-name>IEEE</publisher-name>), <fpage>3996</fpage>&#x2013;<lpage>4002</lpage>. <pub-id pub-id-type="doi">10.1109/iros.2017.8206254</pub-id> </citation>
</ref>
<ref id="B15">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Fu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Yue</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2018a</year>). &#x201c;<article-title>Towards Computational Models of Insect Motion Detectors for Robot Vision</article-title>,&#x201d; in <conf-name>Towards autonomous robotic systems conference</conf-name>, <conf-loc>Bristol, United Kingdom</conf-loc>. Editors <person-group person-group-type="editor">
<name>
<surname>Giuliani</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Assaf</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Giannaccini</surname>
<given-names>M. E.</given-names>
</name>
</person-group> (<publisher-name>Springer International Publishing</publisher-name>), <fpage>465</fpage>&#x2013;<lpage>467</lpage>. </citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Peng</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Yue</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2018b</year>). <article-title>Shaping the Collision Selectivity in a Looming Sensitive Neuron Model with Parallel on and off Pathways and Spike Frequency Adaptation</article-title>. <source>Neural Networks.</source> <volume>106</volume>, <fpage>127</fpage>&#x2013;<lpage>143</lpage>. <pub-id pub-id-type="doi">10.1016/j.neunet.2018.04.001</pub-id> </citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Peng</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Rind</surname>
<given-names>F. C.</given-names>
</name>
<name>
<surname>Yue</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2020a</year>). <article-title>A Robust Collision Perception Visual Neural Network with Specific Selectivity to Darker Objects</article-title>. <source>IEEE Trans. Cybern.</source> <volume>50</volume>, <fpage>5074</fpage>&#x2013;<lpage>5088</lpage>. <pub-id pub-id-type="doi">10.1109/TCYB.2019.2946090</pub-id> </citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Peng</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Yue</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2020b</year>). <article-title>Improved Collision Perception Neuronal System Model With Adaptive Inhibition Mechanism and Evolutionary Learning</article-title>. <source>IEEE Access.</source> <volume>8</volume>, <fpage>108896</fpage>&#x2013;<lpage>108912</lpage>. <pub-id pub-id-type="doi">10.1109/access.2020.3001396</pub-id> </citation>
</ref>
<ref id="B19">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Fu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Yue</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2016</year>). &#x201c;<article-title>Bio-inspired Collision Detector With Enhanced Selectivity for Ground Robotic Vision System</article-title>,&#x201d; in <conf-name>British machine vision conference</conf-name>, <conf-loc>New York, United Kingdom</conf-loc>. Editors <person-group person-group-type="editor">
<name>
<surname>Wilson</surname>
<given-names>R. C.</given-names>
</name>
<name>
<surname>Hancock</surname>
<given-names>E. R.</given-names>
</name>
<name>
<surname>Smith</surname>
<given-names>W. A. P.</given-names>
</name>
</person-group> (<publisher-name>BMVA Press</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.5244/c.30.6</pub-id> </citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gabbiani</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Krapp</surname>
<given-names>H. G.</given-names>
</name>
<name>
<surname>Hatsopoulos</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Mo</surname>
<given-names>C.-H.</given-names>
</name>
<name>
<surname>Koch</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Laurent</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>Multiplication and Stimulus Invariance in a Looming-Sensitive Neuron</article-title>. <source>J.&#x20;Physiology-Paris.</source> <volume>98</volume>, <fpage>19</fpage>&#x2013;<lpage>34</lpage>. <pub-id pub-id-type="doi">10.1016/j.jphysparis.2004.03.001</pub-id> </citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gabbiani</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Krapp</surname>
<given-names>H. G.</given-names>
</name>
<name>
<surname>Koch</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Laurent</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2002</year>). <article-title>Multiplicative Computation in a Visual Neuron Sensitive to Looming</article-title>. <source>Nature.</source> <volume>420</volume>, <fpage>320</fpage>&#x2013;<lpage>324</lpage>. <pub-id pub-id-type="doi">10.1038/nature01190</pub-id> </citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hartbauer</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Simplified Bionic Solutions: A Simple Bio-Inspired Vehicle Collision Detection System</article-title>. <source>Bioinspir. Biomim.</source> <volume>12</volume>, <fpage>026007</fpage>. <pub-id pub-id-type="doi">10.1088/1748-3190/aa5993</pub-id> </citation>
</ref>
<ref id="B23">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Hu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Fu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Yue</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Colias IV: the Affordable Micro Robot Platform With Bio-Inspired Vision</article-title>,&#x201d; in <conf-name>Towards autonomous robotic systems conference</conf-name>. Editors <person-group person-group-type="editor">
<name>
<surname>Giuliani</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Assaf</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Giannaccini</surname>
<given-names>M. E.</given-names>
</name>
</person-group> (<publisher-name>Springer International Publishing</publisher-name>), <fpage>197</fpage>&#x2013;<lpage>208</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-96728-8_17</pub-id> </citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Isakhani</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Aouf</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Kechagias-Stamatis</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Whidborne</surname>
<given-names>J.&#x20;F.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>A Furcated Visual Collision Avoidance System for an Autonomous Micro Robot</article-title>. <source>IEEE Trans. Cogn. Development Syst.</source> <volume>12</volume>, <fpage>1</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1109/TCDS.2018.2858742</pub-id> </citation>
</ref>
<ref id="B25">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Keil</surname>
<given-names>M. S.</given-names>
</name>
<name>
<surname>Roca-Moreno</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Rodriguez-Vazquez</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2004</year>). &#x201c;<article-title>A Neural Model of the Locust Visual System for Detection of Object Approaches With Real-World Scenes</article-title>,&#x201d; in <conf-name>Proceedings of the fourth IASTED international conference on visualization, imaging, and image processing</conf-name>, <conf-loc>Marbella, Spain</conf-loc> (<publisher-name>IASTED</publisher-name>), <fpage>340</fpage>&#x2013;<lpage>345</lpage>. </citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kennedy</surname>
<given-names>J.&#x20;S.</given-names>
</name>
</person-group> (<year>1951</year>). <article-title>The Migration of the Desert Locust (<italic>Schistocerca gregaria</italic> Forsk.) I. The Behaviour of Swarms. II. A Theory of Long-Range Migrations</article-title>. <source>Phil. Trans. R. Soc. Lond. B.</source> <volume>235</volume>, <fpage>163</fpage>&#x2013;<lpage>290</lpage>. <pub-id pub-id-type="doi">10.1098/rstb.1951.0003</pub-id> </citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Krajn&#xed;k</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Nitsche</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Faigl</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Van&#x11b;k</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Saska</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>P&#x159;eu&#x10d;il</surname>
<given-names>L.</given-names>
</name>
<etal/>
</person-group> (<year>2014</year>). <article-title>A Practical Multirobot Localization System</article-title>. <source>J.&#x20;Intell. Robot Syst.</source> <volume>76</volume>, <fpage>539</fpage>&#x2013;<lpage>562</lpage>. <pub-id pub-id-type="doi">10.1007/s10846-014-0041-x</pub-id> </citation>
</ref>
<ref id="B28">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Krejan</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Trost</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2011</year>). &#x201c;<article-title>LGMD-Based Bio-Inspired Algorithm for Detecting Risk of Collision of a Road Vehicle</article-title>,&#x201d; in <conf-name>Proceedings of the 2011 IEEE 7th international symposium on image and signal processing and analysis</conf-name>, <conf-loc>Dubrovnik, Croatia</conf-loc> (<publisher-name>IEEE</publisher-name>), <fpage>319</fpage>&#x2013;<lpage>324</lpage>. </citation>
</ref>
<ref id="B29">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Fu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Isakhani</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Yue</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>Investigating Multiple Pheromones in Swarm Robots - A Case Study of Multi-Robot Deployment</article-title>,&#x201d; in <conf-name>Proceedings of the 2020&#x20;5th International Conference on Advanced Robotics and Mechatronics (ICARM)</conf-name>, <conf-loc>Shenzhen, China</conf-loc> (<publisher-name>IEEE</publisher-name>), <fpage>595</fpage>&#x2013;<lpage>601</lpage>. <pub-id pub-id-type="doi">10.1109/icarm49381.2020.9195311</pub-id> </citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mukhtar</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Xia</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Tang</surname>
<given-names>T. B.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Vehicle Detection Techniques for Collision Avoidance Systems: A Review</article-title>. <source>IEEE Trans. Intell. Transport. Syst.</source> <volume>16</volume>, <fpage>2318</fpage>&#x2013;<lpage>2338</lpage>. <pub-id pub-id-type="doi">10.1109/TITS.2015.2409109</pub-id> </citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>O&#x2019;Shea</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Williams</surname>
<given-names>J.&#x20;L.</given-names>
</name>
</person-group> (<year>1974</year>). <article-title>The Anatomy and Output Connection of a Locust Visual Interneurone; the Lobular Giant Movement Detector (LGMD) Neurone</article-title>. <source>J.&#x20;Comp. Physiol.</source> <volume>91</volume>, <fpage>257</fpage>&#x2013;<lpage>266</lpage>. <pub-id pub-id-type="doi">10.1007/BF00698057</pub-id> </citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>O&#x2019;Shea</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Rowell</surname>
<given-names>C. H. F.</given-names>
</name>
</person-group> (<year>1976</year>). <article-title>The Neuronal Basis of a Sensory Analyser, the Acridid Movement Detector System</article-title>. <source>J.&#x20;Exp. Biol.</source> <volume>65</volume>, <fpage>289</fpage>&#x2013;<lpage>308</lpage>. <pub-id pub-id-type="doi">10.1242/jeb.65.2.289</pub-id> </citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rind</surname>
<given-names>F. C.</given-names>
</name>
<name>
<surname>Bramwell</surname>
<given-names>D. I.</given-names>
</name>
</person-group> (<year>1996</year>). <article-title>Neural Network Based on the Input Organization of an Identified Neuron Signaling Impending Collision</article-title>. <source>J.&#x20;Neurophysiol.</source> <volume>75</volume>, <fpage>967</fpage>&#x2013;<lpage>985</lpage>. <pub-id pub-id-type="doi">10.1152/jn.1996.75.3.967</pub-id> </citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rind</surname>
<given-names>F. C.</given-names>
</name>
<name>
<surname>Simmons</surname>
<given-names>P. J.</given-names>
</name>
</person-group> (<year>1999</year>). <article-title>Seeing What Is Coming: Building Collision-Sensitive Neurones</article-title>. <source>Trends Neurosciences.</source> <volume>22</volume>, <fpage>215</fpage>&#x2013;<lpage>220</lpage>. <pub-id pub-id-type="doi">10.1016/s0166-2236(98)01332-0</pub-id> </citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rind</surname>
<given-names>F. C.</given-names>
</name>
<name>
<surname>Wernitznig</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>P&#xf6;lt</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Zankel</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>G&#xfc;tl</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Sztarker</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). <article-title>Two Identified Looming Detectors in the Locust: Ubiquitous Lateral Connections Among Their Inputs Contribute to Selective Responses to Looming Objects</article-title>. <source>Sci. Rep.</source> <volume>6</volume>, <fpage>35525</fpage>. <pub-id pub-id-type="doi">10.1038/srep35525</pub-id> </citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sabzevari</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Scaramuzza</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Multi-body Motion Estimation From Monocular Vehicle-Mounted Cameras</article-title>. <source>IEEE Trans. Robot.</source> <volume>32</volume>, <fpage>638</fpage>&#x2013;<lpage>651</lpage>. <pub-id pub-id-type="doi">10.1109/tro.2016.2552548</pub-id> </citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Salt</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Howard</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Indiveri</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Sandamirskaya</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Parameter Optimization and Learning in a Spiking Neural Network for UAV Obstacle Avoidance Targeting Neuromorphic Processors</article-title>. <source>IEEE Trans. Neural Networks Learn. Syst.</source> <volume>31</volume>, <fpage>3305</fpage>&#x2013;<lpage>3318</lpage>. <pub-id pub-id-type="doi">10.1109/TNNLS.2019.2941506</pub-id> </citation>
</ref>
<ref id="B38">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Salt</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Indiveri</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Sandamirskaya</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Obstacle Avoidance With LGMD Neuron: Towards a Neuromorphic UAV Implementation</article-title>,&#x201d; in <conf-name>Proceedings of the 2017 IEEE international symposium on circuits and systems (ISCAS)</conf-name>, <conf-loc>Baltimore, MD, USA</conf-loc> (<publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1109/iscas.2017.8050976</pub-id> </citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Serres</surname>
<given-names>J.&#x20;R.</given-names>
</name>
<name>
<surname>Ruffier</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Optic Flow-Based Collision-Free Strategies: From Insects to Robots</article-title>. <source>Arthropod Struct. Development.</source> <volume>46</volume>, <fpage>703</fpage>&#x2013;<lpage>717</lpage>. <pub-id pub-id-type="doi">10.1016/j.asd.2017.06.003</pub-id> </citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Simmons</surname>
<given-names>P. J.</given-names>
</name>
<name>
<surname>Rind</surname>
<given-names>F. C.</given-names>
</name>
</person-group> (<year>1997</year>). <article-title>Responses to Object Approach by a Wide Field Visual Neurone, the LGMD2 of the Locust: Characterization and Image Cues</article-title>. <source>J.&#x20;Comp. Physiol. A: Sensory, Neural Behav. Physiol.</source> <volume>180</volume>, <fpage>203</fpage>&#x2013;<lpage>214</lpage>. <pub-id pub-id-type="doi">10.1007/s003590050041</pub-id> </citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Simmons</surname>
<given-names>P. J.</given-names>
</name>
<name>
<surname>Rind</surname>
<given-names>F. C.</given-names>
</name>
<name>
<surname>Santer</surname>
<given-names>R. D.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Escapes With and Without Preparation: The Neuroethology of Visual Startle in Locusts</article-title>. <source>J.&#x20;Insect Physiol.</source> <volume>56</volume>, <fpage>876</fpage>&#x2013;<lpage>883</lpage>. <pub-id pub-id-type="doi">10.1016/j.jinsphys.2010.04.015</pub-id> </citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sivaraman</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Trivedi</surname>
<given-names>M. M.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Looking at Vehicles on the Road: A Survey of Vision-Based Vehicle Detection, Tracking, and Behavior Analysis</article-title>. <source>IEEE Trans. Intell. Transport. Syst.</source> <volume>14</volume>, <fpage>1773</fpage>&#x2013;<lpage>1795</lpage>. <pub-id pub-id-type="doi">10.1109/tits.2013.2266661</pub-id> </citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Stafford</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Santer</surname>
<given-names>R. D.</given-names>
</name>
<name>
<surname>Rind</surname>
<given-names>F. C.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>A Bio-Inspired Visual Collision Detection Mechanism for Cars: Combining Insect Inspired Neurons to Create a Robust System</article-title>. <source>Biosystems.</source> <volume>87</volume>, <fpage>164</fpage>&#x2013;<lpage>171</lpage>. <pub-id pub-id-type="doi">10.1016/j.biosystems.2006.09.010</pub-id> </citation>
</ref>
<ref id="B44">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Sun</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Fu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Yue</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>ColCOS <italic>&#x3d5;</italic>: A Multiple Pheromone Communication System for Swarm Robotics and Social Insects Research</article-title>,&#x201d; in <conf-name>Proceedings of the 2019 IEEE 4th International Conference on Advanced Robotics and Mechatronics (ICARM)</conf-name>, <conf-loc>Osaka, Japan</conf-loc> (<publisher-name>IEEE</publisher-name>), <fpage>59</fpage>&#x2013;<lpage>66</lpage>. <pub-id pub-id-type="doi">10.1109/icarm.2019.8833989</pub-id> </citation>
</ref>
<ref id="B45">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Sun</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Bebis</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Miller</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2004</year>). &#x201c;<article-title>On-Road Vehicle Detection Using Optical Sensors: a Review</article-title>,&#x201d; in <conf-name>Proceedings of the 7th International IEEE Conference on Intelligent Transportation Systems</conf-name>, <conf-loc>Washington, DC, USA</conf-loc> (<publisher-name>IEEE</publisher-name>), <fpage>585</fpage>&#x2013;<lpage>590</lpage>. </citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sztarker</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Rind</surname>
<given-names>F. C.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>A Look Into the Cockpit of the Developing Locust: Looming Detectors and Predator Avoidance</article-title>. <source>Devel Neurobio.</source> <volume>74</volume>, <fpage>1078</fpage>&#x2013;<lpage>1095</lpage>. <pub-id pub-id-type="doi">10.1002/dneu.22184</pub-id> </citation>
</ref>
<ref id="B47">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Webster</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Western</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Araiza-Illan</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Dixon</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Eder</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Fisher</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>A Corroborative Approach to Verification and Validation of Human-Robot Teams</article-title>. <source>Int. J.&#x20;Robotics Res.</source> <volume>39</volume>, <fpage>73</fpage>&#x2013;<lpage>99</lpage>. <pub-id pub-id-type="doi">10.1177/0278364919883338</pub-id> </citation>
</ref>
<ref id="B48">
<citation citation-type="web">
<comment>[Dataset]</comment> <collab>WHO</collab> (<year>2018</year>). <article-title>Global Status Report on Road Safety 2018</article-title>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://www.who.int/violence_injury_prevention/road_safety_status/2018/en/">https://www.who.int/violence_injury_prevention/road_safety_status/2018/en/</ext-link>
</comment>. </citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yakubowski</surname>
<given-names>J.&#x20;M.</given-names>
</name>
<name>
<surname>McMillan</surname>
<given-names>G. A.</given-names>
</name>
<name>
<surname>Gray</surname>
<given-names>J.&#x20;R.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Background Visual Motion Affects Responses of an Insect Motion-Sensitive Neuron to Objects Deviating From a Collision Course</article-title>. <source>Physiol. Rep.</source> <volume>4</volume>, <fpage>e12801</fpage>. <pub-id pub-id-type="doi">10.14814/phy2.12801</pub-id> </citation>
</ref>
<ref id="B50">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Yue</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Rind</surname>
<given-names>F. C.</given-names>
</name>
</person-group> (<year>2005</year>). &#x201c;<article-title>A Collision Detection System for a Mobile Robot Inspired by the Locust Visual System</article-title>,&#x201d; in <conf-name>Proceedings of the 2005 IEEE international conference on robotics and automation (ICRA)</conf-name>, <conf-loc>Barcelona, Spain</conf-loc> (<publisher-name>IEEE</publisher-name>), <fpage>3832</fpage>&#x2013;<lpage>3837</lpage>. </citation>
</ref>
<ref id="B51">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sun</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Bebis</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Miller</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>On-Road Vehicle Detection: a Review</article-title>. <source>IEEE Trans. Pattern Anal. Machine Intell.</source> <volume>28</volume>, <fpage>694</fpage>&#x2013;<lpage>711</lpage>. <pub-id pub-id-type="doi">10.1109/tpami.2006.104</pub-id> </citation>
</ref>
<ref id="B52">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ma</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Fu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Yue</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>An LGMD Based Competitive Collision Avoidance Strategy for UAV</article-title>. <source>Artif. Intelligence Appl. Innov.</source> <volume>6</volume>, <fpage>80</fpage>&#x2013;<lpage>91</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-19823-7_6</pub-id> </citation>
</ref>
<ref id="B53">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhou</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Evolutionary V2X Technologies Toward the Internet of Vehicles: Challenges and Opportunities</article-title>. <source>Proc. IEEE.</source> <volume>108</volume>, <fpage>308</fpage>&#x2013;<lpage>323</lpage>. <pub-id pub-id-type="doi">10.1109/jproc.2019.2961937</pub-id> </citation>
</ref>
</ref-list>
</back>
</article>