<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurosci.</journal-id>
<journal-title>Frontiers in Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-453X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnins.2019.00210</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>A New Pulse Coupled Neural Network (PCNN) for Brain Medical Image Fusion Empowered by Shuffled Frog Leaping Algorithm</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Huang</surname> <given-names>Chenxi</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/612069/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Tian</surname> <given-names>Ganxun</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Lan</surname> <given-names>Yisha</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/631504/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Peng</surname> <given-names>Yonghong</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Ng</surname> <given-names>E. Y. K.</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Hao</surname> <given-names>Yongtao</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Cheng</surname> <given-names>Yongqiang</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/679425/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Che</surname> <given-names>Wenliang</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/694253/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Department of Computer Science and Technology, Tongji University</institution>, <addr-line>Shanghai</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>Faculty of Computer Science, University of Sunderland</institution>, <addr-line>Sunderland</addr-line>, <country>United Kingdom</country></aff>
<aff id="aff3"><sup>3</sup><institution>School of Mechanical and Aerospace Engineering, Nanyang Technological University</institution>, <addr-line>Singapore</addr-line>, <country>Singapore</country></aff>
<aff id="aff4"><sup>4</sup><institution>School of Engineering and Computer Science, University of Hull</institution>, <addr-line>Kingston upon Hull</addr-line>, <country>United Kingdom</country></aff>
<aff id="aff5"><sup>5</sup><institution>Department of Cardiology, Shanghai Tenth People&#x2019;s Hospital, Tongji University School of Medicine</institution>, <addr-line>Shanghai</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Nianyin Zeng, Xiamen University, China</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Ming Zeng, Xiamen University, China; Cheng Wang, Huaqiao University, China; Yingchun Ren, Jiaxing University, China</p></fn>
<corresp id="c001">&#x002A;Correspondence: Yongtao Hao, <email>hao0yt@163.com</email> Yongqiang Cheng, <email>Y.Cheng@hull.ac.uk</email> Wenliang Che, <email>chewenliang@tongji.edu.cn</email></corresp>
<fn fn-type="other" id="fn002"><p>This article was submitted to Brain Imaging Methods, a section of the journal Frontiers in Neuroscience</p></fn></author-notes>
<pub-date pub-type="epub">
<day>20</day>
<month>03</month>
<year>2019</year>
</pub-date>
<pub-date pub-type="collection">
<year>2019</year>
</pub-date>
<volume>13</volume>
<elocation-id>210</elocation-id>
<history>
<date date-type="received">
<day>21</day>
<month>10</month>
<year>2018</year>
</date>
<date date-type="accepted">
<day>25</day>
<month>02</month>
<year>2019</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2019 Huang, Tian, Lan, Peng, Ng, Hao, Cheng and Che.</copyright-statement>
<copyright-year>2019</copyright-year>
<copyright-holder>Huang, Tian, Lan, Peng, Ng, Hao, Cheng and Che</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<p>Recent research has reported the application of image fusion technologies in medical images in a wide range of aspects, such as in the diagnosis of brain diseases, the detection of glioma and the diagnosis of Alzheimer&#x2019;s disease. In our study, a new fusion method based on the combination of the shuffled frog leaping algorithm (SFLA) and the pulse coupled neural network (PCNN) is proposed for the fusion of SPECT and CT images to improve the quality of fused brain images. First, the intensity-hue-saturation (IHS) of a SPECT and CT image are decomposed using a non-subsampled contourlet transform (NSCT) independently, where both low-frequency and high-frequency images, using NSCT, are obtained. We then used the combined SFLA and PCNN to fuse the high-frequency sub-band images and low-frequency images. The SFLA is considered to optimize the PCNN network parameters. Finally, the fused image was produced from the reversed NSCT and reversed IHS transforms. We evaluated our algorithms against standard deviation (SD), mean gradient (&#x1E20;), spatial frequency (SF) and information entropy (E) using three different sets of brain images. The experimental results demonstrated the superior performance of the proposed fusion method to enhance both precision and spatial resolution significantly.</p>
</abstract>
<kwd-group>
<kwd>single-photon emission computed tomography image</kwd>
<kwd>computed tomography image</kwd>
<kwd>image fusion</kwd>
<kwd>pulse coupled neural network</kwd>
<kwd>shuffled frog leaping</kwd>
</kwd-group>
<counts>
<fig-count count="7"/>
<table-count count="3"/>
<equation-count count="7"/>
<ref-count count="43"/>
<page-count count="10"/>
<word-count count="0"/>
</counts>
</article-meta>
</front>
<body>
<sec><title>Introduction</title>
<p>In 1895 Rontgen obtained the first human medical image by X-ray, after which research of medical images gained momentum, laying the foundation for medical image fusion. With the development of both medical imaging technology and hardware facilities, a series of medical images with different characteristics and information were obtained, contributing to a key source of information for disease diagnosis. At present, clinical medical images mainly include Computed Tomography (CT) images, Magnetic Resonance Imaging (MRI) images, Single-Photon Emission Computed Tomography (SPECT) images, Dynamic Single-Photon Emission Computed Tomography (DSPECT) and ultrasonic images, etc. (<xref ref-type="bibr" rid="B20">Jodoin et al., 2015</xref>; <xref ref-type="bibr" rid="B10">Hansen et al., 2017</xref>; <xref ref-type="bibr" rid="B41">Zhang J. et al., 2017</xref>). It is necessary to fuse different modes of medical images into more informative images based on fusion algorithms, in order to provide doctors with more reliable information during clinical diagnosis (<xref ref-type="bibr" rid="B24">Kavitha and Chellamuthu, 2014</xref>; <xref ref-type="bibr" rid="B40">Zeng et al., 2014</xref>). At present, medical image fusion has been considered in many aspects, such as the localization of brain diseases, the detection of glioma, the diagnosis of AD (Alzheimer&#x2019;s disease), etc. (<xref ref-type="bibr" rid="B17">Huang, 1996</xref>; <xref ref-type="bibr" rid="B29">Singh et al., 2015</xref>; <xref ref-type="bibr" rid="B35">Zeng et al., 2018</xref>).</p>
<p>Image fusion is the synthesis of images into a new image using a specific algorithm. The space-time relativity and complementarity of information in fused images can be fully used in the process of image fusion, contributing to a more comprehensive expression of the scene (<xref ref-type="bibr" rid="B31">Wu et al., 2005</xref>; <xref ref-type="bibr" rid="B3">Choi, 2006</xref>). Conventional methods of SPECT and CT fusion images mainly include component substitution and multi-resolution analysis (<xref ref-type="bibr" rid="B2">Amolins et al., 2007</xref>; <xref ref-type="bibr" rid="B14">Huang and Du, 2008</xref>; <xref ref-type="bibr" rid="B16">Huang and Jiang, 2012</xref>). Component substitution mainly refers to intensity-hue-saturation (IHS) transform, with the advantage of improving the spatial resolution of SPECT images (<xref ref-type="bibr" rid="B12">Huang, 1999</xref>; <xref ref-type="bibr" rid="B26">Rahmani et al., 2010</xref>). The limitation of transform invariance leads to difficulty in extracting both image contour and edge details. In order to solve this problem, contourlet transform was proposed by <xref ref-type="bibr" rid="B4">Da et al. (2006)</xref>, <xref ref-type="bibr" rid="B43">Zhao et al. (2012)</xref>, <xref ref-type="bibr" rid="B33">Xin and Deng (2013)</xref>. Moreover, non-subsampled contourlet transform (NSCT) was also proposed to fully extract the directional information of SPECT images and CT images to be fused, providing better performance in image decomposition (<xref ref-type="bibr" rid="B4">Da et al., 2006</xref>; <xref ref-type="bibr" rid="B30">Wang and Zhou, 2010</xref>; <xref ref-type="bibr" rid="B34">Yang et al., 2016</xref>).</p>
<p>The Pulse Coupled Neural Network (PCNN) was discovered by <xref ref-type="bibr" rid="B8">Eckhorn et al. (1989)</xref> in the 1990s while studying the imaging mechanisms of the visual cortex of small mammals. No training process is required in the PCNN and useful information can be obtained from a complex background through the PCNN. Nevertheless, the PCNN has its shortcomings, such as the numerous parameters and the complicated process of setting parameters. Thus, novel algorithms to optimize the PCNN parameters have been introduced to improve the calculation speed of PCNN (<xref ref-type="bibr" rid="B13">Huang, 2004</xref>; <xref ref-type="bibr" rid="B15">Huang et al., 2004</xref>; <xref ref-type="bibr" rid="B19">Jiang et al., 2014</xref>; <xref ref-type="bibr" rid="B32">Xiang et al., 2015</xref>). SFLA is a new heuristic algorithm first presented by Eusuff and Lansey, which combines the advantages of the memetic algorithm and particle swarm optimization. The algorithm can search and analyze the optimal value in a complex space with fewer parameters and has a higher performance and robustness (<xref ref-type="bibr" rid="B27">Samuel and Asir Rajan, 2015</xref>; <xref ref-type="bibr" rid="B28">Sapkheyli et al., 2015</xref>; <xref ref-type="bibr" rid="B23">Kaur and Mehta, 2017</xref>).</p>
<p>In our study, a new fusion approach based on the SFLA and PCNN is proposed to address the limitations discussed above. Our proposed method not only innovatively uses SFLA optimization to effectively learn the PCNN parameters, but also produces high quality fused images. A series of contrasting experiments are discussed in view of image quality and objective evaluations.</p>
<p>The remaining part of the paper is organized as follows. Related work is introduced in Section &#x201C;Related Works.&#x201D; The fusion method is proposed in Section &#x201C;Materials and Methods.&#x201D; The experimental results are presented in Section &#x201C;Results,&#x201D; and Section &#x201C;Conclusion&#x201D; concludes the paper with an outlook on future work.</p>
</sec>
<sec><title>Related Works</title>
<p>Image fusion involves a wide range of disciplines and can be classified under the category of information fusion, where a series of methods have been presented. A novel fusion method for multi-scale images has been presented by <xref ref-type="bibr" rid="B42">Zhang X. et al. (2017)</xref> using Empirical Wavelet Transform (EWT). In the proposed method, simultaneous empirical wavelet transforms (SEWT) were used for one-dimensional and two-dimensional signals, to ensure the optimal wavelets for processed signals. A satisfying visual perception was achieved through a series of experiments, and in terms of objective evaluations it was demonstrated that the method was superior to other traditional algorithms. However, time consumption of the proposed method is high, mainly during the process of image decomposition, causing application difficulties in a real-time system. Noisy images should also be considered in future work where the process of generating optimal wavelets may be affected (<xref ref-type="bibr" rid="B39">Zeng et al., 2016b</xref>; <xref ref-type="bibr" rid="B42">Zhang X. et al., 2017</xref>).</p>
<p><xref ref-type="bibr" rid="B1">Aishwarya and Thangammal (2017)</xref> also proposed a fusion method based on a supervised dictionary learning approach. During the dictionary training, in order to reduce the number of input patches, gradient information was first obtained for every patch in the training set. Second, both the information content and edge strength were measured for each gradient patch. Finally, the patches with better focus features were selected by a selection rule to train the overcomplete dictionary. Additionally, in the process of fusion, the globally learned dictionary was used to achieve better visual quality. Nevertheless, high computational costs also exist in this proposed approach during the process of sparse coding and final fusion performance, which may be affected by high frequency noise (<xref ref-type="bibr" rid="B38">Zeng et al., 2016a</xref>; <xref ref-type="bibr" rid="B1">Aishwarya and Thangammal, 2017</xref>).</p>
<p>Moreover, an algorithm for the fusion of thermal and visual images was introduced by M. Kanmani et al. in order to obtain a single comprehensive fused image. A novel method called self tuning particle swarm optimization (STPSO) was presented to calculate the optimal weights. A weighted averaging fusion rule was also used to fuse the low-frequency and high-frequency coefficients, obtained through Dual Tree Discrete Wavelet Transform (DT-DWT) (<xref ref-type="bibr" rid="B22">Kanmani and Narasimhan, 2017</xref>; <xref ref-type="bibr" rid="B36">Zeng et al., 2017a</xref>). Xinxia Ji et al. proposed a new fusion algorithm based on an adaptive weighted method in combination with the idea of fuzzy theory. In the algorithm, a membership function with fuzzy logic variables was designed to achieve the transformation of different leveled coefficients by different weights. Experimental results indicated that the proposed algorithm outperformed existing algorithms in aspects of visual quality and objective measures (<xref ref-type="bibr" rid="B18">Ji and Zhang, 2017</xref>; <xref ref-type="bibr" rid="B37">Zeng et al., 2017b</xref>).</p>
</sec>
<sec id="s1" sec-type="materials|methods">
<title>Materials and Methods</title>
<sec><title>The Image Fusion Method Based on PCNN and SFLA</title>
<p>Algorithm 1 represents an image fusion algorithm based on the PCNN and SFLA, where SPECT and CT images are fused. In our proposed algorithm, a SPECT image is first decomposed into three components using IHS transform, which include saturation S, hue H and intensity I. Component I is then decomposed into a low-frequency and high-frequency image through NSCT decomposition. Additionally, a CT image is decomposed into a low-frequency and high-frequency image through NSCT decomposition. Moreover, the two low-frequency images obtained above are fused into a new low-frequency image through the SFLA and PCNN combination fusion rules, while the two high-frequency images obtained above are fused into a new high-frequency image through the SFLA and PCNN combination fusion rules. Next, the new low-frequency and new high-frequency images are fused to generate a new image with intensity I&#x2019; using reversed NSCT. Finally, the target image is achieved by using reversed IHS transform to integrate the three components S, H and I&#x2019;.</p>
<table-wrap>
<table>
<thead>
<tr>
<th valign="top" align="left"><hr/>Algorithm 1: An image fusion algorithm based on PCNN and SFLA<hr/></th></tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Input: A SPECT image A and a CT image B</td></tr>
<tr>
<td valign="top" align="left">Output: A fused image F</td></tr>
<tr>
<td valign="top" align="left">Step 1: Obtain three components of image A using IHS transform; saturation S, hue H and intensity I.</td></tr>
<tr>
<td valign="top" align="left">Step 2: Image decomposition</td></tr>
<tr>
<td valign="top" align="left">(1) Decompose the component I of image A to a low-frequency image AL and high-frequency image AH through NSCT decomposition.</td></tr>
<tr>
<td valign="top" align="left">(2) Decompose image B to a low-frequency image BL and high-frequency image BH through NSCT decomposition.</td></tr>
<tr>
<td valign="top" align="left">Step 3: Image fusion</td></tr>
<tr>
<td valign="top" align="left">(1) Fuse the low-frequency images AL and BL to a new low-frequency image CL through the SFLA and PCNN combination fusion rules.</td></tr>
<tr>
<td valign="top" align="left">(2) Fuse the high-frequency images AH and BH to form a new high-frequency image CH through the SFLA and PCNN combination fusion rules.</td></tr>
<tr>
<td valign="top" align="left">Step 4: Inverse transform</td></tr>
<tr>
<td valign="top" align="left">Fuse the low-frequency image CL and high-frequency image CH to a new image with intensity I&#x2019; using reversed NSCT.</td></tr>
<tr>
<td valign="top" align="left">Step 5: Reversed IHS transform</td></tr>
<tr>
<td valign="top" align="left">Through the reversed IHS transform, integrate the three components S, H and I&#x2019;, then obtain the target image F.<hr/></td></tr>
</tbody>
</table>
</table-wrap>
<p>The overall method of the proposed algorithm for the fusion of a SPECT and CT image is outlined in <xref ref-type="fig" rid="F1">Figure 1</xref>.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption><p>The proposed method for the process of fusion.</p></caption>
<graphic xlink:href="fnins-13-00210-g001.tif"/>
</fig>
</sec>
<sec><title>Decomposition Rule</title>
<p>In our proposed method, the SPECT image and CT image are decomposed into a low-frequency and high-frequency image using NSCT.</p>
<p>Non-subsampled contourlet transform (<xref ref-type="bibr" rid="B12">Huang, 1999</xref>; <xref ref-type="bibr" rid="B26">Rahmani et al., 2010</xref>) is composed of a non-subsampled pyramid filter bank (NSPFB) and a non-subsampled directional filter bank (NSDFB). The source image is decomposed into a high-frequency sub-band and a low-frequency sub-band by NSPFB. The high-frequency sub-band is then decomposed into a sub-band of each direction by NSDFB. The structure diagram of the two-level decomposition of NSCT is shown in <xref ref-type="fig" rid="F2">Figure 2</xref>.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption><p>The structure diagram of the two-level decomposition of NSCT.</p></caption>
<graphic xlink:href="fnins-13-00210-g002.tif"/>
</fig>
<p>An analysis filter {H<sub>1</sub> (z), H<sub>2</sub> (z)} and a synthesis filter {G<sub>1</sub> (z), G<sub>2</sub> (z)} are used when using NSCT to decompose images and the two filters satisfy H<sub>1</sub>(z)G<sub>1</sub>(z) + H<sub>2</sub>(z)G<sub>2</sub>(z) = 1. The source image can generate low-frequency and high-frequency sub-band images when it is decomposed by NSP. The next level of NSP decomposition is performed on low-frequency components obtained by the upper-level decomposition. An analysis filter {U<sub>1</sub> (z), U<sub>2</sub> (z)} and synthesis filters {V<sub>1</sub> (z), V<sub>2</sub> (z)} are contained in the design structure of NSDFB with the requirement of U<sub>1</sub>(z)V <sub>1</sub>(z) + U<sub>2</sub>(z)V <sub>2</sub>(z) = 1. The high-pass sub-band image decomposed by J-level NSP is decomposed by L-level NSDFB, and the high-frequency sub-band coefficients can be obtained at the number of 2<sup>n</sup>, where n is an integer higher than 0. A fused image with clearer contours and translation invariants can be obtained through the fusion method based on NSCT (<xref ref-type="bibr" rid="B33">Xin and Deng, 2013</xref>).</p>
</sec>
<sec><title>Fusion Rule</title>
<p>Fusion rules affect image performance, so the selection of fusion rules largely determines the quality of the final fused image. In this section, the PCNN fusion algorithm based on SFLA is introduced for low-frequency and high-frequency sub-band images decomposed by NSCT.</p>
<sec><title>Pulse Coupled Neural Network</title>
<p>The PCNN is a neural network model of single-cortex feedback that simulates the processing mechanism of visual signals in the cerebral cortex of cats. It consists of several neurons connected to each other, where each neuron is composed of three parts: the receiving domain, the coupled linking modulation domain and the pulse generator. In image fusion using the PCNN, the M <sup>&#x2217;</sup> N neurons of a two-dimensional PCNN network correspond to the M <sup>&#x2217;</sup> N pixels of the two-dimensional input image, and the gray value of the pixel is taken as the external stimulus of the network neuron. Initially, the internal activation of neurons is equal to the external stimulation. When the external stimulus is greater than the threshold value, a natural ignition will occur. When a neuron ignites, its threshold will increase sharply and then decay exponentially with time. When the threshold attenuates to less than the corresponding internal activation, the neuron will ignite again, and the neuron will generate a pulse sequence signal. The ignited neurons stimulate the ignition of adjacent neurons by interacting with adjacent neurons, thereby generating an automatic wave in the activation region to propagate outward (<xref ref-type="bibr" rid="B9">Ge et al., 2009</xref>).</p>
<p>The parameters of the PCNN affect the quality of image fusion, and most current research uses the method of regressively exploring the values of parameters, which is subjective to a certain degree. Therefore, how to reasonably set the parameters of the PCNN is the key to improving its performance. In our paper, SFLA is used to optimize the PCNN network parameters.</p>
</sec>
<sec><title>Shuffled Frog Leaping Algorithm</title>
<p>Shuffled frog leaping algorithm is a particle swarm search method based on groups to obtain optimal results. The flowchart of SFLA is shown in <xref ref-type="fig" rid="F3">Figure 3</xref>. First, the population size F, the number of sub populations m, the maximum iterations of local search for each sub population N and the number of frogs in each sub population n were defined. Second, a population was initialized, and the fitness value of each frog was calculated and sorted in descending order. A memetic algorithm is used in the process of the search, and the search is carried out in groups. All groups are then fused, and the frogs are sorted according to an established rule. Moreover, the frog population is divided based on the established rules, and the overall information exchange is achieved using this method until the number of iterations is equal to the maximum iterations N (<xref ref-type="bibr" rid="B25">Li et al., 2018</xref>).</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption><p>The flowchart of the shuffled frog leaping algorithm.</p></caption>
<graphic xlink:href="fnins-13-00210-g003.tif"/>
</fig>
<p>F(x) is defined as a fitness function and &#x03A9; is a feasible domain. In each iteration, P<sub>g</sub> is the best frog for a frog population, P<sub>b</sub> represents the best frog for each group and P<sub>w</sub> is the worst frog for each group. The algorithm adopts the following update strategy to carry out a local search in each group:</p>
<disp-formula id="E1"><label>(1)</label><mml:math id="M1"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mtable columnalign='left'><mml:mtr columnalign='left'><mml:mtd columnalign='left'><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mo>j</mml:mo></mml:msub><mml:mo>=</mml:mo><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mo stretchy='false'>)</mml:mo><mml:mo>&#x00B7;</mml:mo><mml:mo stretchy='false'>(</mml:mo><mml:msub><mml:mi>P</mml:mi><mml:mo>b</mml:mo></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>P</mml:mi><mml:mo>w</mml:mo></mml:msub><mml:mo stretchy='false'>)</mml:mo><mml:mo>,</mml:mo><mml:mo>&#x2003;</mml:mo><mml:mo>-</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>max</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2264;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mo>j</mml:mo></mml:msub><mml:mo>&#x2264;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>max</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mtd></mml:mtr><mml:mtr columnalign='left'><mml:mtd columnalign='left'><mml:mrow><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mo>w</mml:mo><mml:mo>,</mml:mo><mml:mo>new</mml:mo></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>P</mml:mi><mml:mo>w</mml:mo></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mo>j</mml:mo></mml:msub></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math></disp-formula>
<p>where S<sub>j</sub> represents the updated value of frog leaping, rand () is defined as the random number between 0 and 1, S<sub>max</sub> is described as the maximum leaping value, and P<sub>w,new</sub> is the worst frog of the updated group. If P<sub>w,new</sub> &#x2208; &#x03A9; and F(P<sub>w,new</sub>) > F(P<sub>w</sub>), P<sub>w</sub> can be replaced by P<sub>w,new</sub>, otherwise, P<sub>b</sub> will be replaced by P<sub>g</sub>. At the same time, if P&#x2032;<sub>w,new</sub> &#x2208; &#x03A9; and F(P&#x2032;<sub>w,new</sub>) > F(P<sub>w</sub>), P<sub>w</sub> can be replaced by P&#x2032;<sub>w,new</sub>, otherwise P<sub>w</sub> can be replaced by a new frog and then the process of iteration will continue until the maximum number of iterations is reached.</p>
</sec>
<sec><title>PCNN Fusion Algorithm Based on SFLA</title>
<p>Three parameters &#x03B1;<sub>&#x1D703;</sub>,&#x03B2; and V <sub>&#x1D703;</sub> in PCNN are essential for the results of image fusion. Therefore, as it is shown in <xref ref-type="fig" rid="F4">Figure 4</xref>, in our study, the SFLA is used to optimize the PCNN in order to achieve the optimal solution of the PCNN parameters. Each frog is defined as a spatial solution X(&#x03B1;<sub>&#x1D703;</sub>,&#x03B2;,V <sub>&#x1D703;</sub>) and the optimal configuration scheme of the PCNN parameters can finally be obtained by searching for the best frog X<sub>b</sub>(&#x03B1;<sub>&#x1D703;</sub>,&#x03B2;,V <sub>&#x1D703;</sub>).</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption><p>The process of PCNN parameter optimization based on SFLA.</p></caption>
<graphic xlink:href="fnins-13-00210-g004.tif"/>
</fig>
<p>In our proposed method, possible configuration schemes of parameters are defined, which constitute a solution space for the parameter optimization. After generating an initial frog solution space, F frogs in the population are divided into m groups, and each group is dependent on one another. Starting from the initial solution, the frogs in each group first carry out an intraclass optimization by a local search, thereby continuously updating their own fitness values. In N iterations of local optimization, the quality of the whole frog population is optimized with the improvement of the quality of frogs in all groups. The frogs of the population are then fused and regrouped according to the established rule, and local optimization within the group is carried out until reaching the final iteration conditions. Finally, the global optimal solution of the frog population is defined as the optimal PCNN parameter configuration. The final fusion image is thus obtained using the optimal parameter configuration above.</p>
</sec>
</sec></sec>
<sec><title>Results</title>
<p>In order to verify the accuracy and preservation of the edge details in our proposed method, three sets of CT and SPECT images were fused based on our method. The results of each set were compared with four fusion methods: IHS, NSCT+FL, DWT, and NSCT+PCNN. In the method of NSCT+FL, images are first decomposed by NSCT to obtain high-frequency and low-frequency coefficients, and then fusion images are obtained by taking large value high-frequency coefficients and taking average value low-frequency coefficients. In NSCT+PCNN, images are decomposed by NSCT and fused by the PCNN.</p>
<sec><title>Subjective Evaluations of Experimental Results</title>
<p>Experiments were implemented on the image database from the Whole Brain Web Site of Harvard Medical School (<xref ref-type="bibr" rid="B21">Johnson and Becker, 2001</xref>) which contains two groups of images including CT and SPECT images. Each group has three examples including normal brain images, glioma brain images and brain images of patients diagnosed with Alzheimer&#x2019;s disease. The testing images have been used in many related papers (<xref ref-type="bibr" rid="B5">Du et al., 2016a</xref>,<xref ref-type="bibr" rid="B6">b</xref>,<xref ref-type="bibr" rid="B7">c</xref>) and the platform is MATLAB R2018a.</p>
<p>A series of fusion results of SPECT and CT images, based on different methods including IHS, NSCT+FL, DWT, NSCT+PCNN, and our proposed method, is shown in <xref ref-type="fig" rid="F5">Figures 5</xref>&#x2013;<xref ref-type="fig" rid="F7">7</xref>. The fusion results of a set of normal brain images are shown in <xref ref-type="fig" rid="F5">Figure 5</xref>, the fusion results of a set of glioma brain images are presented in <xref ref-type="fig" rid="F6">Figure 6</xref>, while a set of brain images of patients diagnosed with Alzheimer&#x2019;s disease are shown in <xref ref-type="fig" rid="F7">Figure 7</xref>. In <xref ref-type="fig" rid="F5">Figures 5</xref>&#x2013;<xref ref-type="fig" rid="F7">7</xref>, (a), (h) and (o) are source CT images; (b), (i), (p) are source SPECT images; (c), (j) and (q) are fused images based on IHS; (d), (k) and (r) are fused images based on NSCT+FL; (e), (l) and (s) are fused images based on DWT; (f), (m) and (t) are fused images based on the combination of NSCT+PCNN; (g), (n) and (u) are fused images based on the proposed method. It can be seen that the fusion results based on our proposed method are more accurate and clearer than those based on various other methods. Our proposed method contributes to a higher brightness of fusion images and more information on the edge details.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption><p>A series of contrasting experiments for normal brain images on fusion images based on different fusion methods (set 1). <bold>(A,H,O)</bold> are source CT images; <bold>(B,I,P)</bold> are source SPECT images; <bold>(C,J,Q)</bold> are fused images based on IHS; <bold>(D,K,R)</bold> are fused images based on NSCT+FL; <bold>(E,L,S)</bold> are fused images based on DWT; <bold>(F,M,T)</bold> are fused images based on the combination of NSCT+PCNN; <bold>(G,N,U)</bold> are fused images based on the proposed method.</p></caption>
<graphic xlink:href="fnins-13-00210-g005.tif"/>
</fig>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption><p>A series of contrasting experiments for glioma brain images on fusion images based on different fusion methods (set 2). <bold>(A,H,O)</bold> are source CT images; <bold>(B,I,P)</bold> are source SPECT images; <bold>(C,J,Q)</bold> are fused images based on IHS; <bold>(D,K,R)</bold> are fused images based on NSCT+FL; <bold>(E,L,S)</bold> are fused images based on DWT; <bold>(F,M,T)</bold> are fused images based on the combination of NSCT+PCNN; <bold>(G,N,U)</bold> are fused images based on the proposed method.</p></caption>
<graphic xlink:href="fnins-13-00210-g006.tif"/>
</fig>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption><p>A series of contrasting experiments for brain images of patients diagnosed with Alzheimer&#x2019;s disease on fusion images based on different fusion methods (set 3). <bold>(A,H,O)</bold> are source CT images; <bold>(B,I,P)</bold> are source SPECT images; <bold>(C,J,Q)</bold> are fused images based on IHS; <bold>(D,K,R)</bold> are fused images based on NSCT+FL; <bold>(E,L,S)</bold> are fused images based on DWT; <bold>(F,M,T)</bold> are fused images based on the combination of NSCT+PCNN; <bold>(G,N,U)</bold> are fused images based on the proposed method.</p></caption>
<graphic xlink:href="fnins-13-00210-g007.tif"/>
</fig>
</sec>
<sec><title>Objective Evaluations of Experimental Results</title>
<p>A set of metrics is used to compare the performance of the fusion methods including IHS, DWT, NSCT, PCNN, a combination of NSCT and the PCNN, and our proposed method. The evaluation metrics including standard deviation (SD), mean gradient (&#x1E20;), spatial frequency (SF) and information entropy (E) are detailed as follows (<xref ref-type="bibr" rid="B11">Huang et al., 2018</xref>):</p>
<list list-type="simple" prefix-word="simple">
<list-item><label>(1)</label><p>Standard deviation</p></list-item>
<list-item><p>Standard deviation is used to evaluate the contrast of the fused image, which is defined as</p></list-item></list>
<disp-formula id="E2"><label>(2)</label><mml:math id="M2"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mi>&#x03C3;</mml:mi><mml:mo>=</mml:mo><mml:msqrt><mml:mrow><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>M</mml:mi></mml:munderover><mml:mrow><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>N</mml:mi></mml:munderover><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mi>Z</mml:mi><mml:mo stretchy='false'>(</mml:mo></mml:mrow></mml:mstyle></mml:mrow></mml:mstyle><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mover accent='true'><mml:mi>Z</mml:mi><mml:mo>&#x00AF;</mml:mo></mml:mover><mml:msup><mml:mo stretchy='false'>)</mml:mo><mml:mo>2</mml:mo></mml:msup><mml:mo>/</mml:mo><mml:mo stretchy='false'>(</mml:mo><mml:mi>M</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>N</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow></mml:msqrt></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math></disp-formula>
<list list-type="simple" prefix-word="simple"><list-item><p>where Z(i,j) represents the pixel value of the fused image and <inline-formula><mml:math id="M11"><mml:mover accent='true'><mml:mi>Z</mml:mi><mml:mo>&#x00AF;</mml:mo></mml:mover></mml:math></inline-formula> is the mean value of the pixel values of the image.</p></list-item>
<list-item><p>The SD reflects the dispersion of the image gray scale relative to its mean value, and a higher value of SD indicates better performance of a fused image.</p></list-item>
<list-item><label>(2)</label><p>Mean gradient (&#x1E20;)</p></list-item>
<list-item><p>&#x1E20; corresponds to the ability of a fused image to represent the contrast of tiny details sensitively. It can be mathematically described as</p></list-item></list>
<disp-formula id="E3"><label>(3)</label><mml:math id="M3"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mover accent='true'><mml:mi>G</mml:mi><mml:mo>&#x00AF;</mml:mo></mml:mover><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mi>M</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo><mml:mo stretchy='false'>(</mml:mo><mml:mi>N</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow></mml:mfrac><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>M</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:munderover></mml:mstyle><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:munderover></mml:mstyle><mml:msqrt><mml:mrow><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:msup><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mfrac><mml:mrow><mml:mo>&#x2202;</mml:mo><mml:mi>Z</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x2202;</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mfrac><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup><mml:mo>+</mml:mo><mml:msup><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mfrac><mml:mrow><mml:mo>&#x2202;</mml:mo><mml:mi>Z</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x2202;</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:mfrac><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mo>/</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msqrt></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math></disp-formula>
<list list-type="simple" prefix-word="simple">
<list-item><p>The fused image is clearer when the value of mean gradient is higher.</p></list-item>
<list-item><label>(3)</label><p>Spatial frequency (SF)</p></list-item>
<list-item><p>Spatial frequency is the measure of the overall activity in a fused image. For an image with a gray value Z(x<sub>i</sub>,y<sub>j</sub>) at position (x<sub>i</sub>,y<sub>j</sub>), the spatial frequency is defined as</p></list-item></list>
<disp-formula id="E4"><label>(4)</label><mml:math id="M4"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mi>S</mml:mi><mml:mi>F</mml:mi><mml:mo>=</mml:mo><mml:msqrt><mml:mrow><mml:mi>R</mml:mi><mml:msup><mml:mi>F</mml:mi><mml:mo>2</mml:mo></mml:msup><mml:mo>+</mml:mo><mml:mi>C</mml:mi><mml:msup><mml:mi>F</mml:mi><mml:mo>2</mml:mo></mml:msup></mml:mrow></mml:msqrt></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math></disp-formula>
<list list-type="simple" prefix-word="simple"><list-item><p>where the row frequency is</p></list-item></list>
<disp-formula id="E5"><label>(5)</label><mml:math id="M5"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mi>R</mml:mi><mml:mi>F</mml:mi><mml:mo>=</mml:mo><mml:msqrt><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mi>M</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>N</mml:mi></mml:mrow></mml:mfrac><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>M</mml:mi></mml:munderover></mml:mstyle><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>2</mml:mn></mml:mrow><mml:mi>N</mml:mi></mml:munderover></mml:mstyle><mml:msup><mml:mrow><mml:mo>[</mml:mo><mml:mi>Z</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:mo>&#x00A0;</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo>)-</mml:mo><mml:mi>Z</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>j</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo stretchy='false'>)</mml:mo><mml:mo stretchy='false'>]</mml:mo></mml:mrow><mml:mo>2</mml:mo></mml:msup></mml:mrow></mml:msqrt></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math></disp-formula>
<list list-type="simple" prefix-word="simple"><list-item><p>and the column frequency is</p></list-item></list>
<disp-formula id="E6"><label>(6)</label><mml:math id="M6"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mo>CF=</mml:mo><mml:msqrt><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mi>M</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>N</mml:mi></mml:mrow></mml:mfrac><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>2</mml:mn></mml:mrow><mml:mi>M</mml:mi></mml:munderover></mml:mstyle><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>N</mml:mi></mml:munderover></mml:mstyle><mml:msup><mml:mrow><mml:mo>[</mml:mo><mml:mi>Z</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:mo>&#x00A0;</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo>)-</mml:mo><mml:mi>Z</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo stretchy='false'>)</mml:mo><mml:mo stretchy='false'>]</mml:mo></mml:mrow><mml:mo>2</mml:mo></mml:msup></mml:mrow></mml:msqrt></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math></disp-formula>
<list list-type="simple" prefix-word="simple"><list-item><p>The higher the value of frequency, the better the fused image quality.</p></list-item>
<list-item><label>(4)</label><p>Information entropy (E)</p></list-item>
<list-item><p>Information entropy is provided by the below equation</p></list-item></list>
<disp-formula id="E7"><label>(7)</label><mml:math id="M7"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mi>E</mml:mi><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mi>L</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:munderover><mml:mrow><mml:msub><mml:mi>p</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mstyle><mml:msub><mml:mrow><mml:mi>log</mml:mi></mml:mrow><mml:mo>2</mml:mo></mml:msub><mml:msub><mml:mi>p</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math></disp-formula>
<list list-type="simple" prefix-word="simple"><list-item><p>where L is the image gray scale and p<sub>i</sub> is the proportion of pixels with gray value i among all pixels. A higher value of entropy indicates more information contained in the fused image.</p></list-item></list>
<p>Experimental results on fused images of SPECT images and CT images are shown in <xref ref-type="table" rid="T1">Tables 1</xref>&#x2013;<xref ref-type="table" rid="T3">3</xref>. The fusion results of a set of normal brain images are shown in <xref ref-type="table" rid="T1">Table 1</xref>, the fusion results of a set of glioma brain images are presented in <xref ref-type="table" rid="T2">Table 2</xref>, while a set of brain images of patients diagnosed with Alzheimer&#x2019;s disease are shown in <xref ref-type="table" rid="T3">Table 3</xref>. It can be seen that compared to other fusion methods, our proposed method generally has higher values in SD, &#x1E20;, SF and E. The experimental results demonstrate that information of fusion images obtained by our proposed method is more abundant, the inheritance of detail information performs better, while the resolution is significantly improved.</p>
<table-wrap position="float" id="T1">
<label>TABLE 1</label>
<caption><p>Performance evaluations on normal brain fused images based on different methods.</p></caption>
<table cellspacing="5" cellpadding="5" frame="hsides" rules="groups">
<thead>
<tr>
<td valign="top" align="left"></td>
<th valign="top" align="center">Metric</th>
<th valign="top" align="center">IHS</th>
<th valign="top" align="center">NSCT+FL</th>
<th valign="top" align="center">DWT</th>
<th valign="top" align="center">NSCT+PCNN</th>
<th valign="top" align="center">Proposed</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Set 1</td>
<td valign="top" align="left">Standard deviation</td>
<td valign="top" align="center">51.6141</td>
<td valign="top" align="center">55.2178</td>
<td valign="top" align="center">42.5312</td>
<td valign="top" align="center">57.1188</td>
<td valign="top" align="center">57.2258</td></tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Mean gradient</td>
<td valign="top" align="center">8.8561</td>
<td valign="top" align="center">8.714</td>
<td valign="top" align="center">6.2027</td>
<td valign="top" align="center">8.8568</td>
<td valign="top" align="center">8.8071</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Spatial frequency</td>
<td valign="top" align="center">33.5851</td>
<td valign="top" align="center">33.2324</td>
<td valign="top" align="center">22.0093</td>
<td valign="top" align="center">33.7566</td>
<td valign="top" align="center">33.6546</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Information entropy</td>
<td valign="top" align="center">2.6859</td>
<td valign="top" align="center">2.7565</td>
<td valign="top" align="center">3.0483</td>
<td valign="top" align="center">2.7729</td>
<td valign="top" align="center">3.0621</td>
</tr>
<tr>
<td valign="top" align="left">Set 2</td>
<td valign="top" align="left">Standard deviation</td>
<td valign="top" align="center">43.278</td>
<td valign="top" align="center">49.5989</td>
<td valign="top" align="center">43.0915</td>
<td valign="top" align="center">52.9246</td>
<td valign="top" align="center">53.1691</td></tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Mean gradient</td>
<td valign="top" align="center">6.686</td>
<td valign="top" align="center">6.6633</td>
<td valign="top" align="center">4.5622</td>
<td valign="top" align="center">6.5672</td>
<td valign="top" align="center">6.7489</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Spatial frequency</td>
<td valign="top" align="center">20.3855</td>
<td valign="top" align="center">19.9558</td>
<td valign="top" align="center">12.7416</td>
<td valign="top" align="center">19.8214</td>
<td valign="top" align="center">20.0956</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Information entropy</td>
<td valign="top" align="center">3.6325</td>
<td valign="top" align="center">3.9243</td>
<td valign="top" align="center">4.2501</td>
<td valign="top" align="center">3.8386</td>
<td valign="top" align="center">3.9424</td>
</tr>
<tr>
<td valign="top" align="left">Set 3</td>
<td valign="top" align="left">Standard deviation</td>
<td valign="top" align="center">50.0926</td>
<td valign="top" align="center">55.7124</td>
<td valign="top" align="center">47.4476</td>
<td valign="top" align="center">57.1246</td>
<td valign="top" align="center">57.1268</td></tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Mean gradient</td>
<td valign="top" align="center">6.2153</td>
<td valign="top" align="center">6.1775</td>
<td valign="top" align="center">4.1822</td>
<td valign="top" align="center">6.086</td>
<td valign="top" align="center">6.1796</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Spatial frequency</td>
<td valign="top" align="center">19.244</td>
<td valign="top" align="center">18.9682</td>
<td valign="top" align="center">12.0096</td>
<td valign="top" align="center">18.7269</td>
<td valign="top" align="center">18.7335</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Information entropy</td>
<td valign="top" align="center">3.6226</td>
<td valign="top" align="center">3.7122</td>
<td valign="top" align="center">4.0074</td>
<td valign="top" align="center">3.7139</td>
<td valign="top" align="center">3.7399</td></tr>
</tbody></table>
</table-wrap>
<table-wrap position="float" id="T2">
<label>TABLE 2</label>
<caption><p>Performance evaluations on glioma brain fused images based on different methods.</p></caption>
<table cellspacing="5" cellpadding="5" frame="hsides" rules="groups">
<thead>
<tr>
<td valign="top" align="left"></td>
<th valign="top" align="center">Metric</th>
<th valign="top" align="center">IHS</th>
<th valign="top" align="center">NSCT+FL</th>
<th valign="top" align="center">DWT</th>
<th valign="top" align="center">NSCT+PCNN</th>
<th valign="top" align="center">Proposed</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Set 1</td>
<td valign="top" align="left">Standard deviation</td>
<td valign="top" align="center">41.7514</td>
<td valign="top" align="center">55.2055</td>
<td valign="top" align="center">39.8132</td>
<td valign="top" align="center">58.0374</td>
<td valign="top" align="center">58.3122</td></tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Mean gradient</td>
<td valign="top" align="center">5.2953</td>
<td valign="top" align="center">5.5442</td>
<td valign="top" align="center">3.8166</td>
<td valign="top" align="center">5.459</td>
<td valign="top" align="center">5.5678</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Spatial frequency</td>
<td valign="top" align="center">16.2064</td>
<td valign="top" align="center">16.5277</td>
<td valign="top" align="center">10.1649</td>
<td valign="top" align="center">16.466</td>
<td valign="top" align="center">16.4776</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Information entropy</td>
<td valign="top" align="center">3.9255</td>
<td valign="top" align="center">4.1433</td>
<td valign="top" align="center">4.6303</td>
<td valign="top" align="center">4.08</td>
<td valign="top" align="center">4.1788</td>
</tr>
<tr>
<td valign="top" align="left">Set 2</td>
<td valign="top" align="left">Standard deviation</td>
<td valign="top" align="center">44.154</td>
<td valign="top" align="center">55.5879</td>
<td valign="top" align="center">42.436</td>
<td valign="top" align="center">57.7284</td>
<td valign="top" align="center">57.775</td></tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Mean gradient</td>
<td valign="top" align="center">6.2881</td>
<td valign="top" align="center">6.6316</td>
<td valign="top" align="center">4.595</td>
<td valign="top" align="center">6.535</td>
<td valign="top" align="center">6.7276</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Spatial frequency</td>
<td valign="top" align="center">17.6675</td>
<td valign="top" align="center">17.9369</td>
<td valign="top" align="center">11.359</td>
<td valign="top" align="center">17.9359</td>
<td valign="top" align="center">17.9095</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Information entropy</td>
<td valign="top" align="center">4.3966</td>
<td valign="top" align="center">4.7513</td>
<td valign="top" align="center">5.1901</td>
<td valign="top" align="center">4.6312</td>
<td valign="top" align="center">4.837</td>
</tr>
<tr>
<td valign="top" align="left">Set 3</td>
<td valign="top" align="left">Standard deviation</td>
<td valign="top" align="center">48.6572</td>
<td valign="top" align="center">54.0708</td>
<td valign="top" align="center">41.78</td>
<td valign="top" align="center">56.2065</td>
<td valign="top" align="center">56.3546</td></tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Mean gradient</td>
<td valign="top" align="center">6.8855</td>
<td valign="top" align="center">6.8515</td>
<td valign="top" align="center">4.8166</td>
<td valign="top" align="center">6.774</td>
<td valign="top" align="center">6.7977</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Spatial frequency</td>
<td valign="top" align="center">27.8964</td>
<td valign="top" align="center">27.8583</td>
<td valign="top" align="center">17.8725</td>
<td valign="top" align="center">27.7365</td>
<td valign="top" align="center">27.7654</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Information entropy</td>
<td valign="top" align="center">2.4852</td>
<td valign="top" align="center">2.5749</td>
<td valign="top" align="center">2.8442</td>
<td valign="top" align="center">2.5239</td>
<td valign="top" align="center">2.658</td></tr>
</tbody></table>
</table-wrap>
<table-wrap position="float" id="T3">
<label>TABLE 3</label>
<caption><p>Performance evaluations on fused brain images of patients diagnosed with Alzheimer&#x2019;s disease, based on different methods.</p></caption>
<table cellspacing="5" cellpadding="5" frame="hsides" rules="groups">
<thead>
<tr>
<td valign="top" align="left"></td>
<th valign="top" align="center">Metric</th>
<th valign="top" align="center">IHS</th>
<th valign="top" align="center">NSCT+FL</th>
<th valign="top" align="center">DWT</th>
<th valign="top" align="center">NSCT+PCNN</th>
<th valign="top" align="center">Proposed</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Set 1</td>
<td valign="top" align="left">Standard deviation</td>
<td valign="top" align="center">66.1357</td>
<td valign="top" align="center">65.3766</td>
<td valign="top" align="center">51.0336</td>
<td valign="top" align="center">69.5392</td>
<td valign="top" align="center">66.5782</td></tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Mean gradient</td>
<td valign="top" align="center">9.9938</td>
<td valign="top" align="center">10.0303</td>
<td valign="top" align="center">6.509</td>
<td valign="top" align="center">10.0089</td>
<td valign="top" align="center">10.2068</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Spatial frequency</td>
<td valign="top" align="center">26.7087</td>
<td valign="top" align="center">26.7329</td>
<td valign="top" align="center">16.1614</td>
<td valign="top" align="center">26.6568</td>
<td valign="top" align="center">27.1771</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Information entropy</td>
<td valign="top" align="center">4.7735</td>
<td valign="top" align="center">4.834</td>
<td valign="top" align="center">5.4105</td>
<td valign="top" align="center">4.8036</td>
<td valign="top" align="center">4.8966</td>
</tr>
<tr>
<td valign="top" align="left">Set 2</td>
<td valign="top" align="left">Standard deviation</td>
<td valign="top" align="center">59.1931</td>
<td valign="top" align="center">59.2093</td>
<td valign="top" align="center">52.0837</td>
<td valign="top" align="center">61.4981</td>
<td valign="top" align="center">60.6457</td></tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Mean gradient</td>
<td valign="top" align="center">6.7482</td>
<td valign="top" align="center">7.0266</td>
<td valign="top" align="center">4.5756</td>
<td valign="top" align="center">7</td>
<td valign="top" align="center">7.0461</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Spatial frequency</td>
<td valign="top" align="center">19.0263</td>
<td valign="top" align="center">19.3264</td>
<td valign="top" align="center">11.8249</td>
<td valign="top" align="center">19.3257</td>
<td valign="top" align="center">19.512</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Information entropy</td>
<td valign="top" align="center">3.9901</td>
<td valign="top" align="center">4.1834</td>
<td valign="top" align="center">4.5922</td>
<td valign="top" align="center">4.0985</td>
<td valign="top" align="center">4.2156</td>
</tr>
<tr>
<td valign="top" align="left">Set 3</td>
<td valign="top" align="left">Standard deviation</td>
<td valign="top" align="center">56.0974</td>
<td valign="top" align="center">58.787</td>
<td valign="top" align="center">47.6032</td>
<td valign="top" align="center">56.0943</td>
<td valign="top" align="center">57.7578</td></tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Mean gradient</td>
<td valign="top" align="center">7.9023</td>
<td valign="top" align="center">8.111</td>
<td valign="top" align="center">5.4579</td>
<td valign="top" align="center">7.9592</td>
<td valign="top" align="center">7.966</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Spatial frequency</td>
<td valign="top" align="center">22.2846</td>
<td valign="top" align="center">22.4084</td>
<td valign="top" align="center">13.907</td>
<td valign="top" align="center">21.9421</td>
<td valign="top" align="center">22.0022</td>
</tr>
<tr>
<td valign="top" align="left"></td>
<td valign="top" align="left">Information entropy</td>
<td valign="top" align="center">3.895</td>
<td valign="top" align="center">4.1058</td>
<td valign="top" align="center">5.1943</td>
<td valign="top" align="center">4.2228</td>
<td valign="top" align="center">4.2897</td></tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec><title>Conclusion</title>
<p>In this paper, a new fusion method for SPECT brain and CT brain images was put forward. First, NSCT was used to decompose the IHS transform of a SPECT and CT image. The fusion rules, based on the regional average energy, were then used for low-frequency coefficients and the combination of SFLA and the PCNN was used for high-frequency sub-bands. Finally, the fused image was produced by inverse NSCT and inverse IHS transform. Both subjective evaluations and objective evaluations were used to analyze the quality of the fused images. The results demonstrated that the method we put forward can retain the information of source images better and reveal more details in integration. It can be seen that the proposed method is valid and effective in achieving satisfactory fusion results, leading to a wide range of applications in practice.</p>
<p>The paper focuses on multi-mode medical image fusion. However, there is a negative correlation between the real-time processing speed and the effectiveness of medical image fusion. Under the premise of ensuring the quality of fusion results, how to improve the efficiency of the method should be considered in the future.</p>
</sec>
<sec><title>Data Availability</title>
<p>Publicly available datasets were analyzed in this study. This data can be found here: <ext-link ext-link-type="uri" xlink:href="http://www.med.harvard.edu/aanlib/">http://www.med.harvard.edu/aanlib/</ext-link>.</p>
</sec>
<sec><title>Author Contributions</title>
<p>CH conceived the study. GT and CH designed the model. YC and YP analyzed the data. YL and WC wrote the draft. EN and YH interpreted the results. All authors gave critical revision and consent for this submission.</p>
</sec>
<sec><title>Conflict of Interest Statement</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
</body>
<back>
<fn-group>
<fn fn-type="financial-disclosure">
<p><bold>Funding.</bold> This work was supported in part by the Tongji University Short-term Study Abroad Program under Grant 2018020017, National Science and Technology Support Program under Grant 2015BAF10B01, and National Natural Science Foundation of China under Grants 81670403, 81500381, and 81201069. CH acknowledges support from Tongji University for the exchange with Nanyang Technological University.</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Aishwarya</surname> <given-names>N.</given-names></name> <name><surname>Thangammal</surname> <given-names>C. B.</given-names></name></person-group> (<year>2017</year>). <article-title>An image fusion framework using novel dictionary based sparse representation.</article-title> <source><italic>Multimed. Tools Appl.</italic></source> <volume>76</volume> <fpage>21869</fpage>&#x2013;<lpage>21888</lpage>. <pub-id pub-id-type="doi">10.1007/s11042-016-4030-x</pub-id></citation></ref>
<ref id="B2"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Amolins</surname> <given-names>K.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Dare</surname> <given-names>P.</given-names></name></person-group> (<year>2007</year>). <article-title>Wavelet based image fusion techniques- an introduction, review and comparison.</article-title> <source><italic>ISPRS J. Photogramm. Remote Sens.</italic></source> <volume>62</volume> <fpage>249</fpage>&#x2013;<lpage>263</lpage>. <pub-id pub-id-type="doi">10.1016/j.isprsjprs.2007.05.009</pub-id></citation></ref>
<ref id="B3"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Choi</surname> <given-names>M.</given-names></name></person-group> (<year>2006</year>). <article-title>A new intensity-hue-saturation fusion approach to image fusion with a tradeoff parameter.</article-title> <source><italic>IEEE Trans. Geosci. Remote Sens.</italic></source> <volume>44</volume> <fpage>1672</fpage>&#x2013;<lpage>1682</lpage>. <pub-id pub-id-type="doi">10.1109/TGRS.2006.869923</pub-id></citation></ref>
<ref id="B4"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Da</surname> <given-names>C.</given-names></name> <name><surname>Zhou</surname> <given-names>J.</given-names></name> <name><surname>Do</surname> <given-names>M.</given-names></name></person-group> (<year>2006</year>). <article-title>&#x201C;The non-subsampled contourlet transform: theory, design, and applications,&#x201D; in</article-title> <source><italic>Proceedings of the IEEE Transaction on Image Processing</italic></source> <volume>Vol. 15</volume> (<publisher-loc>Piscataway, NJ</publisher-loc>: <publisher-name>IEEE</publisher-name>) <fpage>3089</fpage>&#x2013;<lpage>3101</lpage>. <pub-id pub-id-type="doi">10.1109/TIP.2006.877507</pub-id></citation></ref>
<ref id="B5"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Du</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>W.</given-names></name> <name><surname>Xiao</surname> <given-names>B.</given-names></name> <name><surname>Nawaz</surname> <given-names>Q.</given-names></name></person-group> (<year>2016a</year>). <article-title>An overview of multi-modal medical image fusion.</article-title> <source><italic>Neurocomputing</italic></source> <volume>215</volume> <fpage>3</fpage>&#x2013;<lpage>20</lpage>.</citation></ref>
<ref id="B6"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Du</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>W.</given-names></name> <name><surname>Xiao</surname> <given-names>B.</given-names></name> <name><surname>Nawaz</surname> <given-names>Q.</given-names></name></person-group> (<year>2016b</year>). <article-title>Medical image fusion by combining parallel features on multi-scale local extrema scheme.</article-title> <source><italic>Knowl. Based Syst.</italic></source> <volume>113</volume> <fpage>4</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1016/j.knosys.2016.09.008</pub-id></citation></ref>
<ref id="B7"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Du</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>W.</given-names></name> <name><surname>Xiao</surname> <given-names>B.</given-names></name> <name><surname>Nawaz</surname> <given-names>Q.</given-names></name></person-group> (<year>2016c</year>). <article-title>Union Laplacian pyramid with multiple features for medical image fusion.</article-title> <source><italic>Neurocomputing</italic></source> <volume>194</volume> <fpage>326</fpage>&#x2013;<lpage>339</lpage>. <pub-id pub-id-type="doi">10.1016/j.neucom.2016.02.047</pub-id></citation></ref>
<ref id="B8"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Eckhorn</surname> <given-names>R.</given-names></name> <name><surname>Reitboeck</surname> <given-names>H. J.</given-names></name> <name><surname>Arndt</surname> <given-names>M.</given-names></name> <name><surname>Dicke</surname> <given-names>P.</given-names></name></person-group> (<year>1989</year>). <article-title>&#x201C;A neural network for feature linking via synchronous activity: results from cat visual cortex and from simulations,&#x201D; in</article-title> <source><italic>Models of Brain Function</italic></source> <role>ed.</role> <person-group person-group-type="editor"><name><surname>Cotterill</surname> <given-names>R. M. J.</given-names></name></person-group> (<publisher-loc>Cambridge</publisher-loc>: <publisher-name>Cambridge University Press</publisher-name>) <fpage>255</fpage>&#x2013;<lpage>272</lpage>.</citation></ref>
<ref id="B9"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ge</surname> <given-names>J.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>H.</given-names></name> <name><surname>Zhou</surname> <given-names>B.</given-names></name></person-group> (<year>2009</year>). <article-title>Study of intelligent inspection machine based on modified pulse coupled neural network.</article-title> <source><italic>Chin. J. Sci. Instrum.</italic></source> <volume>30</volume> <fpage>1866</fpage>&#x2013;<lpage>1873</lpage>.</citation></ref>
<ref id="B10"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hansen</surname> <given-names>N. L.</given-names></name> <name><surname>Koo</surname> <given-names>B. C.</given-names></name> <name><surname>Gallagher</surname> <given-names>F. A.</given-names></name> <name><surname>Warren</surname> <given-names>A. Y.</given-names></name> <name><surname>Doble</surname> <given-names>A.</given-names></name> <name><surname>Gnanapragasam</surname> <given-names>V.</given-names></name><etal/></person-group> (<year>2017</year>). <article-title>Comparison of initial and tertiary centre second opinion reads of multiparametric magnetic resonance imaging of the prostate prior to repeat biopsy.</article-title> <source><italic>Eur. Radiol.</italic></source> <volume>27</volume> <fpage>2259</fpage>&#x2013;<lpage>2266</lpage>. <pub-id pub-id-type="doi">10.1007/s00330-016-4635-5</pub-id> <pub-id pub-id-type="pmid">27778089</pub-id></citation></ref>
<ref id="B11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>C.</given-names></name> <name><surname>Xie</surname> <given-names>Y.</given-names></name> <name><surname>Lan</surname> <given-names>Y.</given-names></name> <name><surname>Hao</surname> <given-names>Y.</given-names></name> <name><surname>Chen</surname> <given-names>F.</given-names></name> <name><surname>Cheng</surname> <given-names>Y.</given-names></name><etal/></person-group> (<year>2018</year>). <article-title>A new framework for the integrative analytics of intravascular ultrasound and optical coherence tomography images.</article-title> <source><italic>IEEE Access</italic></source> <volume>6</volume> <fpage>36408</fpage>&#x2013;<lpage>36419</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2018.2839694</pub-id></citation></ref>
<ref id="B12"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>D.</given-names></name></person-group> (<year>1999</year>). <article-title>Radial basis probabilistic neural networks: model and application.</article-title> <source><italic>Int. J. Pattern Recogn.</italic></source> <volume>13</volume> <fpage>1083</fpage>&#x2013;<lpage>1101</lpage>. <pub-id pub-id-type="doi">10.1142/S0218001499000604</pub-id></citation></ref>
<ref id="B13"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>D.</given-names></name></person-group> (<year>2004</year>). <article-title>A constructive approach for finding arbitrary roots of polynomials by neural networks.</article-title> <source><italic>IEEE Trans. Neural Netw.</italic></source> <volume>15</volume> <fpage>477</fpage>&#x2013;<lpage>491</lpage>. <pub-id pub-id-type="doi">10.1109/TNN.2004.824424</pub-id> <pub-id pub-id-type="pmid">15384540</pub-id></citation></ref>
<ref id="B14"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>D.</given-names></name> <name><surname>Du</surname> <given-names>J.</given-names></name></person-group> (<year>2008</year>). <article-title>A constructive hybrid structure optimization methodology for radial basis probabilistic neural networks.</article-title> <source><italic>IEEE Trans. Neural Netw.</italic></source> <volume>19</volume> <fpage>2099</fpage>&#x2013;<lpage>2115</lpage>. <pub-id pub-id-type="doi">10.1109/TNN.2008.2004370</pub-id> <pub-id pub-id-type="pmid">19054734</pub-id></citation></ref>
<ref id="B15"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>D.</given-names></name> <name><surname>Horace</surname> <given-names>H.</given-names></name> <name><surname>Chi</surname> <given-names>Z.</given-names></name></person-group> (<year>2004</year>). <article-title>A neural root finder of polynomials based on root moments.</article-title> <source><italic>Neural Comput.</italic></source> <volume>16</volume> <fpage>1721</fpage>&#x2013;<lpage>1762</lpage>. <pub-id pub-id-type="doi">10.1162/089976604774201668</pub-id></citation></ref>
<ref id="B16"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>D.</given-names></name> <name><surname>Jiang</surname> <given-names>W.</given-names></name></person-group> (<year>2012</year>). <article-title>A general CPL-AdS methodology for fixing dynamic parameters in dual environments.</article-title> <source><italic>IEEE Trans. Syst. Man Cybern. B Cybern.</italic></source> <volume>42</volume> <fpage>1489</fpage>&#x2013;<lpage>1500</lpage>. <pub-id pub-id-type="doi">10.1109/TSMCB.2012.2192475</pub-id> <pub-id pub-id-type="pmid">22562768</pub-id></citation></ref>
<ref id="B17"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>S.</given-names></name></person-group> (<year>1996</year>). <source><italic>Systematic Theory of Neural Networks for Pattern Recognition (in Chinese).</italic></source> <publisher-loc>Beijing</publisher-loc>: <publisher-name>Publishing House of Electronic Industry of China</publisher-name>.</citation></ref>
<ref id="B18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ji</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>G.</given-names></name></person-group> (<year>2017</year>). <article-title>Image fusion method of SAR and infrared image based on Curvelet transform with adaptive weighting.</article-title> <source><italic>Multimed. Tools Appl.</italic></source> <volume>76</volume> <fpage>17633</fpage>&#x2013;<lpage>17649</lpage>. <pub-id pub-id-type="doi">10.1007/s11042-016-4030-x</pub-id></citation></ref>
<ref id="B19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jiang</surname> <given-names>P.</given-names></name> <name><surname>Zhang</surname> <given-names>Q.</given-names></name> <name><surname>Li</surname> <given-names>J.</given-names></name> <name><surname>Zhang</surname> <given-names>J.</given-names></name></person-group> (<year>2014</year>). <article-title>Fusion algorithm for infrared and visible image based on NSST and adaptive PCNN.</article-title> <source><italic>Laser Infrared</italic></source> <volume>44</volume> <fpage>108</fpage>&#x2013;<lpage>113</lpage>.</citation></ref>
<ref id="B20"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jodoin</surname> <given-names>P. M.</given-names></name> <name><surname>Pinheiro</surname> <given-names>F.</given-names></name> <name><surname>Oudot</surname> <given-names>A.</given-names></name> <name><surname>Lalande</surname> <given-names>A.</given-names></name></person-group> (<year>2015</year>). <article-title>Left-ventricle segmentation of SPECT images of rats.</article-title> <source><italic>IEEE Trans. Biomed. Eng.</italic></source> <volume>5</volume> <fpage>2260</fpage>&#x2013;<lpage>2268</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2015.2422263</pub-id> <pub-id pub-id-type="pmid">25879835</pub-id></citation></ref>
<ref id="B21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Johnson</surname> <given-names>K. A.</given-names></name> <name><surname>Becker</surname> <given-names>J. A.</given-names></name></person-group> (<year>2001</year>). <source><italic>The Whole Brain Atlas.</italic></source> <comment>Available at: <ext-link ext-link-type="uri" xlink:href="http://www.med.harvard.edu/aanlib/">http://www.med.harvard.edu/aanlib/</ext-link></comment></citation></ref>
<ref id="B22"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kanmani</surname> <given-names>M.</given-names></name> <name><surname>Narasimhan</surname> <given-names>V.</given-names></name></person-group> (<year>2017</year>). <article-title>An optimal weighted averaging fusion strategy for thermal and visible images using dual tree discrete wavelet transform and self tunning particle swarm optimization.</article-title> <source><italic>Multimed. Tools Appl.</italic></source> <volume>76</volume> <fpage>20989</fpage>&#x2013;<lpage>21010</lpage>. <pub-id pub-id-type="doi">10.1007/s11042-016-4030-x</pub-id></citation></ref>
<ref id="B23"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kaur</surname> <given-names>P.</given-names></name> <name><surname>Mehta</surname> <given-names>S.</given-names></name></person-group> (<year>2017</year>). <article-title>Resource provisioning and work flow scheduling in clouds using augmented shuffled frog leaping algorithm.</article-title> <source><italic>J. Parallel Distrib. Comput.</italic></source> <volume>101</volume> <fpage>41</fpage>&#x2013;<lpage>50</lpage>. <pub-id pub-id-type="doi">10.1016/j.jpdc.2016.11.003</pub-id></citation></ref>
<ref id="B24"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kavitha</surname> <given-names>C. T.</given-names></name> <name><surname>Chellamuthu</surname> <given-names>C.</given-names></name></person-group> (<year>2014</year>). <article-title>Fusion of SPECT and MRI images using integer wavelet transform in combination with curvelet transform.</article-title> <source><italic>Imaging Sci. J.</italic></source> <volume>63</volume> <fpage>17</fpage>&#x2013;<lpage>24</lpage>. <pub-id pub-id-type="doi">10.1179/1743131X14Y.0000000092</pub-id></citation></ref>
<ref id="B25"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>R.</given-names></name> <name><surname>Ji</surname> <given-names>C.</given-names></name> <name><surname>Sun</surname> <given-names>P.</given-names></name> <name><surname>Liu</surname> <given-names>D.</given-names></name> <name><surname>Zhang</surname> <given-names>P.</given-names></name> <name><surname>Li</surname> <given-names>J.</given-names></name></person-group> (<year>2018</year>). <article-title>Optimizing operation of cascade reservoirs based on improved shuffled frog leaping algorithm.</article-title> <source><italic>J. Yangtze River Sci. Res. Inst.</italic></source> <volume>35</volume> <fpage>30</fpage>&#x2013;<lpage>35</lpage>.</citation></ref>
<ref id="B26"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rahmani</surname> <given-names>S.</given-names></name> <name><surname>Strait</surname> <given-names>M.</given-names></name> <name><surname>Merkurjev</surname> <given-names>D.</given-names></name> <name><surname>Moeller</surname> <given-names>M.</given-names></name> <name><surname>Wittman</surname> <given-names>T.</given-names></name></person-group> (<year>2010</year>). <article-title>An adaptive IHS pan-sharpening method.</article-title> <source><italic>IEEE Geosci. Remote Sens. Lett.</italic></source> <volume>7</volume> <fpage>746</fpage>&#x2013;<lpage>750</lpage>. <pub-id pub-id-type="doi">10.1109/LGRS.2010.2046715</pub-id></citation></ref>
<ref id="B27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Samuel</surname> <given-names>G. C.</given-names></name> <name><surname>Asir Rajan</surname> <given-names>C. C.</given-names></name></person-group> (<year>2015</year>). <article-title>Hybrid: particle swarm optimization-genetic algorithm and particle swarm optimization-shuffled frog leaping algorithm for long-term generator maintenance scheduling.</article-title> <source><italic>Int. J. Elec. Power</italic></source> <volume>65</volume> <fpage>432</fpage>&#x2013;<lpage>442</lpage>. <pub-id pub-id-type="doi">10.1016/j.ijepes.2014.10.042</pub-id> <pub-id pub-id-type="pmid">24892057</pub-id></citation></ref>
<ref id="B28"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sarkheyli</surname> <given-names>A.</given-names></name> <name><surname>Zain</surname> <given-names>A. M.</given-names></name> <name><surname>Sharif</surname> <given-names>S.</given-names></name></person-group> (<year>2015</year>). <article-title>The role of basic, modified and hybrid shuffled frog leaping algorithm on optimization problems: a review.</article-title> <source><italic>Soft Comput.</italic></source> <volume>19</volume> <fpage>2011</fpage>&#x2013;<lpage>2038</lpage>. <pub-id pub-id-type="doi">10.1007/s00500-014-1388-4</pub-id></citation></ref>
<ref id="B29"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>S.</given-names></name> <name><surname>Gupta</surname> <given-names>D.</given-names></name> <name><surname>Anand</surname> <given-names>R. S.</given-names></name> <name><surname>Kumar</surname> <given-names>V.</given-names></name></person-group> (<year>2015</year>). <article-title>Nonsubsampled shearlet based CT and MRI medical image fusion using biologically inspired spiking neural network.</article-title> <source><italic>Biomed. Signal Process. Control</italic></source> <volume>18</volume> <fpage>91</fpage>&#x2013;<lpage>101</lpage>. <pub-id pub-id-type="doi">10.1016/j.bspc.2014.11.009</pub-id></citation></ref>
<ref id="B30"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>D.</given-names></name> <name><surname>Zhou</surname> <given-names>J.</given-names></name></person-group> (<year>2010</year>). <article-title>Image fusion algorithm based on the nonsubsampled contourlet transform.</article-title> <source><italic>Comput. Syst. Appl.</italic></source> <volume>19</volume> <fpage>220</fpage>&#x2013;<lpage>224</lpage>. <pub-id pub-id-type="doi">10.1109/ICFCC.2010.5497801</pub-id></citation></ref>
<ref id="B31"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>J.</given-names></name> <name><surname>Huang</surname> <given-names>H.</given-names></name> <name><surname>Qiu</surname> <given-names>Y.</given-names></name> <name><surname>Wu</surname> <given-names>H.</given-names></name> <name><surname>Tian</surname> <given-names>J.</given-names></name> <name><surname>Liu</surname> <given-names>J.</given-names></name></person-group> (<year>2005</year>). <article-title>&#x201C;Remote sensing image fusion based on average gradient of wavelet transform,&#x201D; in</article-title> <source><italic>Proceedings of the 2005 IEEE International Conference on Mechatronics and Automation</italic></source> <volume>Vol. 4</volume> (<publisher-loc>Niagara Falls, ON</publisher-loc>: <publisher-name>IEEE</publisher-name>) <fpage>1817</fpage>&#x2013;<lpage>1821</lpage>. <pub-id pub-id-type="doi">10.1109/ICMA.2005.1626836</pub-id></citation></ref>
<ref id="B32"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xiang</surname> <given-names>T.</given-names></name> <name><surname>Yan</surname> <given-names>L.</given-names></name> <name><surname>Gao</surname> <given-names>R.</given-names></name></person-group> (<year>2015</year>). <article-title>A fusion algorithm for infrared and visible images based on adaptive dual-channel unit-linking PCNN in NSCT domain.</article-title> <source><italic>Infrared Phys. Technol.</italic></source> <volume>69</volume> <fpage>53</fpage>&#x2013;<lpage>61</lpage>. <pub-id pub-id-type="doi">10.1016/j.infrared.2015.01.002</pub-id></citation></ref>
<ref id="B33"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xin</surname> <given-names>Y.</given-names></name> <name><surname>Deng</surname> <given-names>L.</given-names></name></person-group> (<year>2013</year>). <article-title>An improved remote sensing image fusion method based on wavelet transform.</article-title> <source><italic>Laser Optoelectron. Prog.</italic></source> <volume>15</volume> <fpage>133</fpage>&#x2013;<lpage>138</lpage>.</citation></ref>
<ref id="B34"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>J.</given-names></name> <name><surname>Wu</surname> <given-names>Y.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Xiong</surname> <given-names>Y.</given-names></name></person-group> (<year>2016</year>). <article-title>&#x201C;A novel fusion technique for CT and MRI medical image based on NSST,&#x201D; in</article-title> <source><italic>Proceedings of the Chinese Control and Decision Conference</italic></source> (<publisher-loc>Yinchuan</publisher-loc>: <publisher-name>IEEE</publisher-name>) <fpage>4367</fpage>&#x2013;<lpage>4372</lpage>. <pub-id pub-id-type="doi">10.1109/CCDC.2016.7531752</pub-id></citation></ref>
<ref id="B35"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zeng</surname> <given-names>N.</given-names></name> <name><surname>Qiu</surname> <given-names>H.</given-names></name> <name><surname>Wang</surname> <given-names>Z.</given-names></name> <name><surname>Liu</surname> <given-names>W.</given-names></name> <name><surname>Zhang</surname> <given-names>H.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name></person-group> (<year>2018</year>). <article-title>A new switching-delayed-PSO-based optimized SVM algorithm for diagnosis of Alzheimer&#x2019;s disease.</article-title> <source><italic>Neurocomputing</italic></source> <volume>320</volume> <fpage>195</fpage>&#x2013;<lpage>202</lpage>. <pub-id pub-id-type="doi">10.1016/j.neucom.2018.09.001</pub-id></citation></ref>
<ref id="B36"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zeng</surname> <given-names>N.</given-names></name> <name><surname>Wang</surname> <given-names>H.</given-names></name> <name><surname>Liu</surname> <given-names>W.</given-names></name> <name><surname>Liang</surname> <given-names>J.</given-names></name> <name><surname>Alsaadi</surname> <given-names>F. E.</given-names></name></person-group> (<year>2017a</year>). <article-title>A switching delayed PSO optimized extreme learning machine for short-term load forecasting.</article-title> <source><italic>Neurocomputing</italic></source> <volume>240</volume> <fpage>175</fpage>&#x2013;<lpage>182</lpage>. <pub-id pub-id-type="doi">10.1016/j.neucom.2017.01.090</pub-id></citation></ref>
<ref id="B37"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zeng</surname> <given-names>N.</given-names></name> <name><surname>Zhang</surname> <given-names>H.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Liang</surname> <given-names>J.</given-names></name> <name><surname>Dobaie</surname> <given-names>A. M.</given-names></name></person-group> (<year>2017b</year>). <article-title>Denoising and deblurring gold immunochromatographic strip images via gradient projection algorithms.</article-title> <source><italic>Neurocomputing</italic></source> <volume>247</volume> <fpage>165</fpage>&#x2013;<lpage>172</lpage>. <pub-id pub-id-type="doi">10.1016/j.neucom.2017.03.056</pub-id></citation></ref>
<ref id="B38"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zeng</surname> <given-names>N.</given-names></name> <name><surname>Wang</surname> <given-names>Z.</given-names></name> <name><surname>Zhang</surname> <given-names>H.</given-names></name></person-group> (<year>2016a</year>). <article-title>Inferring nonlinear lateral flow immunoassay state-space models via an unscented Kalman filter.</article-title> <source><italic>Sci. China Inform. Sci.</italic></source> <volume>59</volume>:<issue>112204</issue>. <pub-id pub-id-type="doi">10.1007/s11432-016-0280-9</pub-id></citation></ref>
<ref id="B39"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zeng</surname> <given-names>N.</given-names></name> <name><surname>Wang</surname> <given-names>Z.</given-names></name> <name><surname>Zhang</surname> <given-names>H.</given-names></name> <name><surname>Alsaadi</surname> <given-names>F. E.</given-names></name></person-group> (<year>2016b</year>). <article-title>A novel switching delayed PSO algorithm for estimating unknown parameters of lateral flow immunoassay.</article-title> <source><italic>Cogn. Comput.</italic></source> <volume>8</volume> <fpage>143</fpage>&#x2013;<lpage>152</lpage>. <pub-id pub-id-type="doi">10.1007/s12559-016-9396-6</pub-id></citation></ref>
<ref id="B40"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zeng</surname> <given-names>N.</given-names></name> <name><surname>Wang</surname> <given-names>Z.</given-names></name> <name><surname>Zineddin</surname> <given-names>B.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Du</surname> <given-names>M.</given-names></name> <name><surname>Xiao</surname> <given-names>L.</given-names></name><etal/></person-group> (<year>2014</year>). <article-title>Image-based quantitative analysis of gold immunochromatographic strip via cellular neural network approach.</article-title> <source><italic>IEEE Trans. Med. Imaging</italic></source> <volume>33</volume> <fpage>1129</fpage>&#x2013;<lpage>1136</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2014.2305394</pub-id> <pub-id pub-id-type="pmid">24770917</pub-id></citation></ref>
<ref id="B41"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>J.</given-names></name> <name><surname>Sun</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Teng</surname> <given-names>J.</given-names></name></person-group> (<year>2017</year>). <article-title>Double regularization medical CT image blind restoration reconstruction based on proximal alternating direction method of multipliers.</article-title> <source><italic>EURASIP J. Image Video Process.</italic></source> <volume>2017</volume>:<issue>70</issue>. <pub-id pub-id-type="doi">10.1186/s13640-017-0218-x</pub-id></citation></ref>
<ref id="B42"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Feng</surname> <given-names>Y.</given-names></name></person-group> (<year>2017</year>). <article-title>Image fusion based on simultaneous empirical wavelet transform.</article-title> <source><italic>Multimed. Tools Appl.</italic></source> <volume>76</volume> <fpage>8175</fpage>&#x2013;<lpage>8193</lpage>. <pub-id pub-id-type="doi">10.1007/s11042-016-4030-x</pub-id></citation></ref>
<ref id="B43"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>Q.</given-names></name> <name><surname>He</surname> <given-names>L.</given-names></name> <name><surname>Wen</surname> <given-names>P.</given-names></name></person-group> (<year>2012</year>). <article-title>Image fusion method based on average grads and wavelet contrast.</article-title> <source><italic>Comput. Eng. Appl.</italic></source> <volume>48</volume> <fpage>165</fpage>&#x2013;<lpage>168</lpage>. <pub-id pub-id-type="doi">10.3778/j.issn.1002-8331.2012.24.037</pub-id></citation></ref>
</ref-list>
</back>
</article>