<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Aging Neurosci.</journal-id>
<journal-title>Frontiers in Aging Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Aging Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1663-4365</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnagi.2020.618538</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Systematic and Comprehensive Automated Ventricle Segmentation on Ventricle Images of the Elderly Patients: A Retrospective Study</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Zhou</surname> <given-names>Xi</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>&#x02020;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1117225/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Ye</surname> <given-names>Qinghao</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>&#x02020;</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Jiang</surname> <given-names>Yinghui</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>&#x02020;</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Wang</surname> <given-names>Minhao</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Niu</surname> <given-names>Zhangming</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Menpes-Smith</surname> <given-names>Wade</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Fang</surname> <given-names>Evandro Fei</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Liu</surname> <given-names>Zhi</given-names></name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1076654/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Xia</surname> <given-names>Jun</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1034956/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Yang</surname> <given-names>Guang</given-names></name>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref>
<xref ref-type="aff" rid="aff8"><sup>8</sup></xref>
<xref ref-type="corresp" rid="c002"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/401153/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Department of Radiology, Shenzhen Second People&#x00027;s Hospital, The First Affiliated Hospital of Shenzhen University Health Science Center</institution>, <addr-line>Shenzhen</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>Hangzhou Ocean&#x00027;s Smart Boya Co., Ltd.</institution>, <addr-line>Hangzhou</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>Mind Rank Ltd.</institution>, <addr-line>Hong Kong</addr-line>, <country>China</country></aff>
<aff id="aff4"><sup>4</sup><institution>Aladdin Healthcare Technologies Ltd.</institution>, <addr-line>London</addr-line>, <country>United Kingdom</country></aff>
<aff id="aff5"><sup>5</sup><institution>Department of Clinical Molecular Biology, University of Oslo</institution>, <addr-line>Oslo</addr-line>, <country>Norway</country></aff>
<aff id="aff6"><sup>6</sup><institution>School of Information Science and Engineering, Shandong University</institution>, <addr-line>Qingdao</addr-line>, <country>China</country></aff>
<aff id="aff7"><sup>7</sup><institution>Cardiovascular Research Centre, Royal Brompton Hospital</institution>, <addr-line>London</addr-line>, <country>United Kingdom</country></aff>
<aff id="aff8"><sup>8</sup><institution>National Heart and Lung Institute, Imperial College London</institution>, <addr-line>London</addr-line>, <country>United Kingdom</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Feiqi Zhu, Shenzhen University, China</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Lin Gu, National Institute of Informatics, Japan; Xurui Jin, Duke Kunshan University, China</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Jun Xia <email>xiajun&#x00040;email.szu.edu.cn</email></corresp>
<corresp id="c002">Guang Yang <email>g.yang&#x00040;imperial.ac.uk</email></corresp>
<fn fn-type="other" id="fn001"><p>&#x02020;These authors have contributed equally to this work</p></fn>
</author-notes>
<pub-date pub-type="epub">
<day>16</day>
<month>12</month>
<year>2020</year>
</pub-date>
<pub-date pub-type="collection">
<year>2020</year>
</pub-date>
<volume>12</volume>
<elocation-id>618538</elocation-id>
<history>
<date date-type="received">
<day>17</day>
<month>10</month>
<year>2020</year>
</date>
<date date-type="accepted">
<day>23</day>
<month>11</month>
<year>2020</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2020 Zhou, Ye, Jiang, Wang, Niu, Menpes-Smith, Fang, Liu, Xia and Yang.</copyright-statement>
<copyright-year>2020</copyright-year>
<copyright-holder>Zhou, Ye, Jiang, Wang, Niu, Menpes-Smith, Fang, Liu, Xia and Yang</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license> </permissions>
<abstract><p><bold>Background and Objective:</bold> Ventricle volume is closely related to hydrocephalus, brain atrophy, Alzheimer&#x00027;s, Parkinson&#x00027;s syndrome, and other diseases. To accurately measure the volume of the ventricles for elderly patients, we use deep learning to establish a systematic and comprehensive automated ventricle segmentation framework.</p>
<p><bold>Methods:</bold> First, the study participants included 20 normal elderly people, 20 patients with cerebral atrophy, 64 patients with normal pressure hydrocephalus, and 51 patients with acquired hydrocephalus. Second, their imaging data were obtained through the picture archiving and communication systems (PACS). Then, ITK software was used to manually label the participants&#x00027; ventricular structures. Finally, imaging features were extracted through machine learning.</p>
<p><bold>Results:</bold> This automated ventricle segmentation method can be applied not only to CT and MRI images but also to images with different scan slice thicknesses. More importantly, it produces excellent segmentation results (Dice &#x0003E; 0.9).</p>
<p><bold>Conclusion:</bold> This automated ventricle segmentation method has wide applicability and clinical practicability. It can help clinicians find early disease, diagnose disease, understand the patient&#x00027;s disease progression, and evaluate the patient&#x00027;s treatment effect.</p></abstract>
<kwd-group>
<kwd>deep learning</kwd>
<kwd>neuroimage</kwd>
<kwd>magnetic resonance imaging</kwd>
<kwd>ventricular segmentation</kwd>
<kwd>image segmentation</kwd>
<kwd>convolutional neural network (CNN)</kwd>
<kwd>computer tomography (CT)</kwd>
</kwd-group>
<counts>
<fig-count count="3"/>
<table-count count="5"/>
<equation-count count="6"/>
<ref-count count="50"/>
<page-count count="10"/>
<word-count count="7629"/>
</counts>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>Introduction</title>
<p>The volume of the ventricle has always been closely related to degenerative brain diseases and traumatic brain injury. Researchers have also described that enlargement of the ventricles is an important characteristic of medical conditions such as schizophrenia, Parkinson&#x00027;s disease, Alzheimer&#x00027;s disease, hydrocephalus, and trauma to the brain (Silbert et al., <xref ref-type="bibr" rid="B39">2003</xref>; Thompson et al., <xref ref-type="bibr" rid="B43">2004</xref>; Chou et al., <xref ref-type="bibr" rid="B8">2009</xref>; Liu et al., <xref ref-type="bibr" rid="B22">2010</xref>; Cavedo et al., <xref ref-type="bibr" rid="B5">2012</xref>; Khan et al., <xref ref-type="bibr" rid="B19">2012</xref>; Anandh et al., <xref ref-type="bibr" rid="B2">2016</xref>; Del et al., <xref ref-type="bibr" rid="B12">2016</xref>; Owen et al., <xref ref-type="bibr" rid="B29">2018</xref>; Kocaman et al., <xref ref-type="bibr" rid="B20">2019</xref>; Lundervold et al., <xref ref-type="bibr" rid="B24">2019</xref>). In some disease diagnosis guidelines, EI &#x0003E; 0.3 is often defined as ventricular enlargement (Relkin et al., <xref ref-type="bibr" rid="B33">2005</xref>; Mori et al., <xref ref-type="bibr" rid="B26">2012</xref>). However, some studies have shown that the correlation between EI and ventricle volume is only 0.619 (Toma et al., <xref ref-type="bibr" rid="B44">2011</xref>). The measurement of EI is affected by different scan baselines and different measurement planes, which only reflects the local conditions of the ventricle at the selected level, and cannot fully assess the size of the ventricle (Ambarki et al., <xref ref-type="bibr" rid="B1">2010</xref>). Moreover, EI is sensitive to the expansion of the ventricle to both sides, and the effect is not good when evaluating patients whose ventricle expands to the long axis (He et al., <xref ref-type="bibr" rid="B16">2020</xref>). At the same time, in the normal elderly, the range of EI is relatively wide. 
Taking EI = 0.3 as the cut-off value, it is difficult to effectively distinguish between normal and enlarged ventricles (Brix et al., <xref ref-type="bibr" rid="B4">2017</xref>). Therefore, when we need the volume of the ventricle, and the volume of the ventricle is measurable, then we should use it (Ambarki et al., <xref ref-type="bibr" rid="B1">2010</xref>). Because of this, research on ventricle segmentation methods has brought much attention, and researchers have continuously optimized algorithms to make better and more accurate estimation (Chen et al., <xref ref-type="bibr" rid="B6">2009</xref>; Coupe et al., <xref ref-type="bibr" rid="B10">2011</xref>; Kempton et al., <xref ref-type="bibr" rid="B18">2011</xref>; Poh et al., <xref ref-type="bibr" rid="B30">2012</xref>; Qiu et al., <xref ref-type="bibr" rid="B32">2015</xref>; Tang et al., <xref ref-type="bibr" rid="B41">2015</xref>, <xref ref-type="bibr" rid="B42">2018</xref>; Qian et al., <xref ref-type="bibr" rid="B31">2017</xref>; Cherukuri et al., <xref ref-type="bibr" rid="B7">2018</xref>; Shao et al., <xref ref-type="bibr" rid="B37">2019</xref>; Dubost et al., <xref ref-type="bibr" rid="B13">2020</xref>).</p>
<p>Volumetric measurement is the only method to directly determine the ventricular size. It is realized by segmentation, which can be roughly categorized into automated segmentation and manual segmentation (Huff et al., <xref ref-type="bibr" rid="B17">2019</xref>). The manual segmentation technique is the gold standard for volumetric quantification of regional brain structures (Kocaman et al., <xref ref-type="bibr" rid="B20">2019</xref>), but when dealing with more data, manual segmentation of the ventricles is time-consuming, subjective, and less reproducible (Chou et al., <xref ref-type="bibr" rid="B9">2008</xref>; Liu et al., <xref ref-type="bibr" rid="B23">2009</xref>; Poh et al., <xref ref-type="bibr" rid="B30">2012</xref>). Therefore, an automated ventricle segmentation method is in high demand, and machine and deep learning based methods have emerged as a new paradigm.</p>
<p>In the previous automated ventricle segmentation methods, researchers often conducted single-mode studies, i.e., segmenting either on CT images (Liu et al., <xref ref-type="bibr" rid="B22">2010</xref>; Poh et al., <xref ref-type="bibr" rid="B30">2012</xref>; Qian et al., <xref ref-type="bibr" rid="B31">2017</xref>; Cherukuri et al., <xref ref-type="bibr" rid="B7">2018</xref>) or MRI images (Qiu et al., <xref ref-type="bibr" rid="B32">2015</xref>; Tang et al., <xref ref-type="bibr" rid="B41">2015</xref>, <xref ref-type="bibr" rid="B42">2018</xref>). Therefore, the developed automated ventricle segmentation methods were rarely interchangeable. Moreover, various algorithms might perform differently in segmenting different sections of the ventricles (Chen et al., <xref ref-type="bibr" rid="B6">2009</xref>; Coupe et al., <xref ref-type="bibr" rid="B10">2011</xref>; Shao et al., <xref ref-type="bibr" rid="B37">2019</xref>; Dubost et al., <xref ref-type="bibr" rid="B13">2020</xref>). Most previous machine learning (including deep learning) based studies were developed using images with a slice thickness of &#x0003C;3 mm, because at the same scanning distance, the smaller the image thickness, the more images could be obtained, which could be more conducive for machine/deep learning algorithms to extract more image features (Xia et al., <xref ref-type="bibr" rid="B48">2004</xref>; Coupe et al., <xref ref-type="bibr" rid="B10">2011</xref>; Kempton et al., <xref ref-type="bibr" rid="B18">2011</xref>). However, in clinical practice, due to time constraints, images with larger slice thicknesses are more common. Therefore, the clinical usage of these methods is relatively limited.</p>
<p>The reproducibility of machine/deep learning based algorithms across different scanners and pulse sequences had not always been comprehensively examined (Kempton et al., <xref ref-type="bibr" rid="B18">2011</xref>). Moreover, their accuracy in different clinical populations and sensitivity to real changes in brain volume could still be improved. A larger slice thickness would increase the partial volume effect, which could have a significant negative impact on the algorithm accuracy. For example, the intraventricular calcified area located at the border of the ventricle may not be recognized. Some cerebellar ventricle areas (anterior, posterior, and inferior horns of the lateral ventricle) may not be recognized because they are not connected to the core of the lateral ventricle (Liu et al., <xref ref-type="bibr" rid="B22">2010</xref>). In some automated ventricle segmentation methods, pathological ventricles were not included (Huff et al., <xref ref-type="bibr" rid="B17">2019</xref>), but pathological ventricles are common in the elderly, especially in patients with acquired hydrocephalus, because they may have brain trauma, brain tumors, subarachnoid hemorrhage, and it becomes extremely difficult to delineate the ventricle from these patients. Previous literature also reported the segmentation of the ventricle of idiopathic Normal Pressure Hydrocephalus (iNPH) patients (Shao et al., <xref ref-type="bibr" rid="B37">2019</xref>). These patients are prone to segmentation failure due to the enlarged ventricle. Therefore, our purpose is not only to optimize the algorithm and obtain more accurate results but more importantly, to make this automated ventricle segmentation method be more widely used and be trustworthy for clinical practice.</p>
<p>In summary, the goal of this study is to establish a deep learning based automated ventricle segmentation method that can be generally used for both CT and MRI images, and is versatile for both thin-layer and thick-layer images.</p>
</sec>
<sec sec-type="methods" id="s2">
<title>Methods</title>
<sec>
<title>Participants</title>
<p>First, we selected the images of patients over 60 years old who underwent brain CT or MRI examinations at Shenzhen Second People&#x00027;s Hospital from January 1, 2016 to December 31, 2019. Second, as we aimed to delineate the ventricle and perform a comprehensive analysis, we chose the normal elderly, the elderly with brain atrophy, the elderly with idiopathic normal pressure hydrocephalus, and the elderly with acquired hydrocephalus, because the shape and size of the ventricles of these four types of patients are very representative, showing a trend from normal to severe, which can help us systematically and comprehensively analyze the ventricular system. Third, the diagnostic results of these patients were agreed upon by two radiologists with more than 10 years of work experience and strictly followed the disease diagnosis guidelines. Last but not least, due to the large number of normal elderly people and patients with brain atrophy, a large amount of manual labeling would be infeasible. However, there is no obvious deformation of their ventricle structure, and automatic ventricle segmentation is easier for the normal elderly and the elderly with brain atrophy. Therefore, we arranged the normal elderly and the elderly with brain atrophy in the order of the time of head imaging examination and numbered them, made the numbers into small pieces of paper, and placed them in a large carton. Using a simple random sampling method, we let 20 doctors in the radiology department randomly draw small pieces of paper. In the end, we randomly selected 20 normal elderly people and 20 elderly people with brain atrophy for manual marking. The flowchart of the inclusion and exclusion of patients is shown in <xref ref-type="fig" rid="F1">Figure 1</xref>, and the basic study population description is shown in <xref ref-type="table" rid="T1">Table 1</xref>.</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>Study flow chart for the inclusion of participants.</p></caption>
<graphic xlink:href="fnagi-12-618538-g0001.tif"/>
</fig>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Demographic information of subjects used in this study.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th/>
<th valign="top" align="center"><bold>Normal elderly</bold></th>
<th valign="top" align="center"><bold>Brain atrophy</bold></th>
<th valign="top" align="center"><bold>Normal pressure hydrocephalus</bold></th>
<th valign="top" align="center"><bold>Acquired hydrocephalus</bold></th>
</tr>
<tr>
<th/>
<th valign="top" align="center"><bold>(<italic>n</italic> &#x0003D; 20)</bold></th>
<th valign="top" align="center"><bold>(<italic>n</italic> &#x0003D; 20)</bold></th>
<th valign="top" align="center"><bold>(<italic>n</italic> &#x0003D; 64)</bold></th>
<th valign="top" align="center"><bold>(<italic>n</italic> &#x0003D; 51)</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Age<xref ref-type="table-fn" rid="TN1">&#x0002A;</xref> (years)</td>
<td valign="top" align="center">64.81 &#x000B1; 3.19</td>
<td valign="top" align="center">70.95 &#x000B1; 5.55</td>
<td valign="top" align="center">70.77 &#x000B1; 6.81</td>
<td valign="top" align="center">67.01 &#x000B1; 5.41</td>
</tr>
<tr>
<td valign="top" align="left">Sex (male:female)</td>
<td valign="top" align="center">11:9</td>
<td valign="top" align="center">12:8</td>
<td valign="top" align="center">37:27</td>
<td valign="top" align="center">31:20</td>
</tr>
<tr>
<td valign="top" align="left" colspan="5"><bold>Scanning status of imaging equipment</bold><xref ref-type="table-fn" rid="TN2"><sup>&#x00023;</sup></xref></td>
</tr>
<tr>
<td valign="top" align="left">CT-1</td>
<td valign="top" align="center">4</td>
<td valign="top" align="center">3</td>
<td valign="top" align="center">19</td>
<td valign="top" align="center">24</td>
</tr>
<tr>
<td valign="top" align="left">CT-2</td>
<td valign="top" align="center">6</td>
<td valign="top" align="center">7</td>
<td valign="top" align="center">28</td>
<td valign="top" align="center">16</td>
</tr>
<tr>
<td valign="top" align="left">MRI-1</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">4</td>
<td valign="top" align="center">23</td>
<td valign="top" align="center">18</td>
</tr>
<tr>
<td valign="top" align="left">MRI-2</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">6</td>
<td valign="top" align="center">31</td>
<td valign="top" align="center">20</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn id="TN1"><label>&#x0002A;</label><p><italic>Age reported as mean &#x000B1; standard deviation</italic>.</p></fn>
<fn id="TN2"><label>&#x00023;</label><p><italic>CT-1 represents the CT instrument of SOMATOM Definition Flash from Siemens, Germany. CT-2 represents the CT instrument of SOMATOM Emotion 16 from Siemens, Germany. MRI-1 represents the 1.5T MR scanner (Avanto, Siemens, Erlangen, Germany). MRI-2 represents the 3.0T MRI scanner (Prisma, Siemens, Erlangen, Germany). The slice thickness of CT image includes: 0.5, 1.0, 1.5, 2.0, 4.8, 5.0 mm. The slice thickness of MRI image includes: 1.0, 7.8, 8.0 mm</italic>.</p></fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec>
<title>Ethics Statement</title>
<p>This study was carried out in accordance with the recommendations of the Ethics Committee of The First Affiliated Hospital of Shenzhen University and Shenzhen Second People&#x00027;s hospital. All subjects gave written informed consent in accordance with the Declaration of Helsinki.</p>
</sec>
<sec>
<title>Imaging Protocol and Label</title>
<p>First, a CT scan of the head was performed on two CT instruments, one of which was SOMATOM Definition Flash from Siemens, Germany, and the other was SOMATOM Emotion 16 from Siemens, Germany. Secondly, MRI examinations were conducted using a 1.5T MR scanner (Avanto, Siemens, Erlangen, Germany), and a 3.0T MRI scanner (Prisma, Siemens, Erlangen, Germany). All images were stored in the picture archiving and communication systems (PACS).</p>
<p>Then manual delineation of the ventricle was conducted. For MRI images, we chose T1WI for manual labeling. The specific manual labeling process is as follows: (1) two radiologists with 10 years of clinical experience used ITK software to label the ventricles; (2) a senior radiologist with 20 years of clinical experience evaluated the delineation results of the ventricles and made adjustments if inaccurate manual labeling was found; and (3) for the controversial annotated cases, we invited a neurology expert and a neurosurgery expert to discuss and make modifications, and the final annotation results were approved by them.</p>
<p>We defined an image as a thick-slice image when the scan slice thickness was &#x0003E;3 mm; otherwise, it was defined as a thin-slice image. Therefore, all images were classified into four groups, i.e., thin-slice CT images, thick-slice CT images, thin-slice MRI images, and thick-slice MRI images.</p>
</sec>
<sec>
<title>The Proposed Deep Learning Framework</title>
<p>In real-world scenarios, the thick-slice images are more easily obtained, while thin-slice images are rare, and it is more difficult for clinicians to annotate them. Moreover, the distribution of different image thicknesses can result in the <italic>domain shift</italic> problem that can confuse the deep learning models (Yan et al., <xref ref-type="bibr" rid="B49">2019</xref>). Therefore, we proposed a thickness-agnostic image segmentation model, which only required the annotation of thick-slice images for the model training.</p>
<p>Our goal is to utilize the unlabeled thin-slice images to minimize the performance gap between thick-slice and thin-slice images. In our model, the thick-slice images are denoted as <inline-formula><mml:math id="M1"><mml:msub><mml:mrow><mml:mi>D</mml:mi></mml:mrow><mml:mrow><mml:mi>S</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>|</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:mo>&#x02208;</mml:mo><mml:msup><mml:mrow><mml:mi>R</mml:mi></mml:mrow><mml:mrow><mml:mi>H</mml:mi><mml:mo>&#x000D7;</mml:mo><mml:mi>W</mml:mi><mml:mo>&#x000D7;</mml:mo><mml:mn>3</mml:mn></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:mo>&#x02208;</mml:mo><mml:msup><mml:mrow><mml:mi>R</mml:mi></mml:mrow><mml:mrow><mml:mi>H</mml:mi><mml:mo>&#x000D7;</mml:mo><mml:mi>W</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math></inline-formula>, and the thin-slices images are represented as <inline-formula><mml:math 
id="M2"><mml:msub><mml:mrow><mml:mi>D</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>&#x02208;</mml:mo><mml:msup><mml:mrow><mml:mi>R</mml:mi></mml:mrow><mml:mrow><mml:mi>H</mml:mi><mml:mo>&#x000D7;</mml:mo><mml:mi>W</mml:mi><mml:mo>&#x000D7;</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mn>3</mml:mn></mml:mrow></mml:msup></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math></inline-formula>.</p>
<p>With the increased development and application of deep learning methods, encoder-decoder based architectures (Milletari et al., <xref ref-type="bibr" rid="B25">2016</xref>; Zhou et al., <xref ref-type="bibr" rid="B50">2018</xref>) are widely used in medical image segmentation. The workflow of our proposed deep learning based framework is presented in <xref ref-type="fig" rid="F2">Figure 2</xref>. For image feature extraction and reconstruction, we adopted ResNet-34, which was pre-trained on ImageNet datasets as the encoder of input images. For the decoder, sub-pixel convolution was used for constructing segmentation results since the deconvolution operation was computationally heavy and interpolation-based methods could not bring additional information to improve the segmentation. The sub-pixel convolution can be then represented as</p>
<disp-formula id="E1"><label>(1)</label><mml:math id="M3"><mml:mtable class="eqnarray" columnalign="right center left"><mml:mtr><mml:mtd><mml:msup><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:mi>S</mml:mi><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub><mml:mo>*</mml:mo><mml:msup><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msup><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>b</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <italic>SP</italic>(&#x000B7;) operator transforms and arranges a tensor with the shape of <italic>H</italic> &#x000D7; <italic>W</italic> &#x000D7; <italic>C</italic> &#x000D7; <italic>r</italic><sup>2</sup> into a tensor shaped in <italic>rH</italic> &#x000D7; <italic>rW</italic> &#x000D7; <italic>C</italic>, <italic>F</italic><sup><italic>L</italic>&#x02212;1</sup> and <italic>F</italic><sup><italic>L</italic></sup> are the input feature and output feature, <italic>W</italic><sub><italic>L</italic></sub> and <italic>b</italic><sub><italic>L</italic></sub> are the parameters of the sub-pixel convolution operator.</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>The workflow of proposed methods.</p></caption>
<graphic xlink:href="fnagi-12-618538-g0002.tif"/>
</fig>
<p>We took both thick-slice and thin-slice images as the input and optimized our model with the following objective function</p>
<disp-formula id="E2"><label>(2)</label><mml:math id="M4"><mml:mtable class="eqnarray" columnalign="right center left"><mml:mtr><mml:mtd><mml:mi>L</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>L</mml:mi></mml:mrow><mml:mrow><mml:mi>S</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:mi>&#x003BB;</mml:mi><mml:msub><mml:mrow><mml:mi>L</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where &#x003BB; is a hyper-parameter for weighting the impact of <italic>L</italic><sub><italic>s</italic></sub> and <italic>L</italic><sub><italic>T</italic></sub>, <italic>p</italic><sub><italic>s</italic></sub> and <italic>p</italic><sub><italic>t</italic></sub> are the model&#x00027;s predictions of the segmentation probability shaped of <italic>H</italic> &#x000D7; <italic>W</italic> &#x000D7; <italic>C</italic>. <italic>L</italic><sub><italic>S</italic></sub> is the cross-entropy loss defined as follows</p>
<disp-formula id="E3"><label>(3)</label><mml:math id="M5"><mml:mtable class="eqnarray" columnalign="right center left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>L</mml:mi></mml:mrow><mml:mrow><mml:mi>S</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo>-</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>H</mml:mi><mml:mi>W</mml:mi></mml:mrow></mml:mfrac><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>H</mml:mi><mml:mi>W</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>c</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:msubsup><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mi>c</mml:mi></mml:mrow></mml:msubsup><mml:mo class="qopname">log</mml:mo><mml:msubsup><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mi>c</mml:mi></mml:mrow></mml:msubsup><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Since we expect our model to learn an accurate segmentation paradigm for both thick-slice images and thin-slice images, the <italic>L</italic><sub><italic>T</italic></sub> can be regarded as the distance between the probability distribution of the target domain (thin-slice domain) <italic>p</italic><sub><italic>t</italic></sub> and the uniform distribution <inline-formula><mml:math id="M6"><mml:mi>U</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:mfrac></mml:math></inline-formula>. Therefore, maximizing the distance between the two distributions enables classes to be more separable, because it implicitly pushes the image features away from the decision boundary and encourages alignment between the two domains. Mathematically, the objective function of thin-slice images is formulated as</p>
<disp-formula id="E4"><label>(4)</label><mml:math id="M7"><mml:mtable class="eqnarray" columnalign="right center left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>L</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>D</mml:mi></mml:mrow><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mi>c</mml:mi></mml:mrow></mml:msubsup><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>U</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>-</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:mfrac><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>c</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:mi>f</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>C</mml:mi><mml:msubsup><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mi>c</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Most existing methods (Vu et al., <xref ref-type="bibr" rid="B45">2019</xref>) would choose <italic>f</italic> (<italic>x</italic>) &#x0003D; <italic>x</italic> log <italic>x</italic>, which is alternatively named as the KL-divergence. However, one of the main obstacles is that when adopting KL-divergence for <italic>L</italic><sub><italic>T</italic></sub> as the objective function, the gradient of <italic>L</italic><sub><italic>T</italic></sub> would be extremely imbalanced between easy and hard samples. Taking the binary case as an example, the gradient can be computed as</p>
<disp-formula id="E5"><label>(5)</label><mml:math id="M8"><mml:mtable class="eqnarray" columnalign="right center left"><mml:mtr><mml:mtd><mml:mfrac><mml:mrow><mml:mi>&#x02202;</mml:mi><mml:msub><mml:mrow><mml:mi>L</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mi>&#x02202;</mml:mi><mml:msubsup><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mi>i</mml:mi></mml:mrow></mml:msubsup></mml:mrow></mml:mfrac><mml:mo>=</mml:mo><mml:mo class="qopname">log</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:msubsup><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mi>i</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>-</mml:mo><mml:mo class="qopname">log</mml:mo><mml:msubsup><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>of which the increasing speed is faster as <inline-formula><mml:math id="M9"><mml:msubsup><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mi>i</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> becomes larger.</p>
<p>Therefore, to mitigate the unbalancing problem represented above, instead of choosing <italic>f</italic> (<italic>x</italic>) &#x0003D; <italic>x</italic> log <italic>x</italic>, we select Pearson &#x003C7;<sup>2</sup> divergence [i.e., <italic>f</italic> (<italic>x</italic>) &#x0003D; <italic>x</italic><sup>2</sup> &#x02212; 1] for <italic>L</italic><sub><italic>T</italic></sub>. Therefore, the gradient of <italic>L</italic><sub><italic>T</italic></sub> can be noted as</p>
<disp-formula id="E6"><label>(6)</label><mml:math id="M10"><mml:mtable class="eqnarray" columnalign="right center left"><mml:mtr><mml:mtd><mml:mfrac><mml:mrow><mml:mi>&#x02202;</mml:mi><mml:msub><mml:mrow><mml:mi>L</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mi>&#x02202;</mml:mi><mml:msubsup><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mi>i</mml:mi></mml:mrow></mml:msubsup></mml:mrow></mml:mfrac><mml:mo>=</mml:mo><mml:mn>2</mml:mn><mml:mo>-</mml:mo><mml:mn>4</mml:mn><mml:msubsup><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>which balances the gradient between easy and hard samples. During model training, the above loss functions were optimized iteratively. For testing, we fed each slice of images as the input and get the predicted segmentation.</p>
</sec>
</sec>
<sec id="s3">
<title>Result</title>
<sec>
<title>Diagnostic Efficiency</title>
<p>For experiments, we collected thick-slice and thin-slice samples from iNPH patients with different modalities (MRI and CT) as <xref ref-type="table" rid="T2">Table 2</xref> shows. It is of note that we only used the annotations from thick-slice images for our supervised deep learning. We investigated the performance of U-Net (Ronneberger et al., <xref ref-type="bibr" rid="B34">2015</xref>) and U-Net&#x0002B;&#x0002B; (Zhou et al., <xref ref-type="bibr" rid="B50">2018</xref>) on thin-slice and thick-slice images. Both U-Net and U-Net&#x0002B;&#x0002B; adopted encoders and decoders structure while using the middle features to maintain the information of images. As shown in <xref ref-type="table" rid="T3">Table 3</xref>, compared to conventional and state-of-the-art models, our method achieved significant improvement on both thick-slice and thin-slice images. To further illustrate, our method outperformed U-Net and U-Net&#x0002B;&#x0002B;, which are commonly used in medical segmentation, by a large margin. Besides, with the help of a pre-trained ResNet-34 encoder, our model could gain at most 0.1 Dice coefficient on thick-slice images.</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>The number of thick-slice and thin-slice images used for our study.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Modality</bold></th>
<th valign="top" align="center" colspan="2" style="border-bottom: thin solid #000000;"><bold>The number of the training set</bold></th>
<th valign="top" align="center" colspan="2" style="border-bottom: thin solid #000000;"><bold>The number of the testing set</bold></th>
</tr>
<tr>
<th/>
<th valign="top" align="center"><bold>Thick-slice</bold></th>
<th valign="top" align="center"><bold>Thin-slice</bold></th>
<th valign="top" align="center"><bold>Thick-slice</bold></th>
<th valign="top" align="center"><bold>Thin-slice</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">MRI</td>
<td valign="top" align="center">1,013</td>
<td valign="top" align="center">1,629</td>
<td valign="top" align="center">189</td>
<td valign="top" align="center">982</td>
</tr>
<tr>
<td valign="top" align="left">CT</td>
<td valign="top" align="center">2,611</td>
<td valign="top" align="center">2,595</td>
<td valign="top" align="center">309</td>
<td valign="top" align="center">492</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>For CT and MRI datasets, each dataset is divided into two groups. The training set is only used for model training and optimization, while the testing set is used to validate the effectiveness of the trained model</italic>.</p>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Comparison results (Dice) of our method vs. other state-of-the-art methods.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Method</bold></th>
<th valign="top" align="center" colspan="3" style="border-bottom: thin solid #000000;"><bold>MRI</bold></th>
<th valign="top" align="center" colspan="3" style="border-bottom: thin solid #000000;"><bold>CT</bold></th>
</tr>
<tr>
<th/>
<th valign="top" align="center"><bold>Thick</bold></th>
<th valign="top" align="center"><bold>Thin</bold></th>
<th valign="top" align="center"><bold>Mixed</bold></th>
<th valign="top" align="center"><bold>Thick</bold></th>
<th valign="top" align="center"><bold>Thin</bold></th>
<th valign="top" align="center"><bold>Mixed</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">U-Net (Ronneberger et al., <xref ref-type="bibr" rid="B34">2015</xref>)</td>
<td valign="top" align="center">0.9226</td>
<td valign="top" align="center">0.7665</td>
<td valign="top" align="center">0.8353</td>
<td valign="top" align="center">0.9351</td>
<td valign="top" align="center">0.7987</td>
<td valign="top" align="center">0.8513</td>
</tr>
<tr>
<td valign="top" align="left">U-Net&#x0002B;&#x0002B; (Zhou et al., <xref ref-type="bibr" rid="B50">2018</xref>)</td>
<td valign="top" align="center">0.9159</td>
<td valign="top" align="center">0.8495</td>
<td valign="top" align="center">0.8602</td>
<td valign="top" align="center"><bold>0.9421</bold></td>
<td valign="top" align="center">0.7797</td>
<td valign="top" align="center">0.8424</td>
</tr>
<tr>
<td valign="top" align="left">Ours</td>
<td valign="top" align="center"><bold>0.9323</bold></td>
<td valign="top" align="center"><bold>0.9056</bold></td>
<td valign="top" align="center"><bold>0.9099</bold></td>
<td valign="top" align="center">0.9365</td>
<td valign="top" align="center"><bold>0.8697</bold></td>
<td valign="top" align="center"><bold>0.8954</bold></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>For each trained model, we tested it on thick-slice data only, thin-slice data only, and the combination of these two data. We can see that our method outperforms the other two state-of-the-art methods by a large margin, especially on the thin-slice images. Bold values indicate the best performance</italic>.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec>
<title>Component Analysis</title>
<p>To examine the influence of each component in our method, we conducted ablation studies to verify the effectiveness of our method, and the results are summarized in <xref ref-type="table" rid="T4">Table 4</xref>. We can observe that if our model only trained on thick-slice images, we can get comparable results on thick slices but the model cannot perform well on thin-slice images as shown in the first row of <xref ref-type="table" rid="T4">Table 4</xref>. However, without the annotation from images, the prediction results would be extremely unreliable since the objective function reached the global minimum when the probability of each class was assigned the same value. Moreover, when incorporating both thick-slice and thin-slice images into the training under the proposed semi-supervised paradigm, our method could result in better performance on thin-slice images by at least 3.5% improvement on the Dice coefficient compared to the model in Exp 1, while it only sacrificed little performance on thick images. The rationale behind this is that our model can learn a shared feature representation for both thick-slice and thin-slice images, which can be beneficial for handling different types of images.</p>
<table-wrap position="float" id="T4">
<label>Table 4</label>
<caption><p>Dice coefficient comparison for our ablation studies.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Exp</bold>.</th>
<th valign="top" align="center"><bold>Thick</bold></th>
<th valign="top" align="center"><bold>Thin</bold></th>
<th valign="top" align="center" colspan="3" style="border-bottom: thin solid #000000;"><bold>MRI</bold></th>
<th valign="top" align="center" colspan="3" style="border-bottom: thin solid #000000;"><bold>CT</bold></th>
</tr>
<tr>
<th/>
<th/>
<th/>
<th valign="top" align="center"><bold>Thick</bold></th>
<th valign="top" align="center"><bold>Thin</bold></th>
<th valign="top" align="center"><bold>Mixed</bold></th>
<th valign="top" align="center"><bold>Thick</bold></th>
<th valign="top" align="center"><bold>Thin</bold></th>
<th valign="top" align="center"><bold>Mixed</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">1</td>
<td valign="top" align="center">&#x0221A;</td>
<td/>
<td valign="top" align="center"><bold>0.9390</bold></td>
<td valign="top" align="center">0.8199</td>
<td valign="top" align="center">0.8391</td>
<td valign="top" align="center"><bold>0.9438</bold></td>
<td valign="top" align="center">0.8345</td>
<td valign="top" align="center">0.8767</td>
</tr>
<tr>
<td valign="top" align="left">2</td>
<td/>
<td valign="top" align="center">&#x0221A;</td>
<td valign="top" align="center">0.0034</td>
<td valign="top" align="center">0.0108</td>
<td valign="top" align="center">0.0110</td>
<td valign="top" align="center">0.0109</td>
<td valign="top" align="center">0.0006</td>
<td valign="top" align="center">0.0069</td>
</tr>
<tr>
<td valign="top" align="left">3</td>
<td valign="top" align="center">&#x0221A;</td>
<td valign="top" align="center">&#x0221A;</td>
<td valign="top" align="center">0.9323</td>
<td valign="top" align="center"><bold>0.9056</bold></td>
<td valign="top" align="center"><bold>0.9099</bold></td>
<td valign="top" align="center">0.9365</td>
<td valign="top" align="center"><bold>0.8697</bold></td>
<td valign="top" align="center"><bold>0.8954</bold></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>Trained our model with different thickness images. &#x0201C;Thick&#x0201D; represents our model is trained with labeled thick-slice images, and &#x0201C;Thin&#x0201D; means the model is trained with thin-slice images without annotations. We can see with the help of the semi-supervised training technique we proposed, our method can gain a significant improvement on thin-slice images. Bold values indicate the best performance</italic>.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec>
<title>Qualitative Analysis</title>
<p><xref ref-type="fig" rid="F3">Figure 3</xref> shows the example segmentation results on randomly selected thin-slice images from the testing set for both MRI and CT modalities. In the second column of each modality, it can be observed that U-Net performed poorly on MRI images. Meanwhile, U-Net&#x0002B;&#x0002B;, which is the updated version of U-Net, showed better predictions compared to the U-Net while they were still not accurate. Compared with the U-Net and U-Net&#x0002B;&#x0002B;, our method achieved accurate results and could segment both MRI and CT images with high precision. Particularly, in the last row of the CT example, although the original image had low contrast, our method was still able to recognize each part and segmented the data accurately, which has demonstrated the robustness of our method. We divided patients with acquired hydrocephalus into a subarachnoid hemorrhage group, a brain trauma group, and a brain tumor group according to the cause of the disease. Using our method to automatically segment the images of these three groups of patients, the Dice coefficients of the three groups on CT images were 0.94, 0.95, and 0.94, respectively, while on MRI images they were 0.91, 0.89, and 0.92, respectively (<xref ref-type="table" rid="T5">Table 5</xref>).</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>The visualization of segmentation results for thin-slice with MRI images and CT images. <bold>(A)</bold> The completeness of segmentation indicates the performance of each model on MRI images in which our method achieves the best. <bold>(B)</bold> Our method is superior to other competing approaches on CT images, specifically for low contrast images (The last row).</p></caption>
<graphic xlink:href="fnagi-12-618538-g0003.tif"/>
</fig>
<table-wrap position="float" id="T5">
<label>Table 5</label>
<caption><p>Thick-slice image segmentation results(Dice) of acquired hydrocephalus.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Method</bold></th>
<th valign="top" align="center" colspan="3" style="border-bottom: thin solid #000000;"><bold>CT</bold></th>
<th valign="top" align="center" colspan="3" style="border-bottom: thin solid #000000;"><bold>MRI</bold></th>
</tr>
<tr>
<th/>
<th valign="top" align="center"><bold>Subarachnoid hemorrhage</bold></th>
<th valign="top" align="center"><bold>Trauma</bold></th>
<th valign="top" align="center"><bold>Tumor</bold></th>
<th valign="top" align="center"><bold>Subarachnoid hemorrhage</bold></th>
<th valign="top" align="center"><bold>Trauma</bold></th>
<th valign="top" align="center"><bold>Tumor</bold></th>
</tr>
<tr>
<th/>
<th valign="top" align="center"><bold>(<italic>n</italic> &#x0003D; 24)</bold></th>
<th valign="top" align="center"><bold>(<italic>n</italic> &#x0003D; 13)</bold></th>
<th valign="top" align="center"><bold>(<italic>n</italic> &#x0003D; 3)</bold></th>
<th valign="top" align="center"><bold>(<italic>n</italic> &#x0003D; 24)</bold></th>
<th valign="top" align="center"><bold>(<italic>n</italic> &#x0003D; 10)</bold></th>
<th valign="top" align="center"><bold>(<italic>n</italic> &#x0003D; 4)</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">U-Net</td>
<td valign="top" align="center">0.9091</td>
<td valign="top" align="center">0.9143</td>
<td valign="top" align="center"><bold>0.9387</bold></td>
<td valign="top" align="center">0.9085</td>
<td valign="top" align="center">0.8729</td>
<td valign="top" align="center">0.92</td>
</tr>
<tr>
<td valign="top" align="left">U-Net&#x0002B;&#x0002B;</td>
<td valign="top" align="center">0.8903</td>
<td valign="top" align="center">0.8835</td>
<td valign="top" align="center">0.9219</td>
<td valign="top" align="center">0.9004</td>
<td valign="top" align="center">0.8649</td>
<td valign="top" align="center"><bold>0.9274</bold></td>
</tr>
<tr>
<td valign="top" align="left">Ours</td>
<td valign="top" align="center"><bold>0.9407</bold></td>
<td valign="top" align="center"><bold>0.9454</bold></td>
<td valign="top" align="center">0.9364</td>
<td valign="top" align="center"><bold>0.9105</bold></td>
<td valign="top" align="center"><bold>0.8919</bold></td>
<td valign="top" align="center">0.9231</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>Patients with acquired hydrocephalus were divided into a subarachnoid hemorrhage group, a brain trauma group, and a brain tumor group according to the cause of the disease. Using our method to automatically segment the images of these three groups of patients, the Dice coefficients of the three groups on CT images were 0.94, 0.95, and 0.94, respectively, while on MRI images they were 0.91, 0.89, and 0.92, respectively. Bold values indicate the best performance</italic>.</p>
</table-wrap-foot>
</table-wrap>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<title>Discussion</title>
<p>Through <xref ref-type="fig" rid="F2">Figures 2</xref>, <xref ref-type="fig" rid="F3">3</xref> and <xref ref-type="table" rid="T2">Tables 2</xref>&#x02013;<xref ref-type="table" rid="T4">4</xref>, we can observe that our automated ventricle segmentation method can be successfully applied to CT and MRI images with different thicknesses. More importantly, the segmentation results obtained are better (Dice &#x0003E; 0.9) compared to the widely used U-Net and its advanced version U-Net&#x0002B;&#x0002B;. There is no doubt that the proposed method is promising for different clinical scenarios.</p>
<sec>
<title>Clinical Significance of the Automated Ventricle Segmentation</title>
<p>Changes in the shape and size of the ventricles are associated with many diseases, and relevance ventricular enlargement is a crucial marker of brain atrophy associated with normal or pathological aging processes (Schoemaker et al., <xref ref-type="bibr" rid="B36">2019</xref>). Ventricular enlargement also represents a feasible short-term marker of disease progression in mild cognitive impairment and Alzheimer&#x00027;s disease (Nestor et al., <xref ref-type="bibr" rid="B28">2008</xref>). Ventricular enlargement can occur early in the course of Parkinson&#x00027;s disease and is associated with cognitive decline (Apostolova et al., <xref ref-type="bibr" rid="B3">2012</xref>). New/enlarging T2w lesions adjacent to the ventricle wall and thalamic atrophy are independently associated with lateral ventricular enlargement in multiple sclerosis (Sinnecker et al., <xref ref-type="bibr" rid="B40">2020</xref>). Ventricular enlargement is also arguably the most consistent neuroanatomical biomarker present in schizophrenia (Sayo et al., <xref ref-type="bibr" rid="B35">2012</xref>). Whether it is to detect early disease (Dalaker et al., <xref ref-type="bibr" rid="B11">2011</xref>), evaluate the condition of the patient (Ferrarini et al., <xref ref-type="bibr" rid="B14">2008</xref>), diagnose the disease (Relkin et al., <xref ref-type="bibr" rid="B33">2005</xref>), evaluate the effect of surgery (Neikter et al., <xref ref-type="bibr" rid="B27">2020</xref>) or other aspects, accurate measurement of the size of the ventricles has very important clinical significance (Shi et al., <xref ref-type="bibr" rid="B38">2015</xref>).</p>
<p>When following patients with hydrocephalus, the timing for intervention is difficult to decide for clinicians. Therefore, by providing clinicians with an accurate measure of increased ventricle volume, automated ventricular segmentation techniques would give them more information to make their decisions (Qiu et al., <xref ref-type="bibr" rid="B32">2015</xref>). The segmentation of ventricles provides quantitative measures on the changes of ventricles in the brain that form vital diagnostics information (Chen et al., <xref ref-type="bibr" rid="B6">2009</xref>). The automated segmentation of ventricles can assist in making a differential diagnosis of ischemic stroke. The quantitative measurement of the ventricles can be helpful in a treatment, recovery, and follow-up process. The segmented ventricles can also serve as the reference in determining the spatial position of the infarct, which can provide useful information for treatment planning (Poh et al., <xref ref-type="bibr" rid="B30">2012</xref>). Accurate and automated segmentation and labeling tools enable more sophisticated evaluations of the ventricular system in neurodegenerative diseases, cerebrospinal fluid disorders, as well as in normal aging (Shao et al., <xref ref-type="bibr" rid="B37">2019</xref>). Various diseases affect the size and morphology of the ventricles, and knowledge of the normal and abnormal ventricular system is essential in understanding various pathological states. For these reasons, it is critical to extract the ventricular system to ascertain its morphology and volume (Xia et al., <xref ref-type="bibr" rid="B48">2004</xref>). By manually labeling the ventricles, the time required to measure the volume and relative ventricle volume of each subject is about 30 min, which is acceptable in research, but obviously not feasible in clinical practice (Ambarki et al., <xref ref-type="bibr" rid="B1">2010</xref>). Using the automatic ventricle segmentation method can save time significantly. 
Besides, the unsupervised segmentation method can alleviate the dependency on labeled data, which is more practical for a real-world scenario. For instance, Liu et al. (<xref ref-type="bibr" rid="B21">2019</xref>) utilized the quality of merged segmentation results to update the ensemble weights of different segmentation results in order to achieve accurate segmentation results. Meanwhile, Ganaye et al. (<xref ref-type="bibr" rid="B15">2018</xref>) took advantage of the invariant nature of the anatomical structure to improve the robustness of the segmentation results by applying semantic constraints.</p>
</sec>
<sec>
<title>Comparison Studies and the Advantages of Our Proposed Method</title>
<p>Huff et al. mentioned the limitations of their research for an automated ventricle segmentation method that all the studies were performed on similar CT scanners with similar acquisition parameters and identical slice thicknesses. No pathological ventricles were included in this study other than simply enlarged ventricles (Huff et al., <xref ref-type="bibr" rid="B17">2019</xref>). In our research, we can see that data acquired from different scanners were validated, the thickness of the scan layer was also different, and pathological ventricles were also included. Qiu et al. (<xref ref-type="bibr" rid="B32">2015</xref>) outlined the limitations of their study as validated on a limited number of images since MR images of preterm neonates were usually not performed at our center unless a severe disease was suspected. Because of this, we did not deliberately make requests when selecting patients. As our goal is to automatically segment the ventricles, the image we chose must be systematic and comprehensive. The shape and size of the ventricles of the four groups of patients can represent the ventricles of the elderly cohort. Kocaman et al. (<xref ref-type="bibr" rid="B20">2019</xref>) performed a study on a small number of individuals. The actual sample size of our four groups of patients is large, and in clinical practice, these patients are relatively common. As the sample size increased, the results of our automated ventricle segmentation method were also gradually stabilized.</p>
<p>Xia et al. pointed out the limitations of their research on automated extraction of the ventricular system: When the slice thickness, especially in coronal and axial directions, is too high, the algorithm could not work satisfactorily. Most of the subjects tested did not have any pathology or major distortion of the ventricles (Xia et al., <xref ref-type="bibr" rid="B48">2004</xref>). In our research, the thickness of the scan slice was no longer a confounding factor. Both thick-slice and thin-slice images could be processed with better ventricle segmentation results. At the same time, our patients included not only normal elderly people but also brain atrophy elderly people with slight changes in the ventricle shape and size. Besides, our proposed framework also performed well for iNPH patients with significant changes in the shape and size of the ventricles. More importantly, the elderly with acquired hydrocephalus with obvious changes in intracranial structures caused by trauma, tumor, hemorrhage, and other conditions were also included. Shao et al. mentioned in their brain ventricle parcellation work that the proposed network was also robust to white matter hyperintensities (WMH), which were often associated with NPH and located adjacent to the lateral ventricles. WMH can sometimes negatively affect the outcome of automated segmentation algorithms (Shao et al., <xref ref-type="bibr" rid="B37">2019</xref>). Similarly, some of our patients were also NPH patients, and they had white matter hyperintensities around their lateral ventricles. Some patients with brain atrophy had a similar situation, but our ventricle segmentation framework could handle it.</p>
</sec>
<sec>
<title>Influence of Examination Type and Slice Thickness on Ventricle Segmentation</title>
<p>In clinical work, we often choose CT or MRI for head imaging examination. Both methods have their advantages and disadvantages. For CT, it is relatively convenient to operate, no need to worry about metal implantation, and the inspection speed is fast. But it has ionizing radiation to the human body, and it has a low signal to noise ratio and relatively low contrast. For MRI, it does not produce ionizing radiation and can provide better soft-tissue resolution, but its inspection time is long and may have considerable issues such as metal implantation and claustrophobia and other related problems (Chen et al., <xref ref-type="bibr" rid="B6">2009</xref>; Liu et al., <xref ref-type="bibr" rid="B22">2010</xref>; Coupe et al., <xref ref-type="bibr" rid="B10">2011</xref>; Poh et al., <xref ref-type="bibr" rid="B30">2012</xref>; Qian et al., <xref ref-type="bibr" rid="B31">2017</xref>; Huff et al., <xref ref-type="bibr" rid="B17">2019</xref>). It is well known that in medical images partial volume effect is inevitable. Reducing the slice thickness can reduce the partial volume effect. But for CT examinations, this means that patients have to receive more ionizing radiation, and for MRI examinations, the examination time will be longer. For deep learning, more content means more information, so thin-layer images are naturally the best choice. However, in clinical practice, because of the heavy burden for a large patient population, thick-slice scanning is still the most used acquisition method. But for the segmentation of the ventricles from thick-slice images, the number of images per patient is small, and the information that can be extracted is also limited. Coupled with the influence of the partial volume effect, it is often difficult to segment the boundaries of the ventricles from thick-slice images (Xia et al., <xref ref-type="bibr" rid="B48">2004</xref>). 
The stroke area on the CT image is often adjacent or connected to the ventricle area, and the grayscale is similar, which increases the difficulty of accurately segmenting the ventricle (Qian et al., <xref ref-type="bibr" rid="B31">2017</xref>). In addition, on the CT image, due to the noise and low contrast between the soft tissues, there is no obvious peak in the cerebrospinal fluid in the whole brain intensity histogram. This makes it difficult to find a suitable threshold for cerebrospinal fluid using traditional histogram-based segmentation methods (Liu et al., <xref ref-type="bibr" rid="B22">2010</xref>). The partial volume effect will affect the segmentation of the ventricle, especially on MR images with limited resolution (Coupe et al., <xref ref-type="bibr" rid="B10">2011</xref>). Due to the partial volume effect, there exist transition regions between the cerebrospinal fluid and gray matter. If these transition regions are completely excluded, the ventricular system is under-segmented, and some ventricular components, for example, the lateral ventricles, may be broken into several disconnected parts (Liu et al., <xref ref-type="bibr" rid="B23">2009</xref>). The temporal horns and occipital poles of the ventricle can be separated from the main body. When the shape-based ventricle segmentation method and the ventricle segmentation method based on the regional growth technology are used, the results will be affected. In addition, the signal intensity of the choroidal plexus is similar to that of gray matter. When a simple threshold technique is used to segment the ventricle, the result will also be affected (Coupe et al., <xref ref-type="bibr" rid="B10">2011</xref>). All in all, different imaging data and slice thickness have their advantages and disadvantages, and they also have a different impact on automated segmentation methods.</p>
</sec>
<sec>
<title>Limitations of Our Automated Ventricle Segmentation Framework</title>
<p>Because our current work was a retrospective study based on the elderly to establish a new systematic automated ventricle segmentation method, our research might still have some limitations. First of all, because this study selected elderly patients, our method might have insufficient capacity to deal with pediatric patients. Secondly, because this was multi-center and multi-modal research, in terms of results, our goal was to perform well as a whole, so the expression of results in some respects was bound to be relatively weakened. When processing cross-hospital data, we need to perform extensive re-training of the model to ensure the accuracy of the running results. As a deep-learning-based model, the training data collected at one site are often unavailable to others due to privacy and legal issues (Wang et al., <xref ref-type="bibr" rid="B46">2020a</xref>,<xref ref-type="bibr" rid="B47">b</xref>).</p>
<p>In future research, we will focus on extracting different imaging and biological features through deep learning of images, laboratory test results, and clinical information of patients with abnormal ventricles. We will achieve a systematic and comprehensive analysis of patients with ventricular abnormalities, and determine whether the patient has a certain disease that can cause ventricular abnormalities.</p>
</sec>
</sec>
<sec sec-type="conclusions" id="s5">
<title>Conclusion</title>
<p>In order to systematically and comprehensively assess the size of the ventricle of elderly patients, we have established an automated ventricle segmentation method. This automated ventricle segmentation method can be applied not only to both CT and MRI images but also to images with different slice thicknesses. More importantly, it produces superior segmentation results. Deploying this automated ventricle segmentation method in clinical scenarios can help doctors to find and diagnose early disease, evaluate the progress of the patient&#x00027;s condition, and inform the treatment planning for the patients. At the same time, the medical image scanning method and the slice thickness are no longer limitations for automated ventricle segmentation. There is no doubt that the proposed method will have a wide application in clinical studies.</p>
</sec>
<sec sec-type="data-availability-statement" id="s6">
<title>Data Availability Statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding author/s.</p>
</sec>
<sec id="s7">
<title>Ethics Statement</title>
<p>The studies involving human participants were reviewed and approved by Ethics Committee of The First Affiliated Hospital of Shenzhen University and Shenzhen Second People&#x00027;s hospital. Written informed consent for participation was not required for this study in accordance with the national legislation and the institutional requirements.</p>
</sec>
<sec id="s8">
<title>Author Contributions</title>
<p>XZ, QY, YJ, JX, and GY conceived and designed the study, contributed to data analysis, contributed to data interpretation, and contributed to the writing of the report. XZ, ZN, WM-S, EF, ZL, and JX contributed to the literature search. XZ and JX contributed to data collection. XZ, QY, YJ, and MW performed data curation and contributed to the tables and figures. All authors contributed to the article and approved the submitted version.</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of Interest</title>
<p>ZN and WM-S are employed by Aladdin Healthcare Technologies Ltd. QY, YJ, and MW are employed by Hangzhou Ocean&#x00027;s Smart Boya Co., Ltd., China and Mind Rank Ltd., China. The remaining authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
</body>
<back>
<ack><p>The authors would like to sincerely thank Meng-Yao Xu for her support and suggestions for the production of diagrams and tables throughout the entire project.</p></ack>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ambarki</surname> <given-names>K.</given-names></name> <name><surname>Israelsson</surname> <given-names>H.</given-names></name> <name><surname>Wahlin</surname> <given-names>A.</given-names></name> <name><surname>Birgander</surname> <given-names>R.</given-names></name> <name><surname>Eklund</surname> <given-names>A.</given-names></name> <name><surname>Malm</surname> <given-names>J.</given-names></name></person-group> (<year>2010</year>). <article-title>Brain ventricular size in healthy elderly: comparison between Evans index and volume measurement</article-title>. <source>Neurosurgery</source> <volume>67</volume>, <fpage>94</fpage>&#x02013;<lpage>99</lpage>. <pub-id pub-id-type="doi">10.1227/01.NEU.0000370939.30003.D1</pub-id><pub-id pub-id-type="pmid">20559096</pub-id></citation></ref>
<ref id="B2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Anandh</surname> <given-names>K. R.</given-names></name> <name><surname>Sujatha</surname> <given-names>C. M.</given-names></name> <name><surname>Ramakrishnan</surname> <given-names>S.</given-names></name></person-group> (<year>2016</year>). <article-title>A method to differentiate mild cognitive impairment and Alzheimer in MR images using Eigen value descriptors</article-title>. <source>J. Med. Syst.</source> <volume>40</volume>:<fpage>25</fpage>. <pub-id pub-id-type="doi">10.1007/s10916-015-0396-y</pub-id><pub-id pub-id-type="pmid">26547845</pub-id></citation></ref>
<ref id="B3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Apostolova</surname> <given-names>L.</given-names></name> <name><surname>Alves</surname> <given-names>G.</given-names></name> <name><surname>Hwang</surname> <given-names>K. S.</given-names></name> <name><surname>Babakchanian</surname> <given-names>S.</given-names></name> <name><surname>Bronnick</surname> <given-names>K. S.</given-names></name> <name><surname>Larsen</surname> <given-names>J. P.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Hippocampal and ventricular changes in Parkinson&#x00027;s disease mild cognitive impairment</article-title>. <source>Neurobiol. Aging</source> <volume>33</volume>, <fpage>2113</fpage>&#x02013;<lpage>2124</lpage>. <pub-id pub-id-type="doi">10.1016/j.neurobiolaging.2011.06.014</pub-id><pub-id pub-id-type="pmid">21813212</pub-id></citation></ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brix</surname> <given-names>M. K.</given-names></name> <name><surname>Westman</surname> <given-names>E.</given-names></name> <name><surname>Simmons</surname> <given-names>A.</given-names></name> <name><surname>Ringstad</surname> <given-names>G. A.</given-names></name> <name><surname>Eide</surname> <given-names>P. K.</given-names></name> <name><surname>Wagner-Larsen</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>The Evans&#x00027; Index revisited: new cut-off levels for use in radiological assessment of ventricular enlargement in the elderly</article-title>. <source>Eur. J. Radiol.</source> <volume>95</volume>, <fpage>28</fpage>&#x02013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.1016/j.ejrad.2017.07.013</pub-id><pub-id pub-id-type="pmid">28987681</pub-id></citation></ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cavedo</surname> <given-names>E.</given-names></name> <name><surname>Galluzzi</surname> <given-names>S.</given-names></name> <name><surname>Pievani</surname> <given-names>M.</given-names></name> <name><surname>Boccardi</surname> <given-names>M.</given-names></name> <name><surname>Frisoni</surname> <given-names>G. B.</given-names></name></person-group> (<year>2012</year>). <article-title>Norms for imaging markers of brain reserve</article-title>. <source>J. Alzheimers Dis.</source> <volume>31</volume>, <fpage>623</fpage>&#x02013;<lpage>633</lpage>. <pub-id pub-id-type="doi">10.3233/JAD-2012-111817</pub-id><pub-id pub-id-type="pmid">22672878</pub-id></citation></ref>
<ref id="B6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>W.</given-names></name> <name><surname>Smith</surname> <given-names>R.</given-names></name> <name><surname>Ji</surname> <given-names>S. Y.</given-names></name> <name><surname>Ward</surname> <given-names>K. R.</given-names></name> <name><surname>Najarian</surname> <given-names>K.</given-names></name></person-group> (<year>2009</year>). <article-title>Automated ventricular systems segmentation in brain CT images by combining low-level segmentation and high-level template matching</article-title>. <source>BMC Med. Inform. Decis. Mak.</source> <volume>9</volume> (<supplement>Suppl. 1</supplement>):<fpage>S4</fpage>. <pub-id pub-id-type="doi">10.1186/1472-6947-9-S1-S4</pub-id><pub-id pub-id-type="pmid">19891798</pub-id></citation></ref>
<ref id="B7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cherukuri</surname> <given-names>V.</given-names></name> <name><surname>Ssenyonga</surname> <given-names>P.</given-names></name> <name><surname>Warf</surname> <given-names>B. C.</given-names></name> <name><surname>Kulkarni</surname> <given-names>A. V.</given-names></name> <name><surname>Monga</surname> <given-names>V.</given-names></name> <name><surname>Schiff</surname> <given-names>S. J.</given-names></name></person-group> (<year>2018</year>). <article-title>Learning based segmentation of CT brain images: application to postoperative hydrocephalic scans</article-title>. <source>IEEE Trans. Biomed. Eng.</source> <volume>65</volume>, <fpage>1871</fpage>&#x02013;<lpage>1884</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2017.2783305</pub-id><pub-id pub-id-type="pmid">29989926</pub-id></citation></ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chou</surname> <given-names>Y. Y.</given-names></name> <name><surname>Lepore</surname> <given-names>N.</given-names></name> <name><surname>Avedissian</surname> <given-names>C.</given-names></name> <name><surname>Madsen</surname> <given-names>S. K.</given-names></name> <name><surname>Parikshak</surname> <given-names>N.</given-names></name> <name><surname>Hua</surname> <given-names>X.</given-names></name> <etal/></person-group>. (<year>2009</year>). <article-title>Mapping correlations between ventricular expansion and CSF amyloid and tau biomarkers in 240 subjects with Alzheimer&#x00027;s disease, mild cognitive impairment and elderly controls</article-title>. <source>Neuroimage</source> <volume>46</volume>, <fpage>394</fpage>&#x02013;<lpage>410</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2009.02.015</pub-id><pub-id pub-id-type="pmid">19236926</pub-id></citation></ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chou</surname> <given-names>Y. Y.</given-names></name> <name><surname>Lepore</surname> <given-names>N.</given-names></name> <name><surname>de Zubicaray</surname> <given-names>G. I.</given-names></name> <name><surname>Carmichael</surname> <given-names>O. T.</given-names></name> <name><surname>Becker</surname> <given-names>J. T.</given-names></name> <name><surname>Toga</surname> <given-names>A. W.</given-names></name> <etal/></person-group>. (<year>2008</year>). <article-title>Automated ventricular mapping with multi-atlas fluid image alignment reveals genetic effects in Alzheimer&#x00027;s disease</article-title>. <source>Neuroimage</source> <volume>40</volume>, <fpage>615</fpage>&#x02013;<lpage>630</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2007.11.047</pub-id><pub-id pub-id-type="pmid">18222096</pub-id></citation></ref>
<ref id="B10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Coupe</surname> <given-names>P.</given-names></name> <name><surname>Manjon</surname> <given-names>J. V.</given-names></name> <name><surname>Fonov</surname> <given-names>V.</given-names></name> <name><surname>Pruessner</surname> <given-names>J.</given-names></name> <name><surname>Robles</surname> <given-names>M.</given-names></name> <name><surname>Collins</surname> <given-names>D. L.</given-names></name></person-group> (<year>2011</year>). <article-title>Patch-based segmentation using expert priors: application to hippocampus and ventricle segmentation</article-title>. <source>Neuroimage</source> <volume>54</volume>, <fpage>940</fpage>&#x02013;<lpage>954</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2010.09.018</pub-id><pub-id pub-id-type="pmid">20851199</pub-id></citation></ref>
<ref id="B11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dalaker</surname> <given-names>T. O.</given-names></name> <name><surname>Zivadinov</surname> <given-names>R.</given-names></name> <name><surname>Ramasamy</surname> <given-names>D. P.</given-names></name> <name><surname>Beyer</surname> <given-names>M. K.</given-names></name> <name><surname>Alves</surname> <given-names>G.</given-names></name> <name><surname>Bronnick</surname> <given-names>K. S.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>Ventricular enlargement and mild cognitive impairment in early Parkinson&#x00027;s disease</article-title>. <source>Mov. Disord.</source> <volume>26</volume>, <fpage>297</fpage>&#x02013;<lpage>301</lpage>. <pub-id pub-id-type="doi">10.1002/mds.23443</pub-id><pub-id pub-id-type="pmid">21412836</pub-id></citation></ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Del</surname> <given-names>R. E.</given-names></name> <name><surname>Konishi</surname> <given-names>J.</given-names></name> <name><surname>Bouix</surname> <given-names>S.</given-names></name> <name><surname>Blokland</surname> <given-names>G. A.</given-names></name> <name><surname>Mesholam-Gately</surname> <given-names>R. I.</given-names></name> <name><surname>Goldstein</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Enlarged lateral ventricles inversely correlate with reduced corpus callosum central volume in first episode schizophrenia: association with functional measures</article-title>. <source>Brain Imaging Behav.</source> <volume>10</volume>, <fpage>1264</fpage>&#x02013;<lpage>1273</lpage>. <pub-id pub-id-type="doi">10.1007/s11682-015-9493-2</pub-id></citation></ref>
<ref id="B13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dubost</surname> <given-names>F.</given-names></name> <name><surname>Bruijne</surname> <given-names>M.</given-names></name> <name><surname>Nardin</surname> <given-names>M.</given-names></name> <name><surname>Dalca</surname> <given-names>A. V.</given-names></name> <name><surname>Donahue</surname> <given-names>K. L.</given-names></name> <name><surname>Giese</surname> <given-names>A. K.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Multi-atlas image registration of clinical data with automated quality assessment using ventricle segmentation</article-title>. <source>Med. Image Anal.</source> <volume>63</volume>:<fpage>101698</fpage>. <pub-id pub-id-type="doi">10.1016/j.media.2020.101698</pub-id><pub-id pub-id-type="pmid">32339896</pub-id></citation></ref>
<ref id="B14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ferrarini</surname> <given-names>L.</given-names></name> <name><surname>Palm</surname> <given-names>W. M.</given-names></name> <name><surname>Olofsen</surname> <given-names>H.</given-names></name> <name><surname>van der Landen</surname> <given-names>R.</given-names></name> <name><surname>Jan</surname> <given-names>B. G.</given-names></name> <name><surname>Westendorp</surname> <given-names>R. G.</given-names></name> <etal/></person-group>. (<year>2008</year>). <article-title>MMSE scores correlate with local ventricular enlargement in the spectrum from cognitively normal to Alzheimer disease</article-title>. <source>Neuroimage</source> <volume>39</volume>, <fpage>1832</fpage>&#x02013;<lpage>1838</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2007.11.003</pub-id><pub-id pub-id-type="pmid">18160312</pub-id></citation></ref>
<ref id="B15">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Ganaye</surname> <given-names>P. A.</given-names></name> <name><surname>Sdika</surname> <given-names>M.</given-names></name> <name><surname>Benoit-Cattin</surname> <given-names>H.</given-names></name></person-group> (<year>2018</year>). <article-title>&#x0201C;Semi-supervised learning for segmentation under semantic constraint,&#x0201D;</article-title> in <source>International Conference on Medical Image Computing and Computer-Assisted Intervention</source> (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>595</fpage>&#x02013;<lpage>602</lpage>.</citation></ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>He</surname> <given-names>W.</given-names></name> <name><surname>Fang</surname> <given-names>X.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name> <name><surname>Gao</surname> <given-names>P.</given-names></name> <name><surname>Gao</surname> <given-names>X.</given-names></name> <name><surname>Zhou</surname> <given-names>X.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>A new index for assessing cerebral ventricular volume in idiopathic normal-pressure hydrocephalus: a comparison with Evans&#x00027; index</article-title>. <source>Neuroradiology</source> <volume>62</volume>, <fpage>661</fpage>&#x02013;<lpage>667</lpage>. <pub-id pub-id-type="doi">10.1007/s00234-020-02361-8</pub-id><pub-id pub-id-type="pmid">32008047</pub-id></citation></ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huff</surname> <given-names>T. J.</given-names></name> <name><surname>Ludwig</surname> <given-names>P. E.</given-names></name> <name><surname>Salazar</surname> <given-names>D.</given-names></name> <name><surname>Cramer</surname> <given-names>J. A.</given-names></name></person-group> (<year>2019</year>). <article-title>Fully automated intracranial ventricle segmentation on CT with 2D regional convolutional neural network to estimate ventricular volume</article-title>. <source>Int. J. Comput. Assist. Radiol. Surg.</source> <volume>14</volume>, <fpage>1923</fpage>&#x02013;<lpage>1932</lpage>. <pub-id pub-id-type="doi">10.1007/s11548-019-02038-5</pub-id><pub-id pub-id-type="pmid">31350705</pub-id></citation></ref>
<ref id="B18">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kempton</surname> <given-names>M. J.</given-names></name> <name><surname>Underwood</surname> <given-names>T. S.</given-names></name> <name><surname>Brunton</surname> <given-names>S.</given-names></name> <name><surname>Stylios</surname> <given-names>F.</given-names></name> <name><surname>Schmechtig</surname> <given-names>A.</given-names></name> <name><surname>Ettinger</surname> <given-names>U.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>A comprehensive testing protocol for MRI neuroanatomical segmentation techniques: evaluation of a novel lateral ventricle segmentation method</article-title>. <source>Neuroimage</source> <volume>58</volume>, <fpage>1051</fpage>&#x02013;<lpage>1059</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.06.080</pub-id><pub-id pub-id-type="pmid">21835253</pub-id></citation></ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Khan</surname> <given-names>A. F.</given-names></name> <name><surname>Drozd</surname> <given-names>J. J.</given-names></name> <name><surname>Moreland</surname> <given-names>R. K.</given-names></name> <name><surname>Ta</surname> <given-names>R. M.</given-names></name> <name><surname>Borrie</surname> <given-names>M. J.</given-names></name> <name><surname>Bartha</surname> <given-names>R.</given-names></name></person-group> (<year>2012</year>). <article-title>A novel MRI-compatible brain ventricle phantom for validation of segmentation and volumetry methods</article-title>. <source>J. Magn. Reson. Imaging</source> <volume>36</volume>, <fpage>476</fpage>&#x02013;<lpage>482</lpage>. <pub-id pub-id-type="doi">10.1002/jmri.23612</pub-id><pub-id pub-id-type="pmid">22396226</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kocaman</surname> <given-names>H.</given-names></name> <name><surname>Acer</surname> <given-names>N.</given-names></name> <name><surname>Koseoglu</surname> <given-names>E.</given-names></name> <name><surname>Gultekin</surname> <given-names>M.</given-names></name> <name><surname>Donmez</surname> <given-names>H.</given-names></name></person-group> (<year>2019</year>). <article-title>Evaluation of intracerebral ventricles volume of patients with Parkinson&#x00027;s disease using the atlas-based method: a methodological study</article-title>. <source>J. Chem. Neuroanat.</source> <volume>98</volume>, <fpage>124</fpage>&#x02013;<lpage>130</lpage>. <pub-id pub-id-type="doi">10.1016/j.jchemneu.2019.04.005</pub-id><pub-id pub-id-type="pmid">30986488</pub-id></citation></ref>
<ref id="B21">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>B.</given-names></name> <name><surname>Gu</surname> <given-names>L.</given-names></name> <name><surname>Lu</surname> <given-names>F.</given-names></name></person-group> (<year>2019</year>). <article-title>&#x0201C;Unsupervised ensemble strategy for retinal vessel segmentation,&#x0201D;</article-title> in <source>International Conference on Medical Image Computing and Computer-Assisted Intervention</source> (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>).</citation></ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Huang</surname> <given-names>S.</given-names></name> <name><surname>Ihar</surname> <given-names>V.</given-names></name> <name><surname>Ambrosius</surname> <given-names>W.</given-names></name> <name><surname>Lee</surname> <given-names>L. C.</given-names></name> <name><surname>Nowinski</surname> <given-names>W. L.</given-names></name></person-group> (<year>2010</year>). <article-title>Automatic model-guided segmentation of the human brain ventricular system from CT images</article-title>. <source>Acad. Radiol.</source> <volume>17</volume>, <fpage>718</fpage>&#x02013;<lpage>726</lpage>. <pub-id pub-id-type="doi">10.1016/j.acra.2010.02.013</pub-id><pub-id pub-id-type="pmid">20457415</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Huang</surname> <given-names>S.</given-names></name> <name><surname>Nowinski</surname> <given-names>W. L.</given-names></name></person-group> (<year>2009</year>). <article-title>Automatic segmentation of the human brain ventricles from MR images by knowledge-based region growing and trimming</article-title>. <source>Neuroinformatics</source> <volume>7</volume>, <fpage>131</fpage>&#x02013;<lpage>146</lpage>. <pub-id pub-id-type="doi">10.1007/s12021-009-9046-1</pub-id><pub-id pub-id-type="pmid">19449142</pub-id></citation></ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lundervold</surname> <given-names>A. J.</given-names></name> <name><surname>Vik</surname> <given-names>A.</given-names></name> <name><surname>Lundervold</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>Lateral ventricle volume trajectories predict response inhibition in older age-A longitudinal brain imaging and machine learning approach</article-title>. <source>PLoS ONE</source> <volume>14</volume>:<fpage>e207967</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0207967</pub-id><pub-id pub-id-type="pmid">30939173</pub-id></citation></ref>
<ref id="B25">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Milletari</surname> <given-names>F.</given-names></name> <name><surname>Navab</surname> <given-names>N.</given-names></name> <name><surname>Ahmadi</surname> <given-names>S. A.</given-names></name></person-group> (<year>2016</year>). <article-title>&#x0201C;V-net: fully convolutional neural networks for volumetric medical image segmentation,&#x0201D;</article-title> in <source>2016 Fourth International Conference on 3D Vision (3DV)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>565</fpage>&#x02013;<lpage>571</lpage>.</citation></ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mori</surname> <given-names>E.</given-names></name> <name><surname>Ishikawa</surname> <given-names>M.</given-names></name> <name><surname>Kato</surname> <given-names>T.</given-names></name> <name><surname>Kazui</surname> <given-names>H.</given-names></name> <name><surname>Miyake</surname> <given-names>H.</given-names></name> <name><surname>Miyajima</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Guidelines for management of idiopathic normal pressure hydrocephalus: second edition</article-title>. <source>Neurol. Med. Chir.</source> <volume>52</volume>, <fpage>775</fpage>&#x02013;<lpage>809</lpage>. <pub-id pub-id-type="doi">10.2176/nmc.52.775</pub-id><pub-id pub-id-type="pmid">23183074</pub-id></citation></ref>
<ref id="B27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Neikter</surname> <given-names>J.</given-names></name> <name><surname>Agerskov</surname> <given-names>S.</given-names></name> <name><surname>Hellstrom</surname> <given-names>P.</given-names></name> <name><surname>Tullberg</surname> <given-names>M.</given-names></name> <name><surname>Starck</surname> <given-names>G.</given-names></name> <name><surname>Ziegelitz</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Ventricular volume is more strongly associated with clinical improvement than the evans index after shunting in idiopathic normal pressure hydrocephalus</article-title>. <source>AJNR Am. J. Neuroradiol.</source> <volume>41</volume>, <fpage>1187</fpage>&#x02013;<lpage>1192</lpage>. <pub-id pub-id-type="doi">10.3174/ajnr.A6620</pub-id><pub-id pub-id-type="pmid">32527841</pub-id></citation></ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nestor</surname> <given-names>S. M.</given-names></name> <name><surname>Rupsingh</surname> <given-names>R.</given-names></name> <name><surname>Borrie</surname> <given-names>M.</given-names></name> <name><surname>Smith</surname> <given-names>M.</given-names></name> <name><surname>Accomazzi</surname> <given-names>V.</given-names></name> <name><surname>Wells</surname> <given-names>J. L.</given-names></name> <etal/></person-group>. (<year>2008</year>). <article-title>Ventricular enlargement as a possible measure of Alzheimer&#x00027;s disease progression validated using the Alzheimer&#x00027;s disease neuroimaging initiative database</article-title>. <source>Brain</source> <volume>131</volume> (<supplement>Pt 9</supplement>), <fpage>2443</fpage>&#x02013;<lpage>2454</lpage>. <pub-id pub-id-type="doi">10.1093/brain/awn146</pub-id><pub-id pub-id-type="pmid">18669512</pub-id></citation></ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Owen</surname> <given-names>J. P.</given-names></name> <name><surname>Bukshpun</surname> <given-names>P.</given-names></name> <name><surname>Pojman</surname> <given-names>N.</given-names></name> <name><surname>Thieu</surname> <given-names>T.</given-names></name> <name><surname>Chen</surname> <given-names>Q.</given-names></name> <name><surname>Lee</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Brain MR imaging findings and associated outcomes in carriers of the reciprocal copy number variation at 16p11.2</article-title>. <source>Radiology</source> <volume>286</volume>, <fpage>217</fpage>&#x02013;<lpage>226</lpage>. <pub-id pub-id-type="doi">10.1148/radiol.2017162934</pub-id><pub-id pub-id-type="pmid">28786752</pub-id></citation></ref>
<ref id="B30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Poh</surname> <given-names>L. E.</given-names></name> <name><surname>Gupta</surname> <given-names>V.</given-names></name> <name><surname>Johnson</surname> <given-names>A.</given-names></name> <name><surname>Kazmierski</surname> <given-names>R.</given-names></name> <name><surname>Nowinski</surname> <given-names>W. L.</given-names></name></person-group> (<year>2012</year>). <article-title>Automatic segmentation of ventricular cerebrospinal fluid from ischemic stroke CT images</article-title>. <source>Neuroinformatics</source> <volume>10</volume>, <fpage>159</fpage>&#x02013;<lpage>172</lpage>. <pub-id pub-id-type="doi">10.1007/s12021-011-9135-9</pub-id><pub-id pub-id-type="pmid">22125015</pub-id></citation></ref>
<ref id="B31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Qian</surname> <given-names>X.</given-names></name> <name><surname>Lin</surname> <given-names>Y.</given-names></name> <name><surname>Zhao</surname> <given-names>Y.</given-names></name> <name><surname>Yue</surname> <given-names>X.</given-names></name> <name><surname>Lu</surname> <given-names>B.</given-names></name> <name><surname>Wang</surname> <given-names>J.</given-names></name></person-group> (<year>2017</year>). <article-title>Objective ventricle segmentation in brain CT with ischemic stroke based on anatomical knowledge</article-title>. <source>Biomed. Res. Int.</source> <volume>2017</volume>:<fpage>8690892</fpage>. <pub-id pub-id-type="doi">10.1155/2017/8690892</pub-id><pub-id pub-id-type="pmid">28271071</pub-id></citation></ref>
<ref id="B32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Qiu</surname> <given-names>W.</given-names></name> <name><surname>Yuan</surname> <given-names>J.</given-names></name> <name><surname>Rajchl</surname> <given-names>M.</given-names></name> <name><surname>Kishimoto</surname> <given-names>J.</given-names></name> <name><surname>Chen</surname> <given-names>Y.</given-names></name> <name><surname>de Ribaupierre</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>3D MR ventricle segmentation in pre-term infants with post-hemorrhagic ventricle dilatation (PHVD) using multi-phase geodesic level-sets</article-title>. <source>Neuroimage</source> <volume>118</volume>, <fpage>13</fpage>&#x02013;<lpage>25</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2015.05.099</pub-id><pub-id pub-id-type="pmid">26070262</pub-id></citation></ref>
<ref id="B33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Relkin</surname> <given-names>N.</given-names></name> <name><surname>Marmarou</surname> <given-names>A.</given-names></name> <name><surname>Klinge</surname> <given-names>P.</given-names></name> <name><surname>Bergsneider</surname> <given-names>M.</given-names></name> <name><surname>Black</surname> <given-names>P. M.</given-names></name></person-group> (<year>2005</year>). <article-title>Diagnosing idiopathic normal-pressure hydrocephalus</article-title>. <source>Neurosurgery</source> <volume>57</volume>, <fpage>S4</fpage>&#x02013;<lpage>S16</lpage>. <pub-id pub-id-type="doi">10.1227/01.NEU.0000168185.29659.C5</pub-id></citation></ref>
<ref id="B34">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Ronneberger</surname> <given-names>O.</given-names></name> <name><surname>Fischer</surname> <given-names>P.</given-names></name> <name><surname>Brox</surname> <given-names>T.</given-names></name></person-group> (<year>2015</year>). <article-title>&#x0201C;U-Net: convolutional networks for biomedical image segmentation,&#x0201D;</article-title> in <source>Medical Image Computing and Computer-Assisted Intervention &#x02013; MICCAI 2015</source>. Lecture notes in computer science, Vol. 9351, eds N. Navab, J. Hornegger, W. Wells, and A. Frangi (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>).</citation></ref>
<ref id="B35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sayo</surname> <given-names>A.</given-names></name> <name><surname>Jennings</surname> <given-names>R. G.</given-names></name> <name><surname>Van Horn</surname> <given-names>J. D.</given-names></name></person-group> (<year>2012</year>). <article-title>Study factors influencing ventricular enlargement in schizophrenia: a 20 year follow-up meta-analysis</article-title>. <source>Neuroimage</source> <volume>59</volume>, <fpage>154</fpage>&#x02013;<lpage>167</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.07.011</pub-id><pub-id pub-id-type="pmid">21787868</pub-id></citation></ref>
<ref id="B36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schoemaker</surname> <given-names>D.</given-names></name> <name><surname>Buss</surname> <given-names>C.</given-names></name> <name><surname>Pietrantonio</surname> <given-names>S.</given-names></name> <name><surname>Maunder</surname> <given-names>L.</given-names></name> <name><surname>Freiesleben</surname> <given-names>S. D.</given-names></name> <name><surname>Hartmann</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>The hippocampal-to-ventricle ratio (HVR): Presentation of a manual segmentation protocol and preliminary evidence</article-title>. <source>Neuroimage</source> <volume>203</volume>:<fpage>116108</fpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2019.116108</pub-id><pub-id pub-id-type="pmid">31472249</pub-id></citation></ref>
<ref id="B37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shao</surname> <given-names>M.</given-names></name> <name><surname>Han</surname> <given-names>S.</given-names></name> <name><surname>Carass</surname> <given-names>A.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Blitz</surname> <given-names>A. M.</given-names></name> <name><surname>Shin</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Brain ventricle parcellation using a deep neural network: application to patients with ventriculomegaly</article-title>. <source>Neuroimage Clin.</source> <volume>23</volume>:<fpage>101871</fpage>. <pub-id pub-id-type="doi">10.1016/j.nicl.2019.101871</pub-id><pub-id pub-id-type="pmid">31174103</pub-id></citation></ref>
<ref id="B38">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shi</surname> <given-names>J.</given-names></name> <name><surname>Stonnington</surname> <given-names>C. M.</given-names></name> <name><surname>Thompson</surname> <given-names>P. M.</given-names></name> <name><surname>Chen</surname> <given-names>K.</given-names></name> <name><surname>Gutman</surname> <given-names>B.</given-names></name> <name><surname>Reschke</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>Studying ventricular abnormalities in mild cognitive impairment with hyperbolic Ricci flow and tensor-based morphometry</article-title>. <source>Neuroimage</source> <volume>104</volume>, <fpage>1</fpage>&#x02013;<lpage>20</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2014.09.062</pub-id><pub-id pub-id-type="pmid">25285374</pub-id></citation></ref>
<ref id="B39">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Silbert</surname> <given-names>L. C.</given-names></name> <name><surname>Quinn</surname> <given-names>J. F.</given-names></name> <name><surname>Moore</surname> <given-names>M. M.</given-names></name> <name><surname>Corbridge</surname> <given-names>E.</given-names></name> <name><surname>Ball</surname> <given-names>M. J.</given-names></name> <name><surname>Murdoch</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>2003</year>). <article-title>Changes in premorbid brain volume predict Alzheimer&#x00027;s disease pathology</article-title>. <source>Neurology</source> <volume>61</volume>, <fpage>487</fpage>&#x02013;<lpage>492</lpage>. <pub-id pub-id-type="doi">10.1212/01.WNL.0000079053.77227.14</pub-id><pub-id pub-id-type="pmid">12939422</pub-id></citation></ref>
<ref id="B40">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sinnecker</surname> <given-names>T.</given-names></name> <name><surname>Ruberte</surname> <given-names>E.</given-names></name> <name><surname>Sch&#x000E4;delin</surname> <given-names>S.</given-names></name> <name><surname>Canova</surname> <given-names>V.</given-names></name> <name><surname>Amann</surname> <given-names>M.</given-names></name> <name><surname>Naegelin</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>New and enlarging white matter lesions adjacent to the ventricle system and thalamic atrophy are independently associated with lateral ventricular enlargement in multiple sclerosis</article-title>. <source>J. Neurol.</source> <volume>267</volume>, <fpage>192</fpage>&#x02013;<lpage>202</lpage>. <pub-id pub-id-type="doi">10.1007/s00415-019-09565-w</pub-id><pub-id pub-id-type="pmid">31612322</pub-id></citation></ref>
<ref id="B41">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tang</surname> <given-names>X.</given-names></name> <name><surname>Crocetti</surname> <given-names>D.</given-names></name> <name><surname>Kutten</surname> <given-names>K.</given-names></name> <name><surname>Ceritoglu</surname> <given-names>C.</given-names></name> <name><surname>Albert</surname> <given-names>M. S.</given-names></name> <name><surname>Mori</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>Segmentation of brain magnetic resonance images based on multi-atlas likelihood fusion: testing using data with a broad range of anatomical and photometric profiles</article-title>. <source>Front. Neurosci.</source> <volume>9</volume>:<fpage>61</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2015.00061</pub-id><pub-id pub-id-type="pmid">25784852</pub-id></citation></ref>
<ref id="B42">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tang</surname> <given-names>X.</given-names></name> <name><surname>Luo</surname> <given-names>Y.</given-names></name> <name><surname>Chen</surname> <given-names>Z.</given-names></name> <name><surname>Huang</surname> <given-names>N.</given-names></name> <name><surname>Johnson</surname> <given-names>H. J.</given-names></name> <name><surname>Paulsen</surname> <given-names>J. S.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>A fully-automated subcortical and ventricular shape generation pipeline preserving smoothness and anatomical topology</article-title>. <source>Front. Neurosci.</source> <volume>12</volume>:<fpage>321</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2018.00321</pub-id><pub-id pub-id-type="pmid">29867332</pub-id></citation></ref>
<ref id="B43">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Thompson</surname> <given-names>P. M.</given-names></name> <name><surname>Hayashi</surname> <given-names>K. M.</given-names></name> <name><surname>De Zubicaray</surname> <given-names>G. I.</given-names></name> <name><surname>Janke</surname> <given-names>A. L.</given-names></name> <name><surname>Rose</surname> <given-names>S. E.</given-names></name> <name><surname>Semple</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2004</year>). <article-title>Mapping hippocampal and ventricular change in Alzheimer disease</article-title>. <source>Neuroimage</source> <volume>22</volume>, <fpage>1754</fpage>&#x02013;<lpage>1766</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2004.03.040</pub-id><pub-id pub-id-type="pmid">15275931</pub-id></citation></ref>
<ref id="B44">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Toma</surname> <given-names>A. K.</given-names></name> <name><surname>Holl</surname> <given-names>E.</given-names></name> <name><surname>Kitchen</surname> <given-names>N. D.</given-names></name> <name><surname>Watkins</surname> <given-names>L. D.</given-names></name></person-group> (<year>2011</year>). <article-title>Evans&#x00027; index revisited: the need for an alternative in normal pressure hydrocephalus</article-title>. <source>Neurosurgery</source> <volume>68</volume>, <fpage>939</fpage>&#x02013;<lpage>944</lpage>. <pub-id pub-id-type="doi">10.1227/NEU.0b013e318208f5e0</pub-id><pub-id pub-id-type="pmid">21221031</pub-id></citation></ref>
<ref id="B45">
<citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Vu</surname> <given-names>T.</given-names></name> <name><surname>Jain</surname> <given-names>H.</given-names></name> <name><surname>Bucher</surname> <given-names>M.</given-names></name> <name><surname>Cord</surname> <given-names>M.</given-names></name> <name><surname>P&#x000E9;rez</surname> <given-names>P.</given-names></name></person-group> (<year>2019</year>). <article-title>&#x0201C;ADVENT: adversarial entropy minimization for domain adaptation in semantic segmentation,&#x0201D;</article-title> in <source>2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Long Beach, CA, USA</source>, <fpage>2512</fpage>&#x02013;<lpage>2521</lpage>.</citation></ref>
<ref id="B46">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>C.</given-names></name> <name><surname>Dong</surname> <given-names>S.</given-names></name> <name><surname>Zhao</surname> <given-names>X.</given-names></name> <name><surname>Papanastasiou</surname> <given-names>G.</given-names></name> <name><surname>Zhang</surname> <given-names>H.</given-names></name> <name><surname>Yang</surname> <given-names>G.</given-names></name></person-group> (<year>2020a</year>). <article-title>SaliencyGAN: deep learning semisupervised salient object detection in the fog of IoT</article-title>. <source>IEEE Trans. Indus. Inform.</source> <volume>16</volume>, <fpage>2667</fpage>&#x02013;<lpage>2676</lpage>. <pub-id pub-id-type="doi">10.1109/TII.2019.2945362</pub-id></citation></ref>
<ref id="B47">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>C.</given-names></name> <name><surname>Yang</surname> <given-names>G.</given-names></name> <name><surname>Papanastasiou</surname> <given-names>G.</given-names></name> <name><surname>Zhang</surname> <given-names>H.</given-names></name> <name><surname>Rodrigues</surname> <given-names>J.</given-names></name> <name><surname>Albuquerque</surname> <given-names>V.</given-names></name></person-group> (<year>2020b</year>). <article-title>&#x0201C;Industrial cyber-physical systems-based cloud IoT edge for federated heterogeneous distillation,&#x0201D;</article-title> in <source>IEEE Transactions on Industrial Informatics</source>, <fpage>1</fpage>.</citation></ref>
<ref id="B48">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xia</surname> <given-names>Y.</given-names></name> <name><surname>Hu</surname> <given-names>Q.</given-names></name> <name><surname>Aziz</surname> <given-names>A.</given-names></name> <name><surname>Nowinski</surname> <given-names>W. L.</given-names></name></person-group> (<year>2004</year>). <article-title>A knowledge-driven algorithm for a rapid and automatic extraction of the human cerebral ventricular system from MR neuroimages</article-title>. <source>Neuroimage</source> <volume>21</volume>, <fpage>269</fpage>&#x02013;<lpage>282</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2003.09.029</pub-id><pub-id pub-id-type="pmid">14741665</pub-id></citation></ref>
<ref id="B49">
<citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Yan</surname> <given-names>W.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Gu</surname> <given-names>S.</given-names></name> <name><surname>Huang</surname> <given-names>L.</given-names></name> <name><surname>Yan</surname> <given-names>F.</given-names></name> <name><surname>Xia</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>&#x0201C;The domain shift problem of medical image segmentation and vendor-adaptation by Unet-GAN,&#x0201D;</article-title> in <source>International Conference on Medical Image Computing and Computer-Assisted Intervention</source> (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>623</fpage>&#x02013;<lpage>631</lpage>.</citation></ref>
<ref id="B50">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhou</surname> <given-names>Z.</given-names></name> <name><surname>Siddiquee</surname> <given-names>M.</given-names></name> <name><surname>Tajbakhsh</surname> <given-names>N.</given-names></name> <name><surname>Liang</surname> <given-names>J.</given-names></name></person-group> (<year>2018</year>). <article-title>UNet&#x0002B;&#x0002B;: a nested U-Net architecture for medical image segmentation</article-title>. <source>Deep Learn Med Image Anal Multimodal Learn Clin Decis Support</source> <volume>11045</volume>, <fpage>3</fpage>&#x02013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-00889-5_1</pub-id><pub-id pub-id-type="pmid">32613207</pub-id></citation></ref>
</ref-list>
<fn-group>
<fn fn-type="financial-disclosure"><p><bold>Funding.</bold> This work was supported in part by the Natural Science Foundation of Guangdong Province (2020A1515010918), in part by the Project of Shenzhen International Cooperation Foundation (GJHZ20180926165402083), in part by the Project of Shenzhen Basic Development Project (JCYJ 20190806164409040), in part by the Clinical Research Project of Shenzhen Health and Family Planning Commission (SZLY2018018), in part by the Hangzhou Economic and Technological Development Area Strategical Grant (Imperial Institute of Advanced Technology), in part by the European Research Council Innovative Medicines Initiative on Development of Therapeutics and Diagnostics Combatting Coronavirus Infections Award DRAGON: rapiD and secuRe AI imaging based diaGnosis, stratification, fOllow-up, and preparedness for coronavirus paNdemics (H2020-JTI-IMI2 101005122), and in part by the AI for Health Imaging Award CHAIMELEON: Accelerating the Lab to Market Transition of AI Tools for Cancer Management (H2020-SC1-FA-DTS-2019-1 952172).</p></fn>
</fn-group>
</back>
</article>