<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="review-article" dtd-version="2.3" xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Bioinform.</journal-id>
<journal-title>Frontiers in Bioinformatics</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Bioinform.</abbrev-journal-title>
<issn pub-type="epub">2673-7647</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">912809</article-id>
<article-id pub-id-type="doi">10.3389/fbinf.2022.912809</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Bioinformatics</subject>
<subj-group>
<subject>Review</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Open-Source Biomedical Image Analysis Models: A Meta-Analysis and Continuous Survey</article-title>
<alt-title alt-title-type="left-running-head">Li et al.</alt-title>
<alt-title alt-title-type="right-running-head">Open-Source Biomedical Image Analysis Models</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Li</surname>
<given-names>Rui</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1876911/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Sharma</surname>
<given-names>Vaibhav</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1876923/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Thangamani</surname>
<given-names>Subasini</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1840842/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Yakimovich</surname>
<given-names>Artur</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/954964/overview"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Center for Advanced Systems Understanding (CASUS)</institution>, <institution>Helmholtz-Zentrum Dresden-Rossendorf e. V. (HZDR)</institution>, <addr-line>G&#xf6;rlitz</addr-line>, <country>Germany</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Bladder Infection and Immunity Group (BIIG)</institution>, <institution>Department of Renal Medicine</institution>, <institution>Division of Medicine</institution>, <institution>University College London</institution>, <institution>Royal Free Hospital Campus</institution>, <addr-line>London</addr-line>, <country>United Kingdom</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Artificial Intelligence for Life Sciences CIC</institution>, <addr-line>Dorset</addr-line>, <country>United Kingdom</country>
</aff>
<aff id="aff4">
<sup>4</sup>
<institution>Roche Pharma International Informatics</institution>, <institution>Roche Diagnostics GmbH</institution>, <addr-line>Mannheim</addr-line>, <country>Germany</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/369374/overview">Jan Eglinger</ext-link>, Friedrich Miescher Institute for Biomedical Research (FMI), Switzerland</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1106240/overview">Jean-Karim H&#xe9;rich&#xe9;</ext-link>, European Molecular Biology Laboratory Heidelberg, Germany</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1303046/overview">Christopher Schmied</ext-link>, Fondazione Human Technopole, Italy</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Artur Yakimovich, <email>a.yakimovich@hzdr.de</email>
</corresp>
<fn fn-type="equal" id="fn1">
<label>
<sup>&#x2020;</sup>
</label>
<p>These authors have contributed equally to this work</p>
</fn>
<fn fn-type="other">
<p>This article was submitted to Computational BioImaging, a section of the journal Frontiers in Bioinformatics</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>05</day>
<month>07</month>
<year>2022</year>
</pub-date>
<pub-date pub-type="collection">
<year>2022</year>
</pub-date>
<volume>2</volume>
<elocation-id>912809</elocation-id>
<history>
<date date-type="received">
<day>04</day>
<month>04</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>13</day>
<month>06</month>
<year>2022</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2022 Li, Sharma, Thangamani and Yakimovich.</copyright-statement>
<copyright-year>2022</copyright-year>
<copyright-holder>Li, Sharma, Thangamani and Yakimovich</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>Open-source research software has proven indispensable in modern biomedical image analysis. A multitude of open-source platforms drive image analysis pipelines and help disseminate novel analytical approaches and algorithms. Recent advances in machine learning allow for unprecedented improvement in these approaches. However, these novel algorithms come with new requirements in order to remain open source. To understand how these requirements are met, we have collected 50 biomedical image analysis models and performed a meta-analysis of their respective papers, source code, dataset, and trained model parameters. We concluded that while there are many positive trends in openness, only a fraction of all publications makes all necessary elements available to the research community.</p>
</abstract>
<kwd-group>
<kwd>machine learning</kwd>
<kwd>deep learning</kwd>
<kwd>open source</kwd>
<kwd>bioimaging</kwd>
<kwd>image analysis</kwd>
<kwd>medical imaging</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1">
<title>Introduction</title>
<p>The source code of data analysis algorithms made freely available for possible redistribution and modification (i.e. open source) has been beyond any doubt driving the ongoing revolution in Data Science (DS), Machine Learning (ML), and Artificial Intelligence (AI) (<xref ref-type="bibr" rid="B53">Sonnenburg et al., 2007</xref>; <xref ref-type="bibr" rid="B30">Landset et al., 2015</xref>; <xref ref-type="bibr" rid="B1">Abadi et al., 2016</xref>; <xref ref-type="bibr" rid="B42">Paszke et al., 2019</xref>). Encouraging open collaboration, the open-source model of code redistribution allows researchers to build upon their peers&#x2019; work on a global scale fueling the rapid iterative improvement in the respective fields (<xref ref-type="bibr" rid="B53">Sonnenburg et al., 2007</xref>). Conversely, &#x201c;closed-source&#x201d; publications not only hamper the development of the field but also make it hard for the researchers to reproduce the results disseminated in the research articles. While <italic>de jure</italic> all published work resides in the public domain, reverse engineering of an advanced algorithm implementation may often take weeks or months, making such works hard to reproduce.</p>
<p>Needless to say, open source comes in a great variety of shapes and kinds. Remarkably, just making the source code of your research software available publicly or upon request does not <italic>per se</italic> make it open source. Usage and redistribution of any original creation, be it a research article or source code, lies within the legal boundaries of copyright laws, which differ significantly from country to country. Therefore, for example, publicly available code without an explicit attribution of a respective open-source license cannot be counted or treated as open source. Due to the sheer diversity, it may be difficult to judge which specific license is right for one&#x2019;s project. Yet the choice of the license must always be dictated by the project and the intent of its authors. Consulting the licenses list approved by the Open Source Initiative is generally considered to be a good starting point.</p>
<p>The importance of open source software for computational biomedical image analysis has become self-evident in the past 3&#xa0;decades. Packages like ImageJ/Fiji (<xref ref-type="bibr" rid="B47">Schindelin et al., 2012</xref>; <xref ref-type="bibr" rid="B49">Schneider et al., 2012</xref>), CellProfiler (<xref ref-type="bibr" rid="B5">Carpenter et al., 2006</xref>), KNIME (<xref ref-type="bibr" rid="B56">Tiwari and Sekhar, 2007</xref>), and Icy (<xref ref-type="bibr" rid="B9">de Chaumont et al., 2011</xref>) not only perform the bulk of quantification tasks in the wetlabs but also serve as platforms for distribution of modules containing cutting-edge algorithms. The ability to install and use these modules and algorithms by researchers from various fields <italic>via</italic> a point-and-click interface made it possible for the research groups without image analysis specialists to obtain a qualitatively new level of biomedical insights from their data. Yet, as we transition into the data-driven and representation learning paradigm of biomedical image analysis, the availability of datasets and trained model parameters becomes as important as the open-source code.</p>
<p>The ability to download training parameters may allow researchers to skip the initial model training and focus on gradual model improvement through a technique known as transfer learning (<xref ref-type="bibr" rid="B65">West et al., 2007</xref>; <xref ref-type="bibr" rid="B40">Pan and Yang, 2010</xref>). Transfer learning has proven effective in Computer Vision (<xref ref-type="bibr" rid="B10">Deng et al., 2009</xref>) and Natural Language Processing (<xref ref-type="bibr" rid="B66">Wolf et al., 2020</xref>) domains (further reviewed in (<xref ref-type="bibr" rid="B69">Yakimovich et al., 2021</xref>)). However, the complexity of sharing the trained parameters of a model differs significantly between ML algorithms. For example, while model parameters of a conventional ML algorithm like linear regression may be conveniently shared in the text of the article, this is impossible for DL models with millions of parameters. This, in turn, requires rethinking conventional approaches to ML/DL models sharing under an open-source license.</p>
<p>In this review, we collate ML models for biomedical image analysis recently published in the peer-reviewed literature and available as open-source. We describe open-source licenses used, code availability, data availability, biomedical and ML tasks, as well as the availability of model parameters. We make the collated collection of the open-source model available <italic>via</italic> a GitHub repository and call on the research community to contribute their models to it <italic>via</italic> pull requests. Furthermore, we provide descriptive statistics of our observations and discuss the pros and cons of the status quo in the field of biomedical image analysis as well as perspectives in the general DS context. Several efforts to create biomedical ML model repositories or so-called &#x201c;zoos&#x201d; (e.g. bioimage.io) and web-based task consolidators (<xref ref-type="bibr" rid="B22">Hollandi et al., 2020</xref>; <xref ref-type="bibr" rid="B54">Stringer et al., 2021</xref>) have been undertaken. Here, rather than proposing a competing effort, we propose a continuous survey of the field &#x201c;as is&#x201d;. We achieve this through collating metadata of published papers and their respective source code, data, and model parameters (also known as weights and checkpoints).</p>
<sec id="s1-1">
<title>Continuous Biomedical Image Analysis Model Survey</title>
<p>To understand the availability, reproducibility, and accessibility of published biomedical image analysis models we have collected a survey meta-dataset of 50 model articles and preprints published within the last 10&#xa0;years. During our collection effort, we have prioritized publications with accompanying source code freely available online. In an attempt to minimize bias, we made sure that no individual medical imaging modality or biomedical task represents more than 25% of our dataset. Additionally, we have attempted to sample models published by both the biomedical community (e.g. Nature group journals), engineering community (IEEE group journals and conferences), as well as models published as preprints. For each publication we have noted the biomedical imaging modality, biomedical task (e.g. cancer), the open-source license used, reported model performance with respective metric, whether the model is dealing with the supervised task, whether the model parameters can be downloaded (as well as the respective link), links to code and dataset. Noteworthy, performance reporting is highly dependent on a dataset or benchmark. Therefore, to avoid confusion or bias we have recorded the best-reported performance for illustrative purposes only. Identical performance on a different dataset should not be expected. For the purpose of this review, we have split this meta-dataset into three tables according to the ML task of the models. The full dataset is available on GitHub (<ext-link ext-link-type="uri" xlink:href="https://github.com/casus/bim">https://github.com/casus/bim</ext-link>). To ensure the completeness and correctness of this meta-dataset we invite the research community to contribute their additions and corrections to our survey meta-dataset.</p>
<p>The first display table obtained from our meta-dataset contains 14 models aimed at biomedical image classification (<xref ref-type="table" rid="T1">Table 1</xref>). The most prevalent imaging modalities for this ML task are computed tomography (CT) and digital pathology&#x2014;both highly clinically relevant modalities. We noted that most publications had an open-source license clearly defined in their repositories. The consensus between the choices of metric is rather low, making it difficult to compare one model to the other. Although most models had both source code and datasets available, only 4 out of 14 models had trained model parameters available for download.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Biomedical Image Classification Models. Here, AUC is Area under curve, CT is computed tomography.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Imaging Modality</th>
<th align="center">Biomed Task</th>
<th align="center">License</th>
<th align="center">Reported Performance</th>
<th align="center">Parameters Download</th>
<th align="center">References</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">CT</td>
<td align="left">Lung tumor</td>
<td align="left">Apache-2.0</td>
<td align="left">0.93 Accuracy</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B29">LaLonde et al. (2020)</xref>
</td>
</tr>
<tr>
<td align="left">CT</td>
<td valign="top" align="left">Lung tumor</td>
<td align="left">MIT</td>
<td align="left">0.76 AUC</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B15">Guo et al. (2020)</xref>
</td>
</tr>
<tr>
<td align="left">CT</td>
<td valign="top" align="left">Pulmonary nodule</td>
<td align="left">GPL-3.0</td>
<td align="left">0.90 Accuracy</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B72">Zhu et al. (2018a)</xref>
</td>
</tr>
<tr>
<td align="left">CT</td>
<td valign="top" align="left">Pulmonary nodule</td>
<td align="left">MIT</td>
<td align="left">0.96 AUC</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B2">Al-Shabi et al. (2019)</xref>
</td>
</tr>
<tr>
<td align="left">CT</td>
<td valign="top" align="left">Pulmonary nodule</td>
<td align="left">MIT</td>
<td align="left">0.95 AUC</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B11">Dey et al. (2018)</xref>
</td>
</tr>
<tr>
<td align="left">Dermatoscopy</td>
<td valign="top" align="left">Skin tumor</td>
<td align="left">N/a</td>
<td align="left">0.93 Accuracy</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B8">Datta et al. (2021)</xref>
</td>
</tr>
<tr>
<td align="left">Dermatoscopy</td>
<td valign="top" align="left">Skin tumor</td>
<td align="left">MIT</td>
<td align="left">0.81 AUC</td>
<td align="center">Yes</td>
<td align="left">
<xref ref-type="bibr" rid="B74">Zunair and Ben Hamza, (2020)</xref>
</td>
</tr>
<tr>
<td align="left">Mammography</td>
<td valign="top" align="left">Breast tumor</td>
<td align="left">CC BY-NC-ND 4.0</td>
<td align="left">0.93 AUC</td>
<td align="center">Yes</td>
<td align="left">
<xref ref-type="bibr" rid="B52">Shen et al. (2021)</xref>
</td>
</tr>
<tr>
<td align="left">Digital Pathology</td>
<td valign="top" align="left">Breast tumor</td>
<td align="left">CC BY-NC-ND 4.0</td>
<td align="left">0.63 F1</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B43">Pati et al. (2022)</xref>
</td>
</tr>
<tr>
<td align="left">Mammography</td>
<td valign="top" align="left">Breast tumor</td>
<td align="left">CC BY-NC-SA 4.0</td>
<td align="left">0.84 Accuracy</td>
<td align="center">Yes</td>
<td align="left">
<xref ref-type="bibr" rid="B51">Shen et al. (2019)</xref>
</td>
</tr>
<tr>
<td align="left">Digital Pathology</td>
<td valign="top" align="left">Breast tumor</td>
<td align="left">MIT</td>
<td align="left">0.93 Accuracy</td>
<td align="center">Yes</td>
<td align="left">
<xref ref-type="bibr" rid="B44">Rakhlin et al. (2018)</xref>
</td>
</tr>
<tr>
<td align="left">Digital Pathology</td>
<td valign="top" align="left">Lung tumor</td>
<td align="left">GPL-3.0</td>
<td align="left">0.53 Kappa</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B63">Wei et al. (2019)</xref>
</td>
</tr>
<tr>
<td align="left">Digital Pathology</td>
<td valign="top" align="left">Lung tumor</td>
<td align="left">MIT</td>
<td align="left">0.97 AUC</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B7">Coudray et al. (2018)</xref>
</td>
</tr>
<tr>
<td align="left">Fluorescence microscopy</td>
<td valign="top" align="left">Host-pathogen interactions</td>
<td align="left">N/a</td>
<td align="left">0.92 Accuracy</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B13">Fisch et al. (2019)</xref>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The second display table contains 25 models (<xref ref-type="table" rid="T2">Table 2</xref>) aimed at biomedical image segmentation&#x2014;a task relevant for obtaining quantitative insights from the biomedical images (e.g. size of the tumor). Similarly to the models for biomedical image classification, the vast majority of the segmentation models have a well-defined open-source license with only a few exceptions. Again, similarly to the classification models, the consensus between performance metric choices is rather low, although Dice score reports clearly dominated. Conversely, the percentage of models with pre-trained parameters available for download is slightly higher than in the case of the classification models (36% vs 29%). However, over half of the models do not provide pre-trained parameters for the download for both segmentation and classification tasks.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Biomedical Image Segmentation Models. Here, CT is computed tomography, DSC is Dice similarity coefficient, AP is Average Precision, IoU is Intersection over Union, DOF is Depth of field, AUC is Area under curve, SHG is Second harmonic generation microscopy.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Imaging Modality</th>
<th align="center">Biomed Task</th>
<th align="center">License</th>
<th align="center">Reported Performance</th>
<th align="center">Parameters Download</th>
<th align="center">References</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">3D microscopy</td>
<td align="left">Nuclei detection</td>
<td align="left">MIT</td>
<td align="left">0.937 AP</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B20">Hirsch and Kainmueller, (2020)</xref>
</td>
</tr>
<tr>
<td align="left">CT</td>
<td valign="top" align="left">Kidney tumor</td>
<td align="left">GPL-3.0</td>
<td align="left">0.95 Dice</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B36">M&#xfc;ller and Kramer, (2021)</xref>
</td>
</tr>
<tr>
<td align="left">CT</td>
<td valign="top" align="left">Pulmonary nodule</td>
<td align="left">BSD-3-Clause</td>
<td align="left">N/a</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B16">Hancock and Magnan, (2019)</xref>
</td>
</tr>
<tr>
<td align="left">CT</td>
<td valign="top" align="left">Pulmonary nodule</td>
<td align="left">CC BY-NC-SA 4.0</td>
<td align="left">0.55 IoU</td>
<td align="center">Yes</td>
<td align="left">
<xref ref-type="bibr" rid="B3">Aresta et al. (2019)</xref>
</td>
</tr>
<tr>
<td align="left">CT</td>
<td valign="top" align="left">Pulmonary nodule</td>
<td align="left">MIT</td>
<td align="left">0.83 DSC</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B26">Keetha et al. (2020)</xref>
</td>
</tr>
<tr>
<td align="left">CT</td>
<td valign="top" align="left">Pancreas &#x26; Brain tumor</td>
<td align="left">MIT</td>
<td align="left">0.84 Dice</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B39">Oktay et al. (2018)</xref>
</td>
</tr>
<tr>
<td align="left">CT, Dermatoscopy</td>
<td valign="top" align="left">Lung tumor and Skin tumor</td>
<td align="left">N/a</td>
<td align="left">0.9965 Jaccard</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B25">Kaul et al. (2019)</xref>
</td>
</tr>
<tr>
<td align="left">CT</td>
<td valign="top" align="left">Brain tumor</td>
<td align="left">Apache 2.0</td>
<td align="left">0.89 Dice</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B23">Isensee et al. (2018)</xref>
</td>
</tr>
<tr>
<td align="left">MRI</td>
<td valign="top" align="left">Brain tumor</td>
<td align="left">Apache 2.0</td>
<td align="left">0.79 Dice</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B62">Wang et al. (2021)</xref>
</td>
</tr>
<tr>
<td align="left">MRI</td>
<td valign="top" align="left">Brain tumor</td>
<td align="left">CC BY-NC-ND 4.0</td>
<td align="left">0.76 Dice</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B4">Baek et al. (2019)</xref>
</td>
</tr>
<tr>
<td align="left">Digital Pathology</td>
<td valign="top" align="left">Breast tumor</td>
<td align="left">CC BY-NC-ND 4.0</td>
<td align="left">0.893 F1</td>
<td align="center">Yes</td>
<td align="left">
<xref ref-type="bibr" rid="B32">Le et al. (2020)</xref>
</td>
</tr>
<tr>
<td align="left">Digital Pathology</td>
<td valign="top" align="left">Lung tumor</td>
<td align="left">CC-BY</td>
<td align="left">0.83 Accuracy</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B57">Tomita et al. (2019)</xref>
</td>
</tr>
<tr>
<td align="left">Digital Pathology</td>
<td valign="top" align="left">Multiple pathologies</td>
<td align="left">MIT</td>
<td align="left">N/a</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B27">Khened et al. (2021)</xref>
</td>
</tr>
<tr>
<td align="left">Electron microscopy</td>
<td valign="top" align="left">Multiple pathologies</td>
<td align="left">MIT</td>
<td align="left">0.5 VI</td>
<td align="center">Yes</td>
<td align="left">
<xref ref-type="bibr" rid="B33">Lee et al. (2017)</xref>
</td>
</tr>
<tr>
<td align="left">Fluorescence microscopy</td>
<td valign="top" align="left">Cellular structures reconstruction</td>
<td align="left">N/a</td>
<td align="left">20 x Enhancement in DOF</td>
<td align="center">Yes</td>
<td align="left">
<xref ref-type="bibr" rid="B68">Wu et al. (2019)</xref>
</td>
</tr>
<tr>
<td align="left">Fluorescence microscopy</td>
<td valign="top" align="left">Nuclei detection</td>
<td align="left">BSD-3-Clause</td>
<td align="left">0.94 Accuracy</td>
<td align="center">Yes</td>
<td align="left">
<xref ref-type="bibr" rid="B64">Weigert et al. (2020)</xref>
</td>
</tr>
<tr>
<td align="left">Microscopy</td>
<td valign="top" align="left">Cellular reconstruction</td>
<td align="left">N/a</td>
<td align="left">0.69 AP</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B21">Hirsch et al. (2020)</xref>
</td>
</tr>
<tr>
<td align="left">MRI</td>
<td valign="top" align="left">Brain tumor</td>
<td align="left">BSD-3-Clause</td>
<td align="left">0.87 Dice</td>
<td align="center">Yes</td>
<td align="left">
<xref ref-type="bibr" rid="B61">Wang et al. (2018)</xref>
</td>
</tr>
<tr>
<td align="left">MRI</td>
<td valign="top" align="left">Brain tumor</td>
<td align="left">MIT</td>
<td align="left">0.85 Dice</td>
<td align="center">Yes</td>
<td align="left">
<xref ref-type="bibr" rid="B17">Havaei et al. (2017)</xref>
</td>
</tr>
<tr>
<td align="left">MRI</td>
<td valign="top" align="left">Brain tumor</td>
<td align="left">MIT</td>
<td align="left">0.90 Dice</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B23">Isensee et al. (2018)</xref>
</td>
</tr>
<tr>
<td align="left">MRI</td>
<td valign="top" align="left">Brain tumor</td>
<td align="left">MIT</td>
<td align="left">0.91 Dice</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B37">Myronenko, (2019)</xref>
</td>
</tr>
<tr>
<td align="left">SHG</td>
<td valign="top" align="left">Bone disease</td>
<td align="left">GPL-3.0</td>
<td align="left">0.78 Accuracy</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B48">Schmarje et al. (2019)</xref>
</td>
</tr>
<tr>
<td align="left">Time-lapse microscopy</td>
<td valign="top" align="left">Nuclei detection</td>
<td align="left">N/a</td>
<td align="left">0.92 Accuracy</td>
<td align="center">Yes</td>
<td align="left">
<xref ref-type="bibr" rid="B50">Shailja et al. (2021)</xref>
</td>
</tr>
<tr>
<td align="left">Ultrasound imaging</td>
<td valign="top" align="left">Intraventricular hemorrhage</td>
<td align="left">MIT</td>
<td align="left">0.89 Dice</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B59">Valanarasu et al. (2020)</xref>
</td>
</tr>
<tr>
<td align="left">MRI</td>
<td valign="top" align="left">Brain tumor</td>
<td align="left">N/a</td>
<td align="left">0.81 Dice</td>
<td align="center">Yes</td>
<td align="left">
<xref ref-type="bibr" rid="B31">Larrazabal et al. (2021)</xref>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Finally, we have also examined biomedical image analysis models aimed at less popular ML tasks including data generation, object detection or reconstruction (<xref ref-type="table" rid="T3">Table 3</xref>). Apart from digital pathology and CT scans, this group of models also contains light and electron microscopy. Remarkably, only 19% of models in this group had downloadable model parameters. At the same time, almost all the models in this group had well-attributed open-source licenses. This may suggest that parameter sharing is not very common in highly specialized fields like microscopy. Interestingly, for this and other groups of ML tasks, we have found that parameter sharing was more common in models submitted as a part of a data challenge. This may be simply a result of data challenge participation conditions.</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Other Biomedical Image Models. Here, CT is computed tomography.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Imaging Modality</th>
<th align="center">Biomed Task</th>
<th align="center">ML Task</th>
<th align="center">License</th>
<th align="center">Parameters Download</th>
<th align="center">References</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Mammography</td>
<td align="left">Breast tumor</td>
<td align="left">Classification &#x26; Detection</td>
<td align="left">N/a</td>
<td align="center">Yes</td>
<td align="left">
<xref ref-type="bibr" rid="B45">Ribli et al. (2018)</xref>
</td>
</tr>
<tr>
<td align="left">Fluorescence microscopy</td>
<td valign="top" align="left">Cellular structures reconstruction</td>
<td align="left">Data generation</td>
<td align="left">Apache-2.0</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B12">Eschweiler et al. (2021)</xref>
</td>
</tr>
<tr>
<td align="left">CT</td>
<td valign="top" align="left">Pulmonary nodule</td>
<td align="left">Detection</td>
<td align="left">Apache-2.0</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B73">Zhu et al. (2018b)</xref>
</td>
</tr>
<tr>
<td align="left">CT</td>
<td valign="top" align="left">Pulmonary nodule</td>
<td align="left">Detection</td>
<td align="left">MIT</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B34">Li and Fan, (2020)</xref>
</td>
</tr>
<tr>
<td align="left">Digital Pathology</td>
<td valign="top" align="left">Multiple pathologies</td>
<td align="left">Graph embedding</td>
<td align="left">AGPL 3.0</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B24">Jaume et al. (2021)</xref>
</td>
</tr>
<tr>
<td align="left">Mammography</td>
<td valign="top" align="left">Breast tumor</td>
<td align="left">Image Inpainting &#x26; Data generation</td>
<td align="left">CC BY-NC-ND 4.0</td>
<td align="center">Yes</td>
<td align="left">
<xref ref-type="bibr" rid="B67">Wu et al. (2018)</xref>
</td>
</tr>
<tr>
<td align="left">Confocal microscopy</td>
<td valign="top" align="left">Cellular structures reconstruction</td>
<td align="left">Reconstruction</td>
<td align="left">Apache-2.0</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B60">Vizca&#xed;no et al. (2021)</xref>
</td>
</tr>
<tr>
<td align="left">Cryo-electron microscopy</td>
<td valign="top" align="left">Cellular structures reconstruction</td>
<td align="left">Reconstruction</td>
<td align="left">GPL-3.0</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B71">Zhong et al. (2019)</xref>
</td>
</tr>
<tr>
<td align="left">Cryo-electron microscopy</td>
<td valign="top" align="left">Protein structures reconstruction</td>
<td align="left">Reconstruction</td>
<td align="left">GPL-3.0</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B58">Ullrich et al. (2019)</xref>
</td>
</tr>
<tr>
<td align="left">Electron microscopy</td>
<td valign="top" align="left">Cellular structures reconstruction</td>
<td align="left">Reconstruction</td>
<td align="left">N/a</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B14">Guay et al. (2021)</xref>
</td>
</tr>
<tr>
<td align="left">3D microscopy</td>
<td valign="top" align="left">Image acquisition</td>
<td align="left">Reconstruction</td>
<td align="left">BSD-3-Clause</td>
<td align="center">No</td>
<td align="left">
<xref ref-type="bibr" rid="B46">Saha et al. (2020)</xref>
</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s1-2">
<title>Trends Meta-Analysis in Biomedical Image Analysis Model</title>
<p>To understand general trends in the collection of our open-source models we have computed respective fractions of each descriptive category we have assigned to each work. The assignment was performed through careful analysis of the respective research article, code repository, dataset repository, and the availability of the trained model parameters (<xref ref-type="fig" rid="F1">Figure 1</xref>). While admittedly 50 papers constitute a relatively small sample size, we have made the best reasonable effort to ensure the sampling was unbiased. Specifically, the set of models we have reviewed addresses the following biomedical tasks (from most to least frequent): pulmonary nodule, brain tumor, breast tumor, cellular structures reconstruction, lung tumor, cell nuclei detection, multiple pathologies, skin tumor, protein structures reconstruction, kidney tumor, pancreas and brain tumor, lung tumor and skin tumor, host-pathogen interactions, bone disease, image acquisition, intraventricular hemorrhage (<xref ref-type="fig" rid="F1">Figure 1A</xref>).</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Meta-analysis of trends in open-source biomedical image analysis models <bold>(A)</bold> Biomedical tasks overview and breakdown in our collection <bold>(B)</bold> Variety of imaging modalities <bold>(C)</bold> Machine learning tasks the models are aimed at <bold>(D)</bold> Are the ML algorithms used for supervised or unsupervised learning tasks <bold>(E)</bold> Prevalence of open source licenses used <bold>(F)</bold> Availability of datasets <bold>(G)</bold> Availability of trained model parameters <bold>(H)</bold> Prevalence of platforms used for trained model parameters sharing. Here, CT is computed tomography, MRI is magnetic resonance imaging.</p>
</caption>
<graphic xlink:href="fbinf-02-912809-g001.tif"/>
</fig>
<p>From the perspective of imaging modalities, the models we reviewed span the following: computed tomography (CT), digital pathology, magnetic resonance imaging (MRI), mammography, fluorescence microscopy, 3D microscopy, cryo-electron microscopy, dermatoscopy, electron microscopy, confocal microscopy, CT and dermatoscopy, light and electron microscopy, second harmonic generation microscopy, time-lapse microscopy, ultrasound imaging (<xref ref-type="fig" rid="F1">Figure 1B</xref>). From the perspective of ML tasks these models covered the following: segmentation, reconstruction, classification, object detection, image inpainting and data generation, graph embedding, classification, and detection (<xref ref-type="fig" rid="F1">Figure 1C</xref>). 86% of the models we have reviewed were addressing supervised tasks and 14% unsupervised tasks (<xref ref-type="fig" rid="F1">Figure 1D</xref>).</p>
<p>Within our collection of open-source models, we have noted that 32% of the authors have selected the MIT license, 18% have selected Apache-2.0, 12%&#x2014;GPL-3.0, 10%&#x2014;BSD-3-Clause license, 8%&#x2014;CC BY-NC-SA 4.0 license. Remarkably, another 8% have published their code without license attribution, arguably making it harder for the field to understand the freedom to operate with the code made available with the paper (<xref ref-type="fig" rid="F1">Figure 1E</xref>). Within these papers, 84% of the authors made the dataset used to train the model available and clearly indicated within the paper or the code repository (<xref ref-type="fig" rid="F1">Figure 1F</xref>). Overall, this amounted to the vast majority of the works which we have selected to have a clear open-source license designation, as well as a dataset available.</p>
<p>Remarkably, while providing the model&#x2019;s source code, as well as, in most cases, the model&#x2019;s dataset, an impressive 68% of the contributions we have reviewed did not provide trained model parameters (<xref ref-type="fig" rid="F1">Figure 1G</xref>). Breaking down by the publishers or repositories, 43% and 31% of papers published by Nature group and Springer respectively provided model parameters. However, only 25% of IEEE papers and 14% of arXiv preprints provided parameters. Altogether, the low percentage of shared parameters suggests that the efforts to reproduce these papers came with the caveat of provisioning a hardware setup capable of wielding the computational load required by the respective model. In some cases that requires access to high-capacity computing. Furthermore, this way, instead of simply building upon the models trained, the efforts of the authors would have to be first reproduced. Needless to say, should any of the papers become seminal these high-performance computations would have to be repeated time and time again, possibly taking days of GPU computation.</p>
<p>Interestingly, of the authors who have chosen to make the trained parameters available to the readers around 25% have chosen to deposit the parameters on GitHub, while 19% and 6% have opted for Google drive and Dropbox services respectively. The rest deposited their parameters on the proprietary and other services (<xref ref-type="fig" rid="F1">Figure 1H</xref>).</p>
</sec>
</sec>
<sec sec-type="discussion" id="s2">
<title>Discussion</title>
<p>The advent of ML and specifically representation learning is opening a new horizon for biomedical image analysis. Yet, the success of these new advanced ML approaches brings about new requirements and standards to ensure quality and reproducibility (<xref ref-type="bibr" rid="B19">Hernandez-Boussard et al., 2020</xref>; <xref ref-type="bibr" rid="B35">Mongan et al., 2020</xref>; <xref ref-type="bibr" rid="B38">Norgeot et al., 2020</xref>; <xref ref-type="bibr" rid="B18">Heil et al., 2021</xref>; <xref ref-type="bibr" rid="B28">Laine et al., 2021</xref>). Several minimalistic quality standards applicable to the clinical setting have been proposed (<xref ref-type="bibr" rid="B19">Hernandez-Boussard et al., 2020</xref>; <xref ref-type="bibr" rid="B35">Mongan et al., 2020</xref>; <xref ref-type="bibr" rid="B38">Norgeot et al., 2020</xref>), and while coming from slightly different perspectives they demonstrate an overlap on essential topics like the dataset description, comparison to baseline and hyperparameters sharing. For example, CLAIM (<xref ref-type="bibr" rid="B35">Mongan et al., 2020</xref>) and MINIMAR (<xref ref-type="bibr" rid="B19">Hernandez-Boussard et al., 2020</xref>) approaches aim to adhere to a clinical tradition. Authors define a checklist including a structure of an academic biomedical paper, requiring either a lengthy biomedical problem description (CLAIM) or descriptive statistics of the dataset&#x2019;s internal structure (MINIMAR). At the same time, MI-CLAIM (<xref ref-type="bibr" rid="B38">Norgeot et al., 2020</xref>) aims to adhere to the Data Science tradition, focusing specifically on data preprocessing and baseline comparison. Remarkably, even though item 24 of the CLAIM checklist explicitly mentions the importance of specifying the source of the starting weights (parameters) if transfer learning is employed, all three approaches fail to explicitly encourage sharing of the trained model parameters. 
Instead of proposing yet another checklist, the current survey aims to understand the extent to which the model parameters are shared in the biomedical image analysis field and emphasize the importance of parameter sharing to foster reproducibility in the field.</p>
<p>The past 3&#xa0;decades have successfully demonstrated the viability of the open-source model for the research software in this field, as well as the role of open-source software in fostering scientific progress. However, the change of modeling paradigm to DL requires new checks and balances to ensure the results are reproducible and the efforts are not doubled. Furthermore, major computational efforts inevitably come with an environmental footprint (<xref ref-type="bibr" rid="B55">Strubell et al., 2020</xref>). Making parameters of the trained models available to the research community not only could minimize this footprint, but also open new prospects for the researcher wishing to fine-tune the pre-trained models to their task of choice. Such an approach proved incredibly fruitful in the field of natural language processing (<xref ref-type="bibr" rid="B70">Zhang et al., 2020</xref>).</p>
<p>Remarkably, in the current survey, we have found that only 32% of the biomedical models we have reviewed made the trained model parameters available for download. On one hand, such a low number of trained models available for download may be explained by the fact that many journals and conferences do not require trained models to warrant publication. On the other hand, with parameters of some models requiring hundreds of megabytes of storage, there are not many opportunities to share these files. Interestingly, while some researchers shared their trained model parameters <italic>via</italic> platforms like GitHub, Google drive, and Dropbox, the vast majority opted for often proprietary sites to share these parameters (<xref ref-type="fig" rid="F1">Figure 1H</xref>). In our opinion, this indicates the necessity of hubs and platforms for sharing trained biomedical image analysis models.</p>
<p>It is worth noting that most cloud storage services like Google drive or Dropbox are more suited for instant file sharing rather than archival deposition of model parameters. These storage solutions don&#x2019;t offer data immutability or digital object identifiers attached to them, and hence can simply be overwritten or disappear leaving crucial content inaccessible. Authors opting for self-hosting of model parameters also likely underestimate the workload of the long-term serving of archival data. Instead of the aforementioned approaches to model sharing, one should take advantage of efforts like BioImage.io, Tensorflow Hub (<xref ref-type="bibr" rid="B41">Paper, 2021</xref>), PyTorch Hub, DLHub (<xref ref-type="bibr" rid="B6">Chard et al., 2019</xref>), or similar in order to foster consistency and reproducibility of their results. Arguably, one of the most intuitive experiences of model parameters sharing for the end-users is currently offered by the HuggingFace platform in the domain of natural language processing. This has largely been possible through the platform&#x2019;s own ML library allowing for improved compatibility (<xref ref-type="bibr" rid="B66">Wolf et al., 2020</xref>).</p>
<p>Interestingly, the vast majority of authors have chosen MIT and Apache-2.0 as their open-source licenses. Both Apache-2.0 and MIT are known for being permissive, rather than copyleft licenses. Furthermore, both licenses are very clearly formulated and easy to use. It is tempting to speculate that their popularity is a result of the simplicity and openness that these licenses offer.</p>
<p>However, noteworthy, our survey is limited to the papers we reviewed. To improve the representativeness of our meta-analysis, as well as encourage the dissemination of the open-source models in biomedical image analysis we call on our peers to contribute to our collection <italic>via</italic> the GitHub repository. Specifically, we invite the researchers to fork our repository, make additions to the content of the list following the contribution guidelines and merge them in <italic>via</italic> pull request. This way we hope to not only obtain an up-to-date state of the field but also ensure the code, datasets and trained model parameters are easier to find.</p>
</sec>
</body>
<back>
<sec id="s3">
<title>Author Contributions</title>
<p>AY conceived the idea. AY, ST, VS, and RL reviewed the published works and collated the data. AY, ST, VS, and RL wrote the manuscript.</p>
</sec>
<sec id="s4">
<title>Funding</title>
<p>This work was partially funded by the Center for Advanced Systems Understanding (CASUS) which is financed by Germany&#x2019;s Federal Ministry of Education and Research (BMBF) and by the Saxon Ministry for Science, Culture and Tourism (SMWK) with tax funds on the basis of the budget approved by the Saxon State Parliament. This work has been partially funded by OPTIMA. OPTIMA is funded through the IMI2 Joint Undertaking and is listed under grant agreement No. 101034347. IMI2 receives support from the European Union&#x2019;s Horizon 2020 research and innovation programme and the European Federation of Pharmaceutical Industries and Associations (EFPIA). IMI supports collaborative research projects and builds networks of industrial and academic experts in order to boost pharmaceutical innovation in Europe. The views communicated within are those of OPTIMA. Neither the IMI nor the European Union, EFPIA, or any Associated Partners are responsible for any use that may be made of the information contained herein.</p>
</sec>
<sec sec-type="COI-statement" id="s5">
<title>Conflict of Interest</title>
<p>AY was employed by Roche Pharma International Informatics, Roche Diagnostics GmbH, Mannheim, Germany.</p>
<p> The remaining authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s6">
<title>Publisher&#x2019;s Note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Abadi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Barham</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Davis</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Dean</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). &#x201c;<article-title>TensorFlow: A System for Large-Scale Machine Learning</article-title>,&#x201d; in <source>12th USENIX Symposium on Operating Systems Design and Implementation (OSDI 16)</source> (<publisher-name>usenix.org</publisher-name>), <fpage>265</fpage> </citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Al-Shabi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Lan</surname>
<given-names>B. L.</given-names>
</name>
<name>
<surname>Chan</surname>
<given-names>W. Y.</given-names>
</name>
<name>
<surname>Ng</surname>
<given-names>K. H.</given-names>
</name>
<name>
<surname>Tan</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Lung Nodule Classification Using Deep Local-Global Networks</article-title>. <source>Int. J. Comput. Assist. Radiol. Surg.</source> <volume>14</volume>, <fpage>1815</fpage>&#x2013;<lpage>1819</lpage>. <pub-id pub-id-type="doi">10.1007/s11548-019-01981-7</pub-id> </citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aresta</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Jacobs</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Ara&#xfa;jo</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Cunha</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ramos</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>van Ginneken</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>iW-Net: an Automatic and Minimalistic Interactive Lung Nodule Segmentation Deep Network</article-title>. <source>Sci. Rep.</source> <volume>9</volume>, <fpage>11591</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-019-48004-8</pub-id> </citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Baek</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Allen</surname>
<given-names>B. G.</given-names>
</name>
<name>
<surname>Buatti</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Smith</surname>
<given-names>B. J.</given-names>
</name>
<name>
<surname>Tong</surname>
<given-names>L.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>Deep Segmentation Networks Predict Survival of Non-small Cell Lung Cancer</article-title>. <source>Sci. Rep.</source> <volume>9</volume>, <fpage>17286</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-019-53461-2</pub-id> </citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Carpenter</surname>
<given-names>A. E.</given-names>
</name>
<name>
<surname>Jones</surname>
<given-names>T. R.</given-names>
</name>
<name>
<surname>Lamprecht</surname>
<given-names>M. R.</given-names>
</name>
<name>
<surname>Clarke</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Kang</surname>
<given-names>I. H.</given-names>
</name>
<name>
<surname>Friman</surname>
<given-names>O.</given-names>
</name>
<etal/>
</person-group> (<year>2006</year>). <article-title>CellProfiler: Image Analysis Software for Identifying and Quantifying Cell Phenotypes</article-title>. <source>Genome Biol.</source> <volume>7</volume>, <fpage>R100</fpage>. <pub-id pub-id-type="doi">10.1186/gb-2006-7-10-r100</pub-id> </citation>
</ref>
<ref id="B6">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Chard</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Chard</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Ward</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Babuji</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Woodard</surname>
<given-names>A.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). &#x201c;<article-title>DLHub: Model and Data Serving for Science</article-title>,&#x201d; in <conf-name>2019 IEEE International Parallel and Distributed Processing Symposium (IPDPS) (ieeexplore.ieee.org)</conf-name>, <fpage>283</fpage>&#x2013;<lpage>292</lpage>. <pub-id pub-id-type="doi">10.1109/ipdps.2019.00038</pub-id> </citation>
</ref>
<ref id="B7">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Coudray</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Ocampo</surname>
<given-names>P. S.</given-names>
</name>
<name>
<surname>Sakellaropoulos</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Narula</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Classification and Mutation Prediction from Non&#x2013;small Cell Lung Cancer Histopathology Images Using Deep Learning</article-title>,&#x201d;<source>Nat. Med.</source> <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://www.nature.com/articles/s41591-018-0177-5?sf197831152=1">https://www.nature.com/articles/s41591-018-0177-5?sf197831152&#x3d;1</ext-link>.</comment> </citation>
</ref>
<ref id="B8">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Datta</surname>
<given-names>S. K.</given-names>
</name>
<name>
<surname>Shaikh</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Srihari</surname>
<given-names>S. N.</given-names>
</name>
<name>
<surname>Gao</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>Soft Attention Improves Skin Cancer Classification Performance</article-title>,&#x201d; in <source>Interpretability of Machine Intelligence in Medical Image Computing, and Topological Data Analysis and its Applications for Medical Data</source> (<publisher-name>Springer International Publishing</publisher-name>), <fpage>13</fpage>&#x2013;<lpage>23</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-87444-5_2</pub-id> </citation>
</ref>
<ref id="B9">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>de Chaumont</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Dallongeville</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Olivo-Marin</surname>
<given-names>J.-C.</given-names>
</name>
</person-group> (<year>2011</year>). &#x201c;<article-title>ICY: A New Open-Source Community Image Processing Software</article-title>,&#x201d; in <conf-name>2011 IEEE International Symposium on Biomedical Imaging: From Nano to Macro</conf-name> (<publisher-name>ieeexplore.ieee.org</publisher-name>), <fpage>234</fpage>&#x2013;<lpage>237</lpage>. <pub-id pub-id-type="doi">10.1109/isbi.2011.5872395</pub-id> </citation>
</ref>
<ref id="B10">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Deng</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Dong</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Socher</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>L.-J.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Fei-Fei</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2009</year>). &#x201c;<article-title>ImageNet: A Large-Scale Hierarchical Image Database</article-title>,&#x201d; in <conf-name>2009 IEEE Conference on Computer Vision and Pattern Recognition</conf-name>, <fpage>248</fpage>&#x2013;<lpage>255</lpage>. <pub-id pub-id-type="doi">10.1109/cvpr.2009.5206848</pub-id> </citation>
</ref>
<ref id="B11">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Dey</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Lu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Hong</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Diagnostic Classification of Lung Nodules Using 3D Neural Networks</article-title>,&#x201d; in <conf-name>2018 IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018) (ieeexplore.ieee.org)</conf-name>, <fpage>774</fpage>&#x2013;<lpage>778</lpage>. <pub-id pub-id-type="doi">10.1109/isbi.2018.8363687</pub-id> </citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Eschweiler</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Rethwisch</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Jarchow</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Koppers</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Stegmaier</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>3D Fluorescence Microscopy Data Synthesis for Segmentation and Benchmarking</article-title>. <source>PLoS One</source> <volume>16</volume>, <fpage>e0260509</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0260509</pub-id> </citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fisch</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Yakimovich</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Clough</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Wright</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Bunyan</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Howell</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>Defining Host-Pathogen Interactions Employing an Artificial Intelligence Workflow</article-title>. <source>Elife</source> <volume>8</volume>, <fpage>e40560</fpage>. <pub-id pub-id-type="doi">10.7554/eLife.40560</pub-id> </citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Guay</surname>
<given-names>M. D.</given-names>
</name>
<name>
<surname>Emam</surname>
<given-names>Z. A. S.</given-names>
</name>
<name>
<surname>Anderson</surname>
<given-names>A. B.</given-names>
</name>
<name>
<surname>Aronova</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Pokrovskaya</surname>
<given-names>I. D.</given-names>
</name>
<name>
<surname>Storrie</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Dense Cellular Segmentation for EM Using 2D-3D Neural Network Ensembles</article-title>. <source>Sci. Rep.</source> <volume>11</volume>, <fpage>2561</fpage>&#x2013;<lpage>2611</lpage>. <pub-id pub-id-type="doi">10.1038/s41598-021-81590-0</pub-id> </citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Guo</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Kruger</surname>
<given-names>U.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Kalra</surname>
<given-names>M. K.</given-names>
</name>
<name>
<surname>Yan</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Knowledge-Based Analysis for Mortality Prediction from CT Images</article-title>. <source>IEEE J. Biomed. Health Inf.</source> <volume>24</volume>, <fpage>457</fpage>&#x2013;<lpage>464</lpage>. <pub-id pub-id-type="doi">10.1109/JBHI.2019.2946066</pub-id> </citation>
</ref>
<ref id="B16">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Hancock</surname>
<given-names>M. C.</given-names>
</name>
<name>
<surname>Magnan</surname>
<given-names>J. F.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Level Set Image Segmentation with Velocity Term Learned from Data with Applications to Lung Nodule Segmentation</article-title>. <comment>
<italic>arXiv [eess.IV]</italic>. Available at: <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/1910.03191">http://arxiv.org/abs/1910.03191</ext-link>
</comment> (<comment>Accessed March 31, 2022</comment>). </citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Havaei</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Davy</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Warde-Farley</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Biard</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Courville</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Bengio</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>Brain Tumor Segmentation with Deep Neural Networks</article-title>. <source>Med. Image Anal.</source> <volume>35</volume>, <fpage>18</fpage>&#x2013;<lpage>31</lpage>. <pub-id pub-id-type="doi">10.1016/j.media.2016.05.004</pub-id> </citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Heil</surname>
<given-names>B. J.</given-names>
</name>
<name>
<surname>Hoffman</surname>
<given-names>M. M.</given-names>
</name>
<name>
<surname>Markowetz</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>S. I.</given-names>
</name>
<name>
<surname>Greene</surname>
<given-names>C. S.</given-names>
</name>
<name>
<surname>Hicks</surname>
<given-names>S. C.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Reproducibility Standards for Machine Learning in the Life Sciences</article-title>. <source>Nat. Methods</source> <volume>18</volume>, <fpage>1132</fpage>&#x2013;<lpage>1135</lpage>. <pub-id pub-id-type="doi">10.1038/s41592-021-01256-7</pub-id> </citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hernandez-Boussard</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Bozkurt</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ioannidis</surname>
<given-names>J. P. A.</given-names>
</name>
<name>
<surname>Shah</surname>
<given-names>N. H.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>MINIMAR (MINimum Information for Medical AI Reporting): Developing Reporting Standards for Artificial Intelligence in Health Care</article-title>. <source>J. Am. Med. Inf. Assoc.</source> <volume>27</volume>, <fpage>2011</fpage>&#x2013;<lpage>2015</lpage>. <pub-id pub-id-type="doi">10.1093/jamia/ocaa088</pub-id> </citation>
</ref>
<ref id="B20">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Hirsch</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Kainmueller</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>An Auxiliary Task for Learning Nuclei Segmentation in 3D Microscopy Images</article-title>,&#x201d; in <source>
<italic>Proceedings Of the Third Conference On Medical Imaging With Deep Learning</italic> Proceedings of Machine Learning Research</source>. Editors <person-group person-group-type="editor">
<name>
<surname>Arbel</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Ben Ayed</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>de Bruijne</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Descoteaux</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Lombaert</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Pal</surname>
<given-names>C.</given-names>
</name>
</person-group> (<publisher-loc>Montreal, QC, Canada</publisher-loc>: <publisher-name>PML</publisher-name>), <fpage>304</fpage> </citation>
</ref>
<ref id="B21">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Hirsch</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Mais</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Kainmueller</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>PatchPerPix for Instance Segmentation</article-title>. <comment>
<italic>arXiv [cs.CV]</italic>. Available at: <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/2001.07626">http://arxiv.org/abs/2001.07626</ext-link>
</comment> (<comment>Accessed March 30, 2022</comment>). </citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hollandi</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Szkalisity</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Toth</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Tasnadi</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Molnar</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Mathe</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>nucleAIzer: A Parameter-free Deep Learning Framework for Nucleus Segmentation Using Image Style Transfer</article-title>. <source>Cell. Syst.</source> <volume>10</volume>, <fpage>453</fpage>&#x2013;<lpage>e6</lpage>. <pub-id pub-id-type="doi">10.1016/j.cels.2020.04.003</pub-id> </citation>
</ref>
<ref id="B23">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Isensee</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Kickingereder</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Wick</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Bendszus</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Maier-Hein</surname>
<given-names>K. H.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Brain Tumor Segmentation and Radiomics Survival Prediction: Contribution to the BRATS 2017 Challenge</article-title>,&#x201d; in <source>Brainlesion: Glioma, Multiple Sclerosis, Stroke and Traumatic Brain Injuries</source> (<publisher-loc>Quebec City, Canada</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>), <fpage>287</fpage>&#x2013;<lpage>297</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-75238-9_25</pub-id> </citation>
</ref>
<ref id="B24">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Jaume</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Pati</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Anklin</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Foncubierta</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Gabrani</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>HistoCartography: A Toolkit for Graph Analytics in Digital Pathology</article-title>,&#x201d; in <source>
<italic>Proceedings Of the MICCAI Workshop On Computational Pathology</italic> Proceedings of Machine Learning Research.</source> Editors <person-group person-group-type="editor">
<name>
<surname>Atzori</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Burlutskiy</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Ciompi</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Minhas</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>M&#xfc;ller</surname>
<given-names>H.</given-names>
</name>
</person-group> (<publisher-name>PMLR</publisher-name>), <fpage>117</fpage> </citation>
</ref>
<ref id="B25">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Kaul</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Manandhar</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Pears</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Focusnet: An Attention-Based Fully Convolutional Network for Medical Image Segmentation</article-title>,&#x201d; in <conf-name>2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019) (ieeexplore.ieee.org)</conf-name>, <fpage>455</fpage>&#x2013;<lpage>458</lpage>. <pub-id pub-id-type="doi">10.1109/isbi.2019.8759477</pub-id> </citation>
</ref>
<ref id="B26">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Keetha</surname>
<given-names>N. V.</given-names>
</name>
<name>
<surname>Samson</surname>
<given-names>A. B. P.</given-names>
</name>
<name>
<surname>Annavarapu</surname>
<given-names>C. S. R.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>U-det: A Modified U-Net Architecture with Bidirectional Feature Network for Lung Nodule Segmentation</article-title>. <comment>
<italic>arXiv [eess.IV]</italic>. Available at: <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/2003.09293">http://arxiv.org/abs/2003.09293</ext-link>
</comment> (<comment>Accessed March 31, 2022</comment>). </citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Khened</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Kori</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Rajkumar</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Krishnamurthi</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Srinivasan</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>A Generalized Deep Learning Framework for Whole-Slide Image Segmentation and Analysis</article-title>. <source>Sci. Rep.</source> <volume>11</volume>, <fpage>11579</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-021-90444-8</pub-id> </citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Laine</surname>
<given-names>R. F.</given-names>
</name>
<name>
<surname>Arganda-Carreras</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Henriques</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Jacquemet</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Avoiding a Replication Crisis in Deep-Learning-Based Bioimage Analysis</article-title>. <source>Nat. Methods</source> <volume>18</volume>, <fpage>1136</fpage>&#x2013;<lpage>1144</lpage>. <pub-id pub-id-type="doi">10.1038/s41592-021-01284-3</pub-id> </citation>
</ref>
<ref id="B29">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>LaLonde</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Torigian</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Bagci</surname>
<given-names>U.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>Encoding Visual Attributes in Capsules for Explainable Medical Diagnoses</article-title>,&#x201d; in <source>Medical Image Computing and Computer Assisted Intervention &#x2013; MICCAI 2020</source> (<publisher-name>Springer International Publishing</publisher-name>), <fpage>294</fpage>&#x2013;<lpage>304</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-59710-8_29</pub-id> </citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Landset</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Khoshgoftaar</surname>
<given-names>T. M.</given-names>
</name>
<name>
<surname>Richter</surname>
<given-names>A. N.</given-names>
</name>
<name>
<surname>Hasanin</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>A Survey of Open Source Tools for Machine Learning with Big Data in the Hadoop Ecosystem</article-title>. <source>J. Big Data</source> <volume>2</volume>, <fpage>1</fpage>&#x2013;<lpage>36</lpage>. <pub-id pub-id-type="doi">10.1186/s40537-015-0032-1</pub-id> </citation>
</ref>
<ref id="B31">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Larrazabal</surname>
<given-names>A. J.</given-names>
</name>
<name>
<surname>Mart&#xed;nez</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Dolz</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ferrante</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>Orthogonal Ensemble Networks for Biomedical Image Segmentation</article-title>,&#x201d; in <source>Medical Image Computing and Computer Assisted Intervention &#x2013; MICCAI 2021</source> (<publisher-loc>Strasbourg, France</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>), <fpage>594</fpage>&#x2013;<lpage>603</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-87199-4_56</pub-id> </citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Le</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Gupta</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Hou</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Abousamra</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Fassler</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Torre-Healy</surname>
<given-names>L.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Utilizing Automated Breast Cancer Detection to Identify Spatial Distributions of Tumor-Infiltrating Lymphocytes in Invasive Breast Cancer</article-title>. <source>Am. J. Pathol.</source> <volume>190</volume>, <fpage>1491</fpage>&#x2013;<lpage>1504</lpage>. <pub-id pub-id-type="doi">10.1016/j.ajpath.2020.03.012</pub-id> </citation>
</ref>
<ref id="B33">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Lee</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Zung</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Jain</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Sebastian Seung</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Superhuman Accuracy on the SNEMI3D Connectomics Challenge</article-title>. <comment>
<italic>arXiv [cs.CV]</italic>. Available at: <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/1706.00120">http://arxiv.org/abs/1706.00120</ext-link>.</comment> </citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Fan</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>DeepSEED: 3D Squeeze-And-Excitation Encoder-Decoder Convolutional Neural Networks for Pulmonary Nodule Detection</article-title>. <source>Proc. IEEE Int. Symp. Biomed. Imaging</source> <volume>2020</volume>, <fpage>1866</fpage>&#x2013;<lpage>1869</lpage>. <pub-id pub-id-type="doi">10.1109/ISBI45749.2020.9098317</pub-id> </citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mongan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Moy</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Kahn</surname>
<given-names>C. E.</given-names>
<suffix>Jr</suffix>
</name>
</person-group> (<year>2020</year>). <article-title>Checklist for Artificial Intelligence in Medical Imaging (CLAIM): A Guide for Authors and Reviewers</article-title>. <source>Radiol. Artif. Intell.</source> <volume>2</volume>, <fpage>e200029</fpage>. <pub-id pub-id-type="doi">10.1148/ryai.2020200029</pub-id> </citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>M&#xfc;ller</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Kramer</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>MIScnn: a Framework for Medical Image Segmentation with Convolutional Neural Networks and Deep Learning</article-title>. <source>BMC Med. Imaging</source> <volume>21</volume>, <fpage>12</fpage>. <pub-id pub-id-type="doi">10.1186/s12880-020-00543-7</pub-id> </citation>
</ref>
<ref id="B37">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Myronenko</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>3D MRI Brain Tumor Segmentation Using Autoencoder Regularization</article-title>,&#x201d; in <source>Brainlesion: Glioma, Multiple Sclerosis, Stroke and Traumatic Brain Injuries</source> (<publisher-loc>Granada, Spain</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>), <fpage>311</fpage>&#x2013;<lpage>320</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-11726-9_28</pub-id> </citation>
</ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Norgeot</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Quer</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Beaulieu-Jones</surname>
<given-names>B. K.</given-names>
</name>
<name>
<surname>Torkamani</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Dias</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Gianfrancesco</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Minimum Information about Clinical Artificial Intelligence Modeling: the MI-CLAIM Checklist</article-title>. <source>Nat. Med.</source> <volume>26</volume>, <fpage>1320</fpage>&#x2013;<lpage>1324</lpage>. <pub-id pub-id-type="doi">10.1038/s41591-020-1041-y</pub-id> </citation>
</ref>
<ref id="B39">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Oktay</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Schlemper</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Le Folgoc</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Heinrich</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Misawa</surname>
<given-names>K.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>Attention U-Net: Learning where to Look for the Pancreas</article-title>. <comment>
<italic>arXiv [cs.CV]</italic>. Available at: <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/1804.03999">http://arxiv.org/abs/1804.03999</ext-link>
</comment> (<comment>Accessed March 31, 2022</comment>). </citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pan</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>Q.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>A Survey on Transfer Learning</article-title>. <source>IEEE Trans. Knowl. Data Eng.</source> <volume>22</volume> (<issue>10</issue>), <fpage>1345</fpage>&#x2013;<lpage>1359</lpage>. <pub-id pub-id-type="doi">10.1109/tkde.2009.191</pub-id> </citation>
</ref>
<ref id="B41">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Paper</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>Simple Transfer Learning with TensorFlow Hub</article-title>,&#x201d; in <source>State-of-the-Art Deep Learning Models in TensorFlow: Modern Machine Learning in the Google Colab Ecosystem</source>. Editor <person-group person-group-type="editor">
<name>
<surname>Paper</surname>
<given-names>D.</given-names>
</name>
</person-group> (<publisher-loc>Berkeley, CA</publisher-loc>: <publisher-name>Apress</publisher-name>), <fpage>153</fpage>&#x2013;<lpage>169</lpage>. <pub-id pub-id-type="doi">10.1007/978-1-4842-7341-8_6</pub-id> </citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Paszke</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Gross</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Massa</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Lerer</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Bradbury</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Chanan</surname>
<given-names>G.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>PyTorch: An Imperative Style, High-Performance Deep Learning Library</article-title>. <source>Adv. Neural Inf. Process. Syst.</source> <volume>32</volume>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://proceedings.neurips.cc/paper/2019/hash/bdbca288fee7f92f2bfa9f7012727740-Abstract.html">https://proceedings.neurips.cc/paper/2019/hash/bdbca288fee7f92f2bfa9f7012727740-Abstract.html</ext-link> (Accessed March 24, 2022)</comment>. </citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pati</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Jaume</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Foncubierta-Rodr&#xed;guez</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Feroce</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Anniciello</surname>
<given-names>A. M.</given-names>
</name>
<name>
<surname>Scognamiglio</surname>
<given-names>G.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Hierarchical Graph Representations in Digital Pathology</article-title>. <source>Med. Image Anal.</source> <volume>75</volume>, <fpage>102264</fpage>. <pub-id pub-id-type="doi">10.1016/j.media.2021.102264</pub-id> </citation>
</ref>
<ref id="B44">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Rakhlin</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Shvets</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Iglovikov</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Kalinin</surname>
<given-names>A. A.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Deep Convolutional Neural Networks for Breast Cancer Histology Image Analysis</article-title>,&#x201d; in <source>Image Analysis and Recognition</source> (<publisher-loc>P&#x00F3;voa de Varzim, Portugal</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>), <fpage>737</fpage>&#x2013;<lpage>744</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-93000-8_83</pub-id> </citation>
</ref>
<ref id="B45">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ribli</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Horv&#xe1;th</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Unger</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Pollner</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Csabai</surname>
<given-names>I.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Detecting and Classifying Lesions in Mammograms with Deep Learning</article-title>. <source>Sci. Rep.</source> <volume>8</volume>, <fpage>4165</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-018-22437-z</pub-id> </citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Saha</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Schmidt</surname>
<given-names>U.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Barbotin</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Ji</surname>
<given-names>N.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Practical Sensorless Aberration Estimation for 3D Microscopy with Deep Learning</article-title>. <source>Opt. Express</source> <volume>28</volume>, <fpage>29044</fpage>&#x2013;<lpage>29053</lpage>. <pub-id pub-id-type="doi">10.1364/OE.401933</pub-id> </citation>
</ref>
<ref id="B47">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Schindelin</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Arganda-Carreras</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Frise</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Kaynig</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Longair</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Pietzsch</surname>
<given-names>T.</given-names>
</name>
<etal/>
</person-group> (<year>2012</year>). <article-title>Fiji: an Open-Source Platform for Biological-Image Analysis</article-title>. <source>Nat. Methods</source> <volume>9</volume>, <fpage>676</fpage>&#x2013;<lpage>682</lpage>. <pub-id pub-id-type="doi">10.1038/nmeth.2019</pub-id> </citation>
</ref>
<ref id="B48">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Schmarje</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zelenka</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Geisen</surname>
<given-names>U.</given-names>
</name>
<name>
<surname>Gl&#xfc;er</surname>
<given-names>C.-C.</given-names>
</name>
<name>
<surname>Koch</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>2D and 3D Segmentation of Uncertain Local Collagen Fiber Orientations in SHG Microscopy</article-title>,&#x201d; in <source>Pattern Recognition</source> (<publisher-loc>Dortmund, Germany</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>), <fpage>374</fpage>&#x2013;<lpage>386</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-33676-9_26</pub-id> </citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Schneider</surname>
<given-names>C. A.</given-names>
</name>
<name>
<surname>Rasband</surname>
<given-names>W. S.</given-names>
</name>
<name>
<surname>Eliceiri</surname>
<given-names>K. W.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>NIH Image to ImageJ: 25 Years of Image Analysis</article-title>. <source>Nat. Methods</source> <volume>9</volume>, <fpage>671</fpage>&#x2013;<lpage>675</lpage>. <pub-id pub-id-type="doi">10.1038/nmeth.2089</pub-id> </citation>
</ref>
<ref id="B50">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Shailja</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Jiang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Manjunath</surname>
<given-names>B. S.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>Semi Supervised Segmentation and Graph-Based Tracking of 3D Nuclei in Time-Lapse Microscopy</article-title>,&#x201d; in <source>2021 IEEE 18th International Symposium on Biomedical Imaging</source> (<publisher-loc>Nice, France</publisher-loc>: <publisher-name>ISBI IEEE</publisher-name>), <fpage>385</fpage>&#x2013;<lpage>389</lpage>. <pub-id pub-id-type="doi">10.1109/isbi48211.2021.9433831</pub-id> </citation>
</ref>
<ref id="B51">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shen</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Margolies</surname>
<given-names>L. R.</given-names>
</name>
<name>
<surname>Rothstein</surname>
<given-names>J. H.</given-names>
</name>
<name>
<surname>Fluder</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>McBride</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Sieh</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Deep Learning to Improve Breast Cancer Detection on Screening Mammography</article-title>. <source>Sci. Rep.</source> <volume>9</volume>, <fpage>12495</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-019-48995-4</pub-id> </citation>
</ref>
<ref id="B52">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shen</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Phang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Park</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Tyagi</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>An Interpretable Classifier for High-Resolution Breast Cancer Screening Images Utilizing Weakly Supervised Localization</article-title>. <source>Med. Image Anal.</source> <volume>68</volume>, <fpage>101908</fpage>. <pub-id pub-id-type="doi">10.1016/j.media.2020.101908</pub-id> </citation>
</ref>
<ref id="B53">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Sonnenburg</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Braun</surname>
<given-names>M. L.</given-names>
</name>
<name>
<surname>Ong</surname>
<given-names>C. S.</given-names>
</name>
<name>
<surname>Bengio</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Bottou</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Holmes</surname>
<given-names>G.</given-names>
</name>
<etal/>
</person-group> (<year>2007</year>). <article-title>The Need for Open Source Software in Machine Learning</article-title>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://www.jmlr.org/papers/volume8/sonnenburg07a/sonnenburg07a.pdf">https://www.jmlr.org/papers/volume8/sonnenburg07a/sonnenburg07a.pdf</ext-link>
</comment> (<comment>Accessed March 24, 2022</comment>). </citation>
</ref>
<ref id="B54">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Stringer</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Michaelos</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Pachitariu</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Cellpose: a Generalist Algorithm for Cellular Segmentation</article-title>. <source>Nat. Methods</source> <volume>18</volume>, <fpage>100</fpage>&#x2013;<lpage>106</lpage>. <pub-id pub-id-type="doi">10.1038/s41592-020-01018-x</pub-id> </citation>
</ref>
<ref id="B55">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Strubell</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Ganesh</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>McCallum</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Energy and Policy Considerations for Modern Deep Learning Research</article-title>. <source>AAAI</source> <volume>34</volume>, <fpage>13693</fpage>&#x2013;<lpage>13696</lpage>. <pub-id pub-id-type="doi">10.1609/aaai.v34i09.7123</pub-id> </citation>
</ref>
<ref id="B56">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tiwari</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Sekhar</surname>
<given-names>A. K.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Workflow Based Framework for Life Science Informatics</article-title>. <source>Comput. Biol. Chem.</source> <volume>31</volume>, <fpage>305</fpage>&#x2013;<lpage>319</lpage>. <pub-id pub-id-type="doi">10.1016/j.compbiolchem.2007.08.009</pub-id> </citation>
</ref>
<ref id="B57">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tomita</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Abdollahi</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Wei</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ren</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Suriawinata</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Hassanpour</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Attention-Based Deep Neural Networks for Detection of Cancerous and Precancerous Esophagus Tissue on Histopathological Slides</article-title>. <source>JAMA Netw. Open</source> <volume>2</volume>, <fpage>e1914645</fpage>. <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2019.14645</pub-id> </citation>
</ref>
<ref id="B58">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Ullrich</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>van den Berg</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Brubaker</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Fleet</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Welling</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Differentiable Probabilistic Models of Scientific Imaging with the Fourier Slice Theorem</article-title>. <comment>
<italic>arXiv [cs.LG]</italic>. Available at: <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/1906.07582">http://arxiv.org/abs/1906.07582</ext-link>
</comment> (<comment>Accessed March 31, 2022</comment>). </citation>
</ref>
<ref id="B59">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Valanarasu</surname>
<given-names>J. M. J.</given-names>
</name>
<name>
<surname>Sindagi</surname>
<given-names>V. A.</given-names>
</name>
<name>
<surname>Hacihaliloglu</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Patel</surname>
<given-names>V. M.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>KiU-Net: Towards Accurate Segmentation of Biomedical Images Using Over-complete Representations</article-title>,&#x201d; in <source>Medical Image Computing and Computer Assisted Intervention &#x2013; MICCAI 2020</source> (<publisher-loc>Lima, Peru</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>), <fpage>363</fpage>&#x2013;<lpage>373</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-59719-1_36</pub-id> </citation>
</ref>
<ref id="B60">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Vizca&#xed;no</surname>
<given-names>J. P.</given-names>
</name>
<name>
<surname>Saltarin</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Belyaev</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Lyck</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Lasser</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Favaro</surname>
<given-names>P.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Learning to Reconstruct Confocal Microscopy Stacks from Single Light Field Images</article-title>. <source>IEEE Trans. Comput. Imaging</source> <volume>7</volume>, <fpage>775</fpage>. <pub-id pub-id-type="doi">10.1109/TCI.2021.3097611</pub-id> </citation>
</ref>
<ref id="B61">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Ourselin</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Vercauteren</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Automatic Brain Tumor Segmentation Using Cascaded Anisotropic Convolutional Neural Networks</article-title>,&#x201d; in <source>Brainlesion: Glioma, Multiple Sclerosis, Stroke and Traumatic Brain Injuries</source> (<publisher-loc>Quebec City, Canada</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>), <fpage>178</fpage>&#x2013;<lpage>190</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-75238-9_16</pub-id> </citation>
</ref>
<ref id="B62">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Ding</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Zha</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>TransBTS: Multimodal Brain Tumor Segmentation Using Transformer</article-title>,&#x201d; in <conf-name>Medical Image Computing and Computer Assisted Intervention &#x2013; MICCAI 2021</conf-name>, <conf-loc>Strasbourg, France</conf-loc>, <conf-date>September 27&#x2013;October 1, 2021</conf-date> (<publisher-name>Springer International Publishing</publisher-name>), <fpage>109</fpage>&#x2013;<lpage>119</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-87193-2_11</pub-id> </citation>
</ref>
<ref id="B63">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wei</surname>
<given-names>J. W.</given-names>
</name>
<name>
<surname>Tafe</surname>
<given-names>L. J.</given-names>
</name>
<name>
<surname>Linnik</surname>
<given-names>Y. A.</given-names>
</name>
<name>
<surname>Vaickus</surname>
<given-names>L. J.</given-names>
</name>
<name>
<surname>Tomita</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Hassanpour</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Pathologist-level Classification of Histologic Patterns on Resected Lung Adenocarcinoma Slides with Deep Neural Networks</article-title>. <source>Sci. Rep.</source> <volume>9</volume>, <fpage>3358</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-019-40041-7</pub-id> </citation>
</ref>
<ref id="B64">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Weigert</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Schmidt</surname>
<given-names>U.</given-names>
</name>
<name>
<surname>Haase</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Sugawara</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Myers</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>Star-convex Polyhedra for 3D Object Detection and Segmentation in Microscopy</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision</conf-name>, <conf-loc>Snowmass Village, CO, United States</conf-loc>, <conf-date>March 2&#x2013;5, 2020</conf-date> (<publisher-name>openaccess.thecvf.com</publisher-name>), <fpage>3666</fpage>&#x2013;<lpage>3673</lpage>. <pub-id pub-id-type="doi">10.1109/wacv45572.2020.9093435</pub-id> </citation>
</ref>
<ref id="B65">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>West</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ventura</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Warnick</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2007</year>). <source>Spring Research Presentation: A Theoretical Foundation for Inductive Transfer</source>, <volume>1</volume>. <publisher-loc>Provo, UT, United States</publisher-loc>: <publisher-name>Brigham Young University, College of Physical and Mathematical Sciences</publisher-name>. </citation>
</ref>
<ref id="B66">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Wolf</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Debut</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Sanh</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Chaumond</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Delangue</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Moi</surname>
<given-names>A.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). &#x201c;<article-title>Transformers: State-Of-The-Art Natural Language Processing</article-title>,&#x201d; in <conf-name>Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations</conf-name>, <conf-loc>Online</conf-loc>, <conf-date>November 16&#x2013;20, 2020</conf-date> (<publisher-name>Association for Computational Linguistics</publisher-name>). <pub-id pub-id-type="doi">10.18653/v1/2020.emnlp-demos.6</pub-id> </citation>
</ref>
<ref id="B67">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Cox</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Lotter</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Conditional Infilling GANs for Data Augmentation in Mammogram Classification</article-title>,&#x201d; in <source>Image Analysis for Moving Organ, Breast, and Thoracic Images</source> (<publisher-loc>Granada, Spain</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>), <fpage>98</fpage>&#x2013;<lpage>106</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-00946-5_11</pub-id> </citation>
</ref>
<ref id="B68">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Rivenson</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Luo</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Ben-David</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Bentolila</surname>
<given-names>L. A.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>Three-dimensional Virtual Refocusing of Fluorescence Microscopy Images Using Deep Learning</article-title>. <source>Nat. Methods</source> <volume>16</volume>, <fpage>1323</fpage>&#x2013;<lpage>1331</lpage>. <pub-id pub-id-type="doi">10.1038/s41592-019-0622-5</pub-id> </citation>
</ref>
<ref id="B69">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yakimovich</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Beaugnon</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Ozkirimli</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Labels in a Haystack: Approaches beyond Supervised Learning in Biomedical Applications</article-title>. <source>Patterns</source> <volume>2</volume>, <fpage>100383</fpage>. <pub-id pub-id-type="doi">10.1016/j.patter.2021.100383</pub-id> </citation>
</ref>
<ref id="B70">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Deng</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>W.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Can Fine-Tuning Pre-trained Models Lead to Perfect NLP? A Study of the Generalizability of Relation Extraction</article-title>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://openreview.net/forum?id=3yzIj2-eBbZ">https://openreview.net/forum?id&#x3d;3yzIj2-eBbZ</ext-link>
</comment> (<comment>Accessed April 4, 2022</comment>). </citation>
</ref>
<ref id="B71">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Zhong</surname>
<given-names>E. D.</given-names>
</name>
<name>
<surname>Bepler</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Davis</surname>
<given-names>J. H.</given-names>
</name>
<name>
<surname>Berger</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Reconstructing Continuous Distributions of 3D Protein Structure from Cryo-EM Images</article-title>. <comment>
<italic>arXiv [q-bio.QM]</italic>. Available at: <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/1909.05215">http://arxiv.org/abs/1909.05215</ext-link>
</comment> (<comment>Accessed March 30, 2022</comment>). </citation>
</ref>
<ref id="B72">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Zhu</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Fan</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Xie</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2018a</year>). &#x201c;<article-title>DeepLung: Deep 3D Dual Path Nets for Automated Pulmonary Nodule Detection and Classification</article-title>,&#x201d; in <conf-name>2018 IEEE Winter Conference on Applications of Computer Vision (WACV) (ieeexplore.ieee.org)</conf-name>, <conf-loc>Lake Tahoe, NV, United States</conf-loc>, <conf-date>March 12&#x2013;15, 2018</conf-date>, <fpage>673</fpage>&#x2013;<lpage>681</lpage>. <pub-id pub-id-type="doi">10.1109/wacv.2018.00079</pub-id> </citation>
</ref>
<ref id="B73">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Zhu</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Vang</surname>
<given-names>Y. S.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Xie</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2018b</year>). &#x201c;<article-title>DeepEM: Deep 3D ConvNets with EM for Weakly Supervised Pulmonary Nodule Detection</article-title>,&#x201d; in <conf-name>Medical Image Computing and Computer Assisted Intervention &#x2013; MICCAI 2018 (Springer International Publishing)</conf-name>, <conf-loc>Granada, Spain</conf-loc>, <conf-date>September 16&#x2013;20, 2018</conf-date>, <fpage>812</fpage>&#x2013;<lpage>820</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-00934-2_90</pub-id> </citation>
</ref>
<ref id="B74">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zunair</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Ben Hamza</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Melanoma Detection Using Adversarial Training and Deep Transfer Learning</article-title>. <source>Phys. Med. Biol.</source> <volume>65</volume>, <fpage>135005</fpage>. <pub-id pub-id-type="doi">10.1088/1361-6560/ab86d3</pub-id> </citation>
</ref>
</ref-list>
</back>
</article>