<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Pediatr.</journal-id>
<journal-title>Frontiers in Pediatrics</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Pediatr.</abbrev-journal-title>
<issn pub-type="epub">2296-2360</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fped.2023.1149318</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Pediatrics</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Automated measurement of penile curvature using deep learning-based novel quantification method</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author"><name><surname>Baray</surname><given-names>Sriman Bidhan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/2177602/overview"/></contrib>
<contrib contrib-type="author"><name><surname>Abdelmoniem</surname><given-names>Mohamed</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/1913219/overview" /></contrib>
<contrib contrib-type="author"><name><surname>Mahmud</surname><given-names>Sakib</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/761207/overview" /></contrib>
<contrib contrib-type="author"><name><surname>Kabir</surname><given-names>Saidul</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/2191994/overview" /></contrib>
<contrib contrib-type="author"><name><surname>Faisal</surname><given-names>Md. Ahasan Atick</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/2248337/overview"/></contrib>
<contrib contrib-type="author"><name><surname>Chowdhury</surname><given-names>Muhammad E. H.</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/616948/overview" /></contrib>
<contrib contrib-type="author" corresp="yes"><name><surname>Abbas</surname><given-names>Tariq O.</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="corresp" rid="cor1">&#x002A;</xref><uri xlink:href="https://loop.frontiersin.org/people/479402/overview" /></contrib>
</contrib-group>
<aff id="aff1"><label><sup>1</sup></label><addr-line>Department of Electrical and Electronic Engineering</addr-line>, <institution>University of Dhaka</institution>, <addr-line>Dhaka</addr-line>, <country>Bangladesh</country></aff>
<aff id="aff2"><label><sup>2</sup></label><addr-line>Department of Electrical Engineering</addr-line>, <institution>College of Engineering, Qatar University</institution>, <addr-line>Doha</addr-line>, <country>Qatar</country></aff>
<aff id="aff3"><label><sup>3</sup></label><addr-line>Department of Surgery</addr-line>, <institution>Weill Cornell Medicine-Qatar</institution>, <addr-line>Ar-Rayyan</addr-line>, <country>Qatar</country></aff>
<aff id="aff4"><label><sup>4</sup></label><addr-line>Urology Division, Surgery Department</addr-line>, <institution>Sidra Medicine</institution>, <addr-line>Doha</addr-line>, <country>Qatar</country></aff>
<aff id="aff5"><label><sup>5</sup></label><addr-line>College of Medicine</addr-line>, <institution>Qatar University</institution>, <addr-line>Doha</addr-line>, <country>Qatar</country></aff>
<author-notes>
<fn fn-type="edited-by"><p><bold>Edited by:</bold> Alexander Springer, Medical University of Vienna, Austria</p></fn>
<fn fn-type="edited-by"><p><bold>Reviewed by:</bold> Gilvydas Verkauskas, Vilnius University, Lithuania; Adam Benjamin Hittelman, Yale University, United States</p></fn>
<corresp id="cor1"><label>&#x002A;</label><bold>Correspondence:</bold> Tariq O. Abbas <email>tariq2c@hotmail.com</email></corresp>
<fn fn-type="other" id="fn001"><p><bold>Specialty Section:</bold> This article was submitted to Pediatric Urology, a section of the journal Frontiers in Pediatrics</p></fn>
</author-notes>
<pub-date pub-type="epub"><day>17</day><month>04</month><year>2023</year></pub-date>
<pub-date pub-type="collection"><year>2023</year></pub-date>
<volume>11</volume><elocation-id>1149318</elocation-id>
<history>
<date date-type="received"><day>21</day><month>01</month><year>2023</year></date>
<date date-type="accepted"><day>13</day><month>03</month><year>2023</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2023 Baray, Abdelmoniem, Mahmud, Kabir, Faisal, Chowdhury and Abbas.</copyright-statement>
<copyright-year>2023</copyright-year><copyright-holder>Baray, Abdelmoniem, Mahmud, Kabir, Faisal, Chowdhury and Abbas</copyright-holder><license license-type="open-access" xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<sec><title>Objective</title>
<p>Develop a reliable, automated deep learning-based method for accurate measurement of penile curvature (PC) using 2-dimensional images.</p>
</sec>
<sec><title>Materials and methods</title>
<p>A set of nine 3D-printed models was used to generate a batch of 913 images of penile curvature (PC) with varying configurations (curvature range 18&#x00B0; to 86&#x00B0;). The penile region was initially localized and cropped using a YOLOv5 model, after which the shaft area was extracted using a UNet-based segmentation model. The penile shaft was then divided into three distinct predefined regions: the distal zone, curvature zone, and proximal zone. To measure PC, we identified four distinct locations on the shaft that reflected the mid-axes of proximal and distal segments, then trained an HRNet model to predict these landmarks and calculate curvature angle in both the 3D-printed models and masked segmented images derived from these. Finally, the optimized HRNet model was applied to quantify PC in medical images of real human patients and the accuracy of this novel method was determined.</p>
</sec>
<sec><title>Results</title>
<p>We obtained a mean absolute error (MAE) of angle measurement &#x003C;5&#x00B0; for both penile model images and their derivative masks. For real patient images, AI prediction varied between 1.7&#x00B0; (for cases of &#x223C;30&#x00B0; PC) and approximately 6&#x00B0; (for cases of 70&#x00B0; PC) compared with assessment by a clinical expert.</p>
</sec>
<sec><title>Discussion</title>
<p>This study demonstrates a novel approach to the automated, accurate measurement of PC that could significantly improve patient assessment by surgeons and hypospadiology researchers. This method may overcome current limitations encountered when applying conventional methods of measuring arc-type PC.</p>
</sec>
</abstract>
<kwd-group>
<kwd>penile curvature</kwd>
<kwd>artificial intelligence</kwd>
<kwd>machine learning</kwd>
<kwd>YOLO</kwd>
<kwd>UNET</kwd>
<kwd>HRNet</kwd>
<kwd>hypospadias</kwd>
<kwd>chordee</kwd>
</kwd-group>
<contract-num rid="cn001">20841</contract-num>
<contract-sponsor id="cn001">Hamad Medical Corporation Medical Research Center</contract-sponsor>
<counts>
<fig-count count="9"/>
<table-count count="5"/><equation-count count="12"/><ref-count count="46"/><page-count count="0"/><word-count count="0"/></counts>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro"><label>1.</label><title>Introduction</title>
<p>Congenital penile curvature (PC) is typically caused by abnormalities in genital development, such as chordee or hypospadias. Approximately 1 in 300 newborn males exhibit hypospadias (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B2">2</xref>), with an estimated one-third of individuals also presenting with notable PC (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B4">4</xref>). This condition is thought to result from arrested embryological development of the ventral axis of the penile shaft, often leading to insufficient skin, abnormally short urethral plate, and ventro-dorsal corporeal disproportion (<xref ref-type="bibr" rid="B5">5</xref>&#x2013;<xref ref-type="bibr" rid="B7">7</xref>). In some cases, congenital PC may coexist with a normal meatus but deficient urethra, termed chordee without hypospadias (<xref ref-type="bibr" rid="B8">8</xref>). Penile curvature can also occur even when the urethra is completely normal, which is thought to affect &#x223C;0.6&#x0025; of newborn boys (<xref ref-type="bibr" rid="B9">9</xref>).</p>
<p>PC may develop in a variety of contexts, although it is more prevalent and appears earlier in patients with hypospadias, necessitating early examination and treatment. In situations of hypospadias, tiny differences in the degree of PC can significantly impact surgical decision-making and the ultimate choice of repair procedure (<xref ref-type="bibr" rid="B10">10</xref>, <xref ref-type="bibr" rid="B11">11</xref>). A prior study of pediatric urologists found that a highly variable fraction chose no intervention when the amount of PC varied from 10&#x00B0; (69&#x0025;), to 20&#x00B0; (64&#x0025;), or 30&#x00B0; (16&#x0025;) (<xref ref-type="bibr" rid="B12">12</xref>). At the same time, 66&#x0025; of the urologists used dorsal correction with a PC of 40&#x00B0;, compared to 47&#x0025; of respondents for a PC of 50&#x00B0; (<xref ref-type="bibr" rid="B13">13</xref>). Notably, around 37&#x0025; of readings acquired using a goniometer and eye assessment alone may result in needless surgical treatments (<xref ref-type="bibr" rid="B14">14</xref>). If not treated properly, PC can persist into adulthood and cause further complex patient issues (<xref ref-type="bibr" rid="B11">11</xref>, <xref ref-type="bibr" rid="B15">15</xref>, <xref ref-type="bibr" rid="B16">16</xref>). In order to adequately identify the severity of hypospadias, it is of the utmost importance to assess the degree of PC accurately.</p>
<p>Although PC extent has substantial clinical relevance and predictive significance, evaluation of this disorder is inconsistent across surgeons, with no rapid and reliable measurement techniques available at present (<xref ref-type="bibr" rid="B11">11</xref>, <xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B18">18</xref>). Current approaches typically involve visual assessment upon artificial erection induced by saline injection (<xref ref-type="bibr" rid="B19">19</xref>). However, recent developments in artificial intelligence (AI) have revolutionized many medical sectors including radiology, pathology, ophthalmology, and cardiology (<xref ref-type="bibr" rid="B20">20</xref>&#x2013;<xref ref-type="bibr" rid="B24">24</xref>). Numerous urology subspecialties including endourology, reproductive medicine, stones, hydronephrosis, malignancies, and pediatric urology have already benefited from the use of AI applications, which can be used to perform automatic segmentation, classification, registration, and analysis of medical images (<xref ref-type="bibr" rid="B25">25</xref>&#x2013;<xref ref-type="bibr" rid="B27">27</xref>). In this way, AI can provide highly accurate predictions that inform rapid patient diagnosis and treatment decisions. AI tools can outperform conventional statistical methods in terms of prediction accuracy, and if integrated into relevant guidelines, may completely transform the way that urologists make clinical decisions (<xref ref-type="bibr" rid="B28">28</xref>, <xref ref-type="bibr" rid="B29">29</xref>).</p>
<p>To measure PC, current methods involve unassisted visual inspection, a goniometer, or mobile app-based angle measurements. However, due to their high subjectivity and poor inter- and intra-observer agreement, all of these procedures are intrinsically flawed (<xref ref-type="bibr" rid="B14">14</xref>, <xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B31">31</xref>). During surgery, normal saline is often injected into the penis to assess curvature, which must be quantified in real-time to reduce surgery duration and minimize fluid leakage from the operation site. Considering these major limitations of PC measurement, Abbas et al<italic>.</italic> (<xref ref-type="bibr" rid="B32">32</xref>), proposed an automatic quantification method which involved penile area localization, shaft segmentation, and angle calculation using a novel AI-based tool. While localization and segmentation aspects achieved satisfactory results, angle calculation sometimes failed when applied to non-uniform masks. To overcome this limitation, here we developed a novel approach to calculate the axes of the penile shaft using two pairs of key points that no longer depend on arc area. Additionally, to better automate angle measurement, we trained and validated an HRNet-based deep learning model which can measure curvature angles more precisely, even when applied to non-uniform real-life anatomy.</p>
</sec>
<sec id="s2"><label>2.</label><title>Method</title>
<p>Our previous pipeline for autonomous measurement of penile curvature (PC) consisted of three distinct steps: automated localization of the penile area, segmentation of the penile shaft, and angle computation. Due to inadequate performance with real-world cases, in the current study, we developed an alternative pipeline in which we incorporated the earlier steps but focused on detecting key points (as shown in <xref ref-type="fig" rid="F1">Figure&#x00A0;1</xref>). For automated localization of the penile area, a YoloV5l network was trained to predict a bounding box around the relevant region and then crop this to a predefined shape. For the segmentation of the penile shaft, several UNet models (encoder-decoder) including state-of-the-art convolutional neural network (CNN) models were used to create binary masks identifying the penile shaft. For the key point assignment task, a Deep Learning model, HRNet was trained and validated to recognize two pairs of crucial points on the penile shaft (either from cropped pictures or derivative masks). Finally, the curvature angle was computed using two vectors drawn through the vertices of these key points. This end-to-end pipeline automates the whole process of PC measurement which takes the 2D penile model images as input and gives the calculated angle as output. Behind the scenes, the trained YOLOv5l model identifies the penile area, the segmentation model generates the penile shaft mask, the HRNet model predicts the vectors through the proximal and distal area of the shaft and the angle between the vectors is calculated automatically to show the penile curvature angle as an output.</p>
<fig id="F1" position="float"><label>Figure 1</label>
<caption><p>Penile angle measurement pipeline.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fped-11-1149318-g001.tif"/>
</fig>
<sec id="s2a"><label>2.1.</label><title>Dataset description</title>
<p>The dataset employed in this study was previously described by Abbas et al<italic>.</italic> (<xref ref-type="bibr" rid="B32">32</xref>), and consisted of 913 total images generated using <italic>n</italic>&#x2009;&#x003D;&#x2009;9 3D-printed penile models with different curvature angles (ranging from 18&#x00B0; to 86&#x00B0;) as shown in <xref ref-type="sec" rid="s10">Supplementary Figure 1</xref>. The models were designed by a 3D model developer before resizing the stereolithography (STL) files to dimensions appropriate for children (1.5&#x2005;cm wide and 5&#x2013;6&#x2005;cm long). The penile models were then photographed with a triple-lens iPhone 11 Pro Max mobile camera with a 12-megapixel resolution. The camera was set 20&#x2013;25&#x2005;cm away from each model and moved along the horizontal and vertical axes (&#x2212;5&#x00B0;, 5&#x00B0;) and (0&#x00B0;, 20&#x00B0;), respectively. For each model around 100 pictures were captured at different camera positions (penile models&#x0027; angles and number of images are listed in <xref ref-type="sec" rid="s10">Supplementary Table 1</xref>).</p>
</sec>
<sec id="s2b"><label>2.2.</label><title>Penile area localization</title>
<p>To reduce image complexity, the penile area was localized in each photograph and images were then cropped to retain only this area, thereby eliminating the irrelevant background. Localizing and cropping the penile area also reduced the amount of input data that required processing in subsequent steps of the pipeline, thus making the procedure faster and more efficient (an overview of this process is shown in <xref ref-type="fig" rid="F2">Figure&#x00A0;2</xref>).</p>
<fig id="F2" position="float"><label>Figure 2</label>
<caption><p>Penile area localization and cropping.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fped-11-1149318-g002.tif"/>
</fig>
<p>We first annotated all 913 images with appropriate bounding boxes and then automated this process using a YOLOv5 model (a single-stage object detector consisting of three components: a Backbone, a Neck, and a Head for making dense predictions). The YOLO (You Only Look Once) technique for identifying objects involves first splitting the picture into a grid of cells and then calculating the probability that an item is located in each of those cells. For each cell that could hold an object, YOLO calculates an estimated bounding box and class. The probability of an object&#x0027;s presence in a given cell is predicted using a Deep Neural Network. Once complete, the model was able to process any raw photograph into an image shaped 256&#x2009;&#x00D7;&#x2009;256 pixels consisting of only the penile area.</p>
<p>All YOLOv5 models are composed of the same 3 components: CSP-Darknet53 as a backbone, SPP and PANet in the model neck, and the head used in YOLOv4 (<xref ref-type="bibr" rid="B33">33</xref>). There is no difference between the five YOLOv5 models&#x2014;nano (n), small (s), medium (m), large (l), and extra-large (x) in terms of operations used (only the number of layers varies). YOLOv5 employs SiLU (Sigmoid Linear Unit) and Sigmoid activation functions. Three outputs are provided by YOLOv5: the classes of the identified objects, their bounding boxes, and objectness ratings (the model&#x0027;s confidence that a particular region in an image contains an object). The class loss and the objectness loss are then computed using BCE (Binary Cross Entropy). CIoU (Complete Intersection over Union) is an improved penalty function, which helps to improve localization accuracy. Additionally, YOLOv5 employs the Focus Layer to replace the first three layers of the network, thereby reducing the number of parameters, floating point operations per second (FLOPS), and Compute Unified Device Architecture (CUDA) memory required. YOLOv5 also eliminates Grid Sensitivity by using a centre point offset range from &#x2212;0.5 to 1.5 (instead of just 0 to 1) thus allowing the detection of objects in the corners of images. YOLOv5 is written in PyTorch rather than C, giving more flexibility to control encoding operations. The overall architecture of YOLOv5 is shown in <xref ref-type="fig" rid="F3">Figure&#x00A0;3</xref>.</p>
<fig id="F3" position="float"><label>Figure 3</label>
<caption><p>YOLOv5 architecture used for penile area cropping from still 2D images.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fped-11-1149318-g003.tif"/>
</fig>
<p>The YOLOv5 architecture is independent of the inference size, save for stride multiple constraints. Two variables namely <italic>model depth multiple</italic> and <italic>layer channel multiple</italic> are used for model scaling, and compound scaling when used jointly. The depth multiple determines how many convolutional layers are used in the model, and it is typically set to a value between 0.33 (YOLOv5-n) and 1.33 (YOLOv5-x). For example, if the depth multiple is set to 0.33, the number of convolutional layers in the model will be roughly one-third of the default number of layers. For YOLOv5-l the model depth multiple is set to 1.0. The width multiple determines the width of the model, which is proportional to the number of filters in the convolutional layers. Increasing the width multiple results in a wider and more complex model with more parameters, while decreasing the width multiple results in a smaller and simpler model with fewer parameters. The width multiple is typically set to a value between 0.25 and 1.25, for YOLOv5-l, it is set to 1.0. In total, there are about 46.5 million parameters in YOLOv5-l.</p>
</sec>
<sec id="s2c"><label>2.3.</label><title>Penile shaft segmentation</title>
<p>All cropped images were manually annotated using &#x201C;labelme&#x201D; (<xref ref-type="bibr" rid="B34">34</xref>) to mark the penile shaft (example shown in <xref ref-type="fig" rid="F4">Figure&#x00A0;4</xref>). The images were then divided into train-test sets for the different segmentation models. We used UNet (encoder-decoder) models for the segmentation task after considering several cutting-edge designs, including UNet3&#x002B; (<xref ref-type="bibr" rid="B35">35</xref>), MultiResUNet (<xref ref-type="bibr" rid="B36">36</xref>), and Ensemble UNet (<xref ref-type="bibr" rid="B37">37</xref>). Different backbone networks, such as ResNet50 (<xref ref-type="bibr" rid="B38">38</xref>), DenseNet121 (<xref ref-type="bibr" rid="B39">39</xref>), InceptionV3 (<xref ref-type="bibr" rid="B40">40</xref>), and EfficientNetV2M (<xref ref-type="bibr" rid="B41">41</xref>), were employed to assess each of these models.</p>
<fig id="F4" position="float"><label>Figure 4</label>
<caption><p>Identifying four key points of the penile shaft on cropped image and generated mask.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fped-11-1149318-g004.tif"/>
</fig>
<p>In the typical U-Net design, up-sampling blocks and pooling operators are employed in the expanding decoder route and the contracting encoder path, respectively. Ensemble UNet introduces a built-in ensemble of U-Nets of varying depths in UNet&#x002B;&#x002B;, thus enabling improved segmentation performance for varying-size objects. Additionally, in UNet 3&#x002B;, each decoder layer combines smaller- and same-scale feature maps from the encoder with larger-scale feature maps from the decoder, thereby capturing both fine- and coarse-grained semantics in complete scales. To incorporate multiresolution analysis, taking inspiration from Inception family networks, MultiResUNet uses MultiRes block which replaces the convolutional layer pairs in the original U-Net. This configuration is derived from incorporating and factorizing 5&#x2009;&#x00D7;&#x2009;5 and 7&#x2009;&#x00D7;&#x2009;7 convolution operations into 3&#x2009;&#x00D7;&#x2009;3 format, then reusing these to obtain results from 3&#x2009;&#x00D7;&#x2009;3, 5&#x2009;&#x00D7;&#x2009;5 and 7&#x2009;&#x00D7;&#x2009;7 convolution operations simultaneously. Moreover, the skip connections in the UNet network may introduce some disparity between features as the encoders may offer lower-level features compared to the decoders. To overcome the semantic gap between the merged features from the encoder and decoder, convolutional layers with residual paths are employed. These are called Res paths that have 3&#x2009;&#x00D7;&#x2009;3 filters as convolution layers and 1&#x2009;&#x00D7;&#x2009;1 as the residual connection.</p>
</sec>
<sec id="s2d"><label>2.4.</label><title>Key-points detection</title>
<p>To estimate penile curvature from 2D images, we tested a new technique based on identifying four key points on the penile shaft. The rationale for selecting these four points is discussed in the following section. We designated four key points for all images and then used these annotations to train and validate an HRNet deep learning model.</p>
<sec id="s2d1"><label>2.4.1.</label><title>Defining key-points and annotation</title>
<p>The penile shaft was divided into three distinct zones: distal shaft, curvature region, and proximal shaft. The curvature zone is defined as bounded by two curved ventral and dorsal borders, while both the distal shaft and proximal shaft have borders defined by ventral and dorsal straight lines. To measure inclination, mid-axes were drawn through the distal and proximal shaft zones. The border points of these lines were then marked as two pairs of key points. The full process is outlined in <xref ref-type="fig" rid="F4">Figure&#x00A0;4</xref>. This approach was used to annotate all the input images with the relevant key points (at the same time, the inclination angle of the annotations was verified to ensure this didn&#x0027;t deviate more than 5&#x00B0; from ground truth).</p>
<p>The motivation behind using the 4 dots approach instead of using the typical 3-dot one is to come up with a generalized approach for both hinge-type and arc-type penile shafts. As shown in <xref ref-type="fig" rid="F5">Figure&#x00A0;5</xref>, defining 4 key points works for hinge-type shafts and is applicable for arc-type shafts. On the other hand, even though defining 3 points, used in previous studies (<xref ref-type="bibr" rid="B11">11</xref>, <xref ref-type="bibr" rid="B31">31</xref>, <xref ref-type="bibr" rid="B32">32</xref>, <xref ref-type="bibr" rid="B42">42</xref>, <xref ref-type="bibr" rid="B43">43</xref>), to measure the curvature angle could work for hinge-type shafts, it would fail in the case of arc-type curvature, providing misleading values.</p>
<fig id="F5" position="float"><label>Figure 5</label>
<caption><p>Advantage of the 4 key-points approach for hinge-type and arc-type penile shafts.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fped-11-1149318-g005.tif"/>
</fig>
</sec>
<sec id="s2d2"><label>2.4.2.</label><title>HRNet model</title>
<p>Penile shaft assessment was performed using the HRNetV2-W18 CNN architecture which is designed for landmark detection (<xref ref-type="bibr" rid="B44">44</xref>). HRNetV2-W18 is a variant of the HRNet architecture which has already been used in a variety of computer vision tasks. HRNetV2 is based on the idea of using multiple parallel &#x201C;branches&#x201D; of convolutional layers, each of which processes the input image at a different resolution. These branches are then combined in a &#x201C;fusion&#x201D; step, whereby the output of each branch is concatenated and processed by additional convolutional layers to produce the final output. This allows the network to learn features at multiple scales, which is crucial for accurate key point detection since penile shafts can vary significantly in size and appearance.</p>
<p>Input to the HRNetV2-W18 network is first processed <italic>via</italic> a series of convolutional layers which reduce the spatial resolution of each image and extract low-level features. The output of these initial layers is then fed into parallel branches, where the features are further refined at different scales. Finally, outputs from the branches are concatenated and processed using additional convolutional layers to produce the final output as shown in <xref ref-type="fig" rid="F6">Figure&#x00A0;6</xref>. Low-resolution representations are rescaled <italic>via</italic> bilinear up-sampling to achieve high resolution. Subsets of representations are then concatenated, resulting in high-resolution composites that can be used to estimate segmentation maps/landmark heat maps. Output representations from all four resolutions are mixed through 1&#x2009;&#x00D7;&#x2009;1 convolution to produce a final 15C-dimensional representation. For each position, the mixed representation is passed to a linear regressor with mean square error (MSE) loss to predict segmentation key-point heat maps. HRNetV2-W18 has previously been shown to achieve state-of-the-art performance in a variety of landmark detection tasks, and can accurately localize a wide range of landmarks even in very challenging scenarios (such as low-resolution images or pictures with large pose variations).</p>
<fig id="F6" position="float"><label>Figure 6</label>
<caption><p>HRNetV2 architecture.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fped-11-1149318-g006.tif"/>
</fig>
</sec>
</sec>
<sec id="s2e"><label>2.5.</label><title>Angle estimation</title>
<p>Once the HRNet model has predicted 4 key points denoting the distal mid-axis dots (DMD) and proximal mid-axis dots (PMD), we then proceed to calculate two vectors to identify the lines shown in <xref ref-type="fig" rid="F7">Figure&#x00A0;7</xref>.</p>
<fig id="F7" position="float"><label>Figure 7</label>
<caption><p>Angle calculation process using 4 predicted key points (distal mid-axis dots, DMD; and proximal mid-axis dots, PMD).</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fped-11-1149318-g007.tif"/>
</fig>
<p>HRNet returns 4 key points: DMD_top (<italic>x</italic><sub>1</sub>, <italic>y</italic><sub>1</sub>), DMD_bottom (<italic>x</italic><sub>2</sub>, <italic>y</italic><sub>2</sub>), PMD_top (<italic>x</italic><sub>3</sub>, <italic>y</italic><sub>3</sub>) and PMD_bottom (<italic>x</italic><sub>4</sub>, <italic>y</italic><sub>4</sub>). These landmarks can then be used to calculate the distal mid-axis vector <italic>v</italic><sub>1</sub> and proximal mid-axis vector <italic>v</italic><sub>2</sub> using the following equations;<disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="UDM1"><mml:msub><mml:mrow><mml:mi mathvariant="bold-italic">v</mml:mi></mml:mrow><mml:mn>1</mml:mn></mml:msub><mml:mo>=</mml:mo><mml:mspace width="thickmathspace" /><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mrow><mml:mi mathvariant="bold-italic">i</mml:mi></mml:mrow><mml:mo>+</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mrow><mml:mi mathvariant="bold-italic">j</mml:mi></mml:mrow></mml:math></disp-formula><disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="UDM2"><mml:msub><mml:mrow><mml:mi mathvariant="bold-italic">v</mml:mi></mml:mrow><mml:mn>2</mml:mn></mml:msub><mml:mo>=</mml:mo><mml:mrow><mml:mspace width="thickmathspace" /></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mn>3</mml:mn></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mn>4</mml:mn></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mrow><mml:mi mathvariant="bold-italic">i</mml:mi></mml:mrow><mml:mo>+</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mn>3</mml:mn></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mn>4</mml:mn></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mrow><mml:mi mathvariant="bold-italic">j</mml:mi></mml:mrow></mml:math></disp-formula>After calculating the vectors, we determined the angle between these vectors using the equation below;<disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="UDM3"><mml:mi>&#x03B8;</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mspace width="thickmathspace" /><mml:mi mathvariant="normal">co</mml:mi></mml:mrow><mml:msup><mml:mrow><mml:mi mathvariant="normal">s</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mi mathvariant="bold-italic">v</mml:mi></mml:mrow><mml:mn>1</mml:mn></mml:msub><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mrow><mml:mi mathvariant="bold-italic">v</mml:mi></mml:mrow><mml:mn>2</mml:mn></mml:msub></mml:mrow><mml:mrow><mml:mo fence="false" stretchy="false">|</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi mathvariant="bold-italic">v</mml:mi></mml:mrow><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo fence="false" stretchy="false">|</mml:mo><mml:mo fence="false" stretchy="false">|</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi mathvariant="bold-italic">v</mml:mi></mml:mrow><mml:mn>2</mml:mn></mml:msub></mml:mrow><mml:mo fence="false" stretchy="false">|</mml:mo></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>Each vector was defined by two values in an array, depicting the components on both horizontal and vertical axes, then Python was used to perform all subsequent calculations.
Instead of typical slope-based angle calculation, this vector-based approach yields more reliable results by providing directional information to avoid confusion when angles approach 90&#x00B0;.</p>
</sec>
<sec id="s2f"><label>2.6.</label><title>Experimental setup</title>
<p>For all experiments, the penile area localization and segmentation steps were performed using 5-fold cross-validation. As there were a total of 9 plastic model images, for each of the first 4 folds we had images from two plastic models and for the last fold we had images from one plastic model. For the model training, each time one fold was used as testing data and others as training data. We further split the training data in a random stratified manner keeping 20&#x0025; for validation and the rest for training. For key point detection, we performed 9-fold cross-validation (7 model image sets were used for training, 1 for validation, and 1 for testing repeating 9 times). In all cases, to increase the variety of the training dataset, we randomly applied various augmentations including random horizontal flip, random brightness contrast, random gamma, random RGB shift, shift-scale-rotate, perspective shift, and rotation, thereby increasing the total number of training images 5-fold.</p>
<p>The YOLOv5 model used included 36 layers with 46,138,294 parameters. SGD (Stochastic gradient descent) optimizer was used with a learning rate of 0.01. A total of 100 epochs were trained with batch size 16. For detection, we used predictions with a greater than 0.75 confidence score to prevent false or multiple detections.</p>
<p>For the segmentation step, each model was trained in two separate phases. In the first phase, each UNet (encoder-decoder) was trained for 200 epochs while the encoder part was untrained using imagenet (<xref ref-type="bibr" rid="B45">45</xref>) pre-trained weights only. A model width of 16 and a model depth of 5 were used for all settings. The learning rate was 0.0001 and there was patience of 20 epochs (meaning that training will stop if the validation error doesn&#x0027;t decrease for 20 consecutive epochs). In the second phase, the entire model was trained for 100 epochs, unfreezing the encoder step with a low learning rate of 0.00005. Patience was set to 10 epochs and the batch size was 4. Binary Cross Entropy was used as a loss function. The best model was selected based on validation mean squared error.</p>
<p>In the case of HRNet training, we used 30 epochs with imagenet pre-trained weights and a batch size of 16. The optimizer used was &#x201C;Adam&#x201D; and the learning rate was 0.0001.</p>
</sec>
<sec id="s2g"><label>2.7.</label><title>Testing on real images</title>
<p>The HRNet model was initially trained on masks from penile model images and then assessed for performance with real patient cases (using 4 intraoperative lateral penile images captured under erection test, from publicly available sources). Images were segmented manually to generate masks and the HRNet model was used to predict key points on the masks.</p>
</sec>
<sec id="s2h"><label>2.8.</label><title>Evaluation metrics</title>
<sec id="s2h1"><label>2.8.1.</label><title>Object detection evaluation metric</title>
<p>The performance of the penile area localization network was assessed using mean average precision (mAP). AP is the area under the precision-recall curve, and mAP is the average AP across all classes. mAP@0.5 indicates that the average AP for IoU (Intersection over Union) is 0.5, while mAP@[.5:.95] corresponds to the average AP for IoU from 0.5 to 0.95, with a step size of 0.05.<disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="UDM4"><mml:mrow><mml:mi mathvariant="normal">mAP</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mspace width="thickmathspace" /><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mi>n</mml:mi></mml:mfrac></mml:mrow><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mrow><mml:mrow><mml:mi mathvariant="normal">A</mml:mi></mml:mrow><mml:msub><mml:mrow><mml:mi mathvariant="normal">P</mml:mi></mml:mrow><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mstyle></mml:math></disp-formula>where <italic>n</italic> is the number of classes (in this case only one: the penile area).</p>
</sec>
<sec id="s2h2"><label>2.8.2.</label><title>Segmentation evaluation metrics</title>
<p>Three assessment metrics&#x2014;model accuracy, intersection over union, and dice similarity coefficient&#x2014;were used to assess the performance of the shaft segmentation networks. The definitions of these performance measures are given below.<disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="UDM5"><mml:mrow><mml:mi mathvariant="normal">DSC</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mspace width="thickmathspace" /><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mrow><mml:mn>2</mml:mn><mml:mrow><mml:mi mathvariant="normal">TP</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn><mml:mrow><mml:mi mathvariant="normal">TP</mml:mi></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mi mathvariant="normal">FP</mml:mi></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mi mathvariant="normal">FN</mml:mi></mml:mrow></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:math></disp-formula>The counts of true positive (TP), false positive (FP), true negative (TN), and false negative (FN) pixels.<disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="UDM6"><mml:mrow><mml:mi mathvariant="normal">IoU</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mspace width="thickmathspace" /><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mrow><mml:mrow><mml:mi mathvariant="normal">TP</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="normal">TP</mml:mi></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mi mathvariant="normal">FP</mml:mi></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mi mathvariant="normal">FN</mml:mi></mml:mrow></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:math></disp-formula><disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="UDM7"><mml:mrow><mml:mi mathvariant="normal">Accuracy</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mspace width="thickmathspace" /></mml:mrow><mml:mstyle displaystyle="true" 
scriptlevel="0"><mml:mrow><mml:mfrac><mml:mrow><mml:mrow><mml:mi mathvariant="normal">TP</mml:mi></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mi mathvariant="normal">TN</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="normal">TP</mml:mi></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mi mathvariant="normal">TN</mml:mi></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mi mathvariant="normal">FP</mml:mi></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mi mathvariant="normal">FN</mml:mi></mml:mrow></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:math></disp-formula>It should be noted that both IoU and DSC provide a quantitative assessment of the overlap between the segmentation masks used for prediction and those used for ground truth, with the main difference being that DSC gives true shaft prediction pixels a 2-fold advantage over IoU. For this study, we calculated weighted IoU since both the mask and background had almost equal distribution in the cropped 256&#x2009;&#x00D7;&#x2009;256 pixel images. All three assessment metrics were evaluated on a per-image basis. Accuracy, IoU, and DSC were calculated for each mask generated.</p>
</sec>
<sec id="s2h3"><label>2.8.3.</label><title>Key point detection</title>
<p>NME (Normalized Mean Error) was the primary evaluation criterion for key point designation on the penile shaft. This measure calculates the Euclidean distance between ground truth points and the predicted points, then divides this distance by a normalized factor. The formula is as follows:<disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="UDM8"><mml:mrow><mml:mi mathvariant="normal">NME</mml:mi></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>P</mml:mi><mml:mo>,</mml:mo><mml:mrow><mml:mover><mml:mi>P</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mspace width="thickmathspace" /><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mi>N</mml:mi></mml:mfrac></mml:mrow><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>N</mml:mi></mml:munderover><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mo fence="false" stretchy="false">|</mml:mo><mml:mrow><mml:mo fence="false" stretchy="false">|</mml:mo><mml:mrow><mml:mspace width="thinmathspace" /><mml:msub><mml:mi>p</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:mover><mml:mrow><mml:mspace width="thinmathspace" /><mml:msub><mml:mi>p</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>&#x005E;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mo fence="false" stretchy="false">|</mml:mo></mml:mrow><mml:mo fence="false" stretchy="false">|</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msub></mml:mrow><mml:mi>d</mml:mi></mml:mfrac></mml:mrow></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula>where <italic>P</italic> and <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM1"><mml:mrow><mml:mover><mml:mi>P</mml:mi><mml:mo 
stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow></mml:math></inline-formula> denote the predicted and ground-truth coordinates of key points, respectively. N is the number of points, and d is the reference distance to normalize the absolute errors. In this case, the reference distance was taken from the top DMD point to the bottom PMD point.</p>
</sec>
<sec id="s2h4"><label>2.8.4.</label><title>Curvature angle estimation evaluation metrics</title>
<p>The primary scoring system used for curvature angle estimation was a mean absolute error (MAE) and is defined by:<disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="UDM9"><mml:mrow><mml:mi mathvariant="normal">MAE</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mspace width="thickmathspace" /><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mi>n</mml:mi></mml:mfrac></mml:mrow><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mrow><mml:mo fence="false" stretchy="false">|</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mi>i</mml:mi></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo fence="false" stretchy="false">|</mml:mo></mml:mrow></mml:mstyle></mml:math></disp-formula>where <italic>n</italic> is the total number of examples, <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM2"><mml:msub><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mi>i</mml:mi></mml:msub></mml:math></inline-formula> is the estimated curvature angle averaged over all predictions for one penile model, and <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM3"><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:math></inline-formula> is the ground truth value for that same model. Individual error values were calculated for each image and then divided by the total number of images to obtain the overall MAE.</p>
</sec>
</sec>
</sec>
<sec id="s3" sec-type="results"><label>3.</label><title>Results</title>
<p>Shaft segmentation networks, curvature estimation technique, and penile localization model were thoroughly evaluated both numerically and qualitatively as part of the AI framework&#x0027;s performance testing.</p>
<sec id="s3a"><label>3.1.</label><title>Penile area localization</title>
<p>YOLOv5l performed very well in detecting the penile area with an average mAP0.5 of 99.4&#x0025; for 5 folds, and a mAP0.5&#x2013;0.95 value of 73.8&#x0025;. The fold-wise results are given in <xref ref-type="table" rid="T1">Table&#x00A0;1</xref> and indicate that the model did not fail in the assessment of any input image (although small differences in bounding boxes may have caused minor fluctuations in mAP). Other than fold_1, for all cases, the model almost perfectly predicted bounding boxes with 50&#x0025; overlap in IoU.</p>
<table-wrap id="T1" position="float"><label>Table 1</label>
<caption><p>YOLOv5l prediction mAP (mean average precision) for each fold.</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Fold</th>
<th valign="top" align="center">mAP0.5</th>
<th valign="top" align="center">mAP50-95</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">fold_0</td>
<td valign="top" align="center">0.995</td>
<td valign="top" align="center">0.693</td>
</tr>
<tr>
<td valign="top" align="left">fold_1</td>
<td valign="top" align="center">0.991</td>
<td valign="top" align="center">0.692</td>
</tr>
<tr>
<td valign="top" align="left">fold_2</td>
<td valign="top" align="center">0.995</td>
<td valign="top" align="center">0.777</td>
</tr>
<tr>
<td valign="top" align="left">fold_3</td>
<td valign="top" align="center">0.995</td>
<td valign="top" align="center">0.713</td>
</tr>
<tr>
<td valign="top" align="left">fold_4</td>
<td valign="top" align="center">0.995</td>
<td valign="top" align="center">0.815</td>
</tr>
<tr>
<td valign="top" align="left">Avg.</td>
<td valign="top" align="center"><bold>0.994</bold></td>
<td valign="top" align="center"><bold>0.738</bold></td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3b"><label>3.2.</label><title>Shaft segmentation</title>
<p><xref ref-type="table" rid="T2">Table&#x00A0;2</xref> provides the segmentation results for all test cases when using UNetE, UNet3P, and MultiResUNet as decoders, and DenseNet121, ResNet50, InceptionV3 and EfficientNetV2M as encoders. For all the encoder and decoder combinations, models were trained and test scores were determined. Among all models, the combination of Ensemble UNet (UNetE) and DenseNet121 performed the best, with an average IoU of 96.43&#x0025; for 5 folds. The DSC score and the accuracy were also superior to other models, scoring 94.50&#x0025; and 98.12&#x0025;, respectively. In comparison to encoders based on other designs, DenseNet encoders displayed greater levels of performance. This may be due to the broad interconnectedness afforded by dense layers as well as the collective knowledge provided by preceding layers. The use of an ensemble U-Net model architecture may also have improved the performance of the segmentation network by increasing capacity, improving generalization, reducing overfitting, and increasing robustness. By training multiple U-Net models on different subsets of data, and then averaging the predictions obtained, Ensemble U-Net could potentially achieve better generalization with unseen data. In particular, Ensemble U-Net could reduce overfitting by averaging the predictions of multiple models, as well as being more resistant to noise and other variations in the input data (again due to averaging out these effects across multiple models).</p>
<table-wrap id="T2" position="float"><label>Table 2</label>
<caption><p>Segmentation results including IoU (intersection over union) and DSC (dice similarity coefficient).</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Segmentation model</th>
<th valign="top" align="center">Encoder</th>
<th valign="top" align="center">Accuracy</th>
<th valign="top" align="center">IoU</th>
<th valign="top" align="center">DSC</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" rowspan="4">UNet3P</td>
<td valign="top" align="left">DenseNet121</td>
<td valign="top" align="center">97.88&#x2009;&#x00B1;&#x2009;0.32</td>
<td valign="top" align="center">96.01&#x2009;&#x00B1;&#x2009;0.60</td>
<td valign="top" align="center">93.84&#x2009;&#x00B1;&#x2009;0.93</td>
</tr>
<tr>
<td valign="top" align="left">ResNet50</td>
<td valign="top" align="center">96.70&#x2009;&#x00B1;&#x2009;1.78</td>
<td valign="top" align="center">93.99&#x2009;&#x00B1;&#x2009;2.92</td>
<td valign="top" align="center">90.97&#x2009;&#x00B1;&#x2009;3.43</td>
</tr>
<tr>
<td valign="top" align="left">EfficientNetV2M</td>
<td valign="top" align="center">97.35&#x2009;&#x00B1;&#x2009;0.87</td>
<td valign="top" align="center">95.09&#x2009;&#x00B1;&#x2009;1.49</td>
<td valign="top" align="center">92.53&#x2009;&#x00B1;&#x2009;1.69</td>
</tr>
<tr>
<td valign="top" align="left">InceptionV3</td>
<td valign="top" align="center">97.84&#x2009;&#x00B1;&#x2009;0.30</td>
<td valign="top" align="center">95.94&#x2009;&#x00B1;&#x2009;0.56</td>
<td valign="top" align="center">93.74&#x2009;&#x00B1;&#x2009;0.86</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="4">MultiResUnet</td>
<td valign="top" align="left">DenseNet121</td>
<td valign="top" align="center">97.99&#x2009;&#x00B1;&#x2009;0.31</td>
<td valign="top" align="center">96.20&#x2009;&#x00B1;&#x2009;0.56</td>
<td valign="top" align="center">94.14&#x2009;&#x00B1;&#x2009;0.80</td>
</tr>
<tr>
<td valign="top" align="left">ResNet50</td>
<td valign="top" align="center">97.86&#x2009;&#x00B1;&#x2009;0.57</td>
<td valign="top" align="center">95.97&#x2009;&#x00B1;&#x2009;0.99</td>
<td valign="top" align="center">93.79&#x2009;&#x00B1;&#x2009;1.24</td>
</tr>
<tr>
<td valign="top" align="left">EfficientNetV2M</td>
<td valign="top" align="center">97.58&#x2009;&#x00B1;&#x2009;0.91</td>
<td valign="top" align="center">95.48&#x2009;&#x00B1;&#x2009;1.52</td>
<td valign="top" align="center">93.08&#x2009;&#x00B1;&#x2009;1.95</td>
</tr>
<tr>
<td valign="top" align="left">InceptionV3</td>
<td valign="top" align="center">98.01&#x2009;&#x00B1;&#x2009;0.26</td>
<td valign="top" align="center">96.24&#x2009;&#x00B1;&#x2009;0.47</td>
<td valign="top" align="center">94.19&#x2009;&#x00B1;&#x2009;0.82</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="4">UNetE</td>
<td valign="top" align="left">DenseNet121</td>
<td valign="top" align="center"><bold>98.12&#x2009;&#x00B1;&#x2009;0.31</bold></td>
<td valign="top" align="center"><bold>96.43&#x2009;&#x00B1;&#x2009;0.57</bold></td>
<td valign="top" align="center"><bold>94.50&#x2009;&#x00B1;&#x2009;0.75</bold></td>
</tr>
<tr>
<td valign="top" align="left">ResNet50</td>
<td valign="top" align="center">97.84&#x2009;&#x00B1;&#x2009;0.52</td>
<td valign="top" align="center">95.93&#x2009;&#x00B1;&#x2009;0.92</td>
<td valign="top" align="center">93.74&#x2009;&#x00B1;&#x2009;1.18</td>
</tr>
<tr>
<td valign="top" align="left">EfficientNetV2M</td>
<td valign="top" align="center">97.53&#x2009;&#x00B1;&#x2009;0.62</td>
<td valign="top" align="center">95.40&#x2009;&#x00B1;&#x2009;1.07</td>
<td valign="top" align="center">92.94&#x2009;&#x00B1;&#x2009;1.34</td>
</tr>
<tr>
<td valign="top" align="left">InceptionV3</td>
<td valign="top" align="center">97.98&#x2009;&#x00B1;&#x2009;0.33</td>
<td valign="top" align="center">96.18&#x2009;&#x00B1;&#x2009;0.61</td>
<td valign="top" align="center">94.11&#x2009;&#x00B1;&#x2009;0.84</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3c"><label>3.3.</label><title>Key point detection and angle estimation</title>
<p>HRNet performed very well in the detection of key points on both penile model images and segmentation masks. The average test NME (Normalized Mean Error) between ground truth key points and predicted key points was 0.0708 for the images and 0.0430 for derivative masks. For each fold, the predicted angles for individual model images were determined and the results are shown in <xref ref-type="table" rid="T3">Table&#x00A0;3</xref>. Overall MAE for the angles predicted from penile model images was approximately 4.5&#x00B0;.</p>
<table-wrap id="T3" position="float"><label>Table 3</label>
<caption><p>Angle prediction results and MAE (mean absolute error) for penile model images.</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Test case</th>
<th valign="top" align="center">Ground Truth</th>
<th valign="top" align="center">Predicted Angle</th>
<th valign="top" align="center">MAE</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">pModel_1</td>
<td valign="top" align="center">75</td>
<td valign="top" align="center">75.63684&#x2009;&#x00B1;&#x2009;3.908193</td>
<td valign="top" align="center">3.031769</td>
</tr>
<tr>
<td valign="top" align="left">pModel_2</td>
<td valign="top" align="center">33</td>
<td valign="top" align="center">32.88968&#x2009;&#x00B1;&#x2009;3.08491</td>
<td valign="top" align="center">2.312334</td>
</tr>
<tr>
<td valign="top" align="left">pModel_3</td>
<td valign="top" align="center">82</td>
<td valign="top" align="center">77.54307&#x2009;&#x00B1;&#x2009;9.729288</td>
<td valign="top" align="center">6.782767</td>
</tr>
<tr>
<td valign="top" align="left">pModel_4</td>
<td valign="top" align="center">40</td>
<td valign="top" align="center">43.14194&#x2009;&#x00B1;&#x2009;3.134882</td>
<td valign="top" align="center">3.503608</td>
</tr>
<tr>
<td valign="top" align="left">pModel_5</td>
<td valign="top" align="center">58</td>
<td valign="top" align="center">57.01167&#x2009;&#x00B1;&#x2009;4.751162</td>
<td valign="top" align="center">3.992647</td>
</tr>
<tr>
<td valign="top" align="left">pModel_6</td>
<td valign="top" align="center">50</td>
<td valign="top" align="center">48.53089&#x2009;&#x00B1;&#x2009;3.796258</td>
<td valign="top" align="center">3.372716</td>
</tr>
<tr>
<td valign="top" align="left">pModel_7</td>
<td valign="top" align="center">86</td>
<td valign="top" align="center">82.17514&#x2009;&#x00B1;&#x2009;7.00086</td>
<td valign="top" align="center">6.12593</td>
</tr>
<tr>
<td valign="top" align="left">pModel_8</td>
<td valign="top" align="center">60</td>
<td valign="top" align="center">64.60267&#x2009;&#x00B1;&#x2009;8.151951</td>
<td valign="top" align="center">7.484112</td>
</tr>
<tr>
<td valign="top" align="left">pModel_9</td>
<td valign="top" align="center">18</td>
<td valign="top" align="center">14.52068&#x2009;&#x00B1;&#x2009;3.524402</td>
<td valign="top" align="center">3.925218</td>
</tr>
<tr>
<td valign="top" align="left"/>
<td valign="top" align="center"/>
<td valign="top" align="center"><bold>Overall:</bold></td>
<td valign="top" align="center"><bold>4.522118</bold></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Angle predictions from segmentation masks were superior to those obtained from penile model images, with an overall MAE of just 3.8&#x00B0; as shown in <xref ref-type="table" rid="T4">Table&#x00A0;4</xref>. <xref ref-type="fig" rid="F8">Figure&#x00A0;8</xref> displays the improvement in predictions achieved when using masks instead of original images (as indicated by lower standard deviation and consistent angle prediction for all images from the same model). Overall, our model outperforms the previous study of penile angle calculation using the same dataset by Abbas et al. (<xref ref-type="bibr" rid="B32">32</xref>). While that study showed an overall MAE of 8.53, we could achieve an MAE as low as 3.81, which is almost 2.24 times better. Compared with previous studies of penile curvature using plastic models, the novel pipeline reported here was also more accurate than Goniometer and/or UVI approaches where the mean error was up to 13.6&#x00B0; (<xref ref-type="bibr" rid="B17">17</xref>).</p>
<fig id="F8" position="float"><label>Figure 8</label>
<caption><p>Curvature angle estimation performance from penile model images and derivative masks.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fped-11-1149318-g008.tif"/>
</fig>
<table-wrap id="T4" position="float"><label>Table 4</label>
<caption><p>Angle prediction results and MAE (mean absolute error) for segmentation masks.</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Test case</th>
<th valign="top" align="center">Ground Truth</th>
<th valign="top" align="center">Predicted Angle</th>
<th valign="top" align="center">MAE</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">pModel_1</td>
<td valign="top" align="center">75</td>
<td valign="top" align="center">74.15353&#x2009;&#x00B1;&#x2009;3.895762</td>
<td valign="top" align="center">3.041326</td>
</tr>
<tr>
<td valign="top" align="left">pModel_2</td>
<td valign="top" align="center">33</td>
<td valign="top" align="center">33.47152&#x2009;&#x00B1;&#x2009;2.812504</td>
<td valign="top" align="center">2.156159</td>
</tr>
<tr>
<td valign="top" align="left">pModel_3</td>
<td valign="top" align="center">82</td>
<td valign="top" align="center">80.39399&#x2009;&#x00B1;&#x2009;5.277415</td>
<td valign="top" align="center">4.156524</td>
</tr>
<tr>
<td valign="top" align="left">pModel_4</td>
<td valign="top" align="center">40</td>
<td valign="top" align="center">44.77705&#x2009;&#x00B1;&#x2009;2.881157</td>
<td valign="top" align="center">4.849213</td>
</tr>
<tr>
<td valign="top" align="left">pModel_5</td>
<td valign="top" align="center">58</td>
<td valign="top" align="center">58.15946&#x2009;&#x00B1;&#x2009;3.736759</td>
<td valign="top" align="center">3.004913</td>
</tr>
<tr>
<td valign="top" align="left">pModel_6</td>
<td valign="top" align="center">50</td>
<td valign="top" align="center">48.60104&#x2009;&#x00B1;&#x2009;8.772863</td>
<td valign="top" align="center">4.758214</td>
</tr>
<tr>
<td valign="top" align="left">pModel_7</td>
<td valign="top" align="center">86</td>
<td valign="top" align="center">87.01886&#x2009;&#x00B1;&#x2009;4.666506</td>
<td valign="top" align="center">3.596134</td>
</tr>
<tr>
<td valign="top" align="left">pModel_8</td>
<td valign="top" align="center">60</td>
<td valign="top" align="center">62.63402&#x2009;&#x00B1;&#x2009;5.544223</td>
<td valign="top" align="center">4.609684</td>
</tr>
<tr>
<td valign="top" align="left">pModel_9</td>
<td valign="top" align="center">18</td>
<td valign="top" align="center">14.08197&#x2009;&#x00B1;&#x2009;3.026902</td>
<td valign="top" align="center">4.163569</td>
</tr>
<tr>
<td valign="top" align="left"/>
<td valign="top" align="center"/>
<td valign="top" align="center"><bold>Overall:</bold></td>
<td valign="top" align="center"><bold>3.813667</bold></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Finally, we proceeded to test model performance using real patient masks as shown in <xref ref-type="fig" rid="F9">Figure&#x00A0;9</xref>. Despite having been trained on masks from penile models, our HRNet-based tool was able to successfully predict both DMD and PMD landmarks on the real patient masks. The angle calculations generated from these masks were also comparable with manual image assessment by a clinical expert using the mobile application Angle 360 (<xref ref-type="table" rid="T5">Table&#x00A0;5</xref>).</p>
<fig id="F9" position="float"><label>Figure 9</label>
<caption><p>HRNet key-point detection results using masks manually extracted from real patient images.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fped-11-1149318-g009.tif"/>
</fig>
<table-wrap id="T5" position="float"><label>Table 5</label>
<caption><p>Comparison between human expert vs. AI for penile angle prediction.</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Subject</th>
<th valign="top" align="center">Prediction by HRNet</th>
<th valign="top" align="center">Prediction by a medical expert</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">1</td>
<td valign="top" align="center">17.75&#x00B0;</td>
<td valign="top" align="center">23.82&#x00B0;</td>
</tr>
<tr>
<td valign="top" align="left">2</td>
<td valign="top" align="center">28.30&#x00B0;</td>
<td valign="top" align="center">29.99&#x00B0;</td>
</tr>
<tr>
<td valign="top" align="left">3</td>
<td valign="top" align="center">64.43&#x00B0;</td>
<td valign="top" align="center">69.79&#x00B0;</td>
</tr>
<tr>
<td valign="top" align="left">4</td>
<td valign="top" align="center">42.73&#x00B0;</td>
<td valign="top" align="center">44.93&#x00B0;</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s4" sec-type="discussion"><label>4.</label><title>Discussion</title>
<p>Rapid advances in computational power have ensured that AI is gaining ever more popularity for the automation of routine clinical tasks. AI now offers the opportunity to build highly accurate models that enable precise and timely examination of medical images. Abbas et al<italic>.</italic> (<xref ref-type="bibr" rid="B32">32</xref>), previously aimed to automate curvature estimations from 2D images based on localization and segmentation of the penile shaft, but subsequent angle calculation displayed several limitations. In the current study, rather than use a hard-coded approach, we instead developed a novel deep learning-based algorithm that can robustly calculate the extent of penile curvature with a high level of accuracy.</p>
<p>Penile curvature (PC) assessment is not standardized and remains prone to considerable variability and subjectivity (<xref ref-type="bibr" rid="B46">46</xref>). Typical measurement methods are unaided visual inspection (UVI) or goniometry, both of which have proven distinctly unreliable. In a previous study by Villanueva et al<italic>.</italic>, the mean errors for all PC measurement techniques ranged from 3.5&#x00B0; to 13.6&#x00B0;, with no significant difference between UVI and goniometry procedures (<xref ref-type="bibr" rid="B17">17</xref>). Since surgeons cannot reliably evaluate PC, and there are currently no guidelines for real-time intraoperative measurement of curvature, there is a clear unmet clinical need to develop more robust methods of assessing PC. Accordingly, Fernandez et al<italic>.</italic> (<xref ref-type="bibr" rid="B43">43</xref>), attempted to standardize curvature measurement from 2D images in a semi-automated manner, but the resultant algorithm depended on identifying the geometric centre of the penile shaft, which can vary significantly from patient to patient. In addition, this process still required direct human intervention, hence results could vary markedly depending on user expertise. Similarly, Villanueva et al<italic>.</italic> (<xref ref-type="bibr" rid="B14">14</xref>), used an app-based approach to calculate curvature angles from 2D images, but again the same technical limitations prevent wider application of this method.</p>
<p>In previous work, Abbas et al<italic>.</italic> (<xref ref-type="bibr" rid="B32">32</xref>), proposed a fully automated, end-to-end application that could predict PC extent from captured images, but the hard-coded angle calculation step was unreliable when applied to real-life cases (which display highly variable shaft size and shape, unlike the uniform plastic models used in initial testing). Additionally, the slope-based calculation was found to give erroneous results when angles approached 90&#x00B0; (since the tangent value of 90&#x00B0; is undefined). To overcome the limitations of previous studies, here we developed a new algorithm in which angle calculation no longer depends on identifying the curved region or centre point of &#x201C;maximum&#x201D; curvature. Using deep learning models instead of typical image analysis approaches, we achieved substantial improvement in angle predictions and then proceeded to test performance using shaft masks from real patients. The deep learning process showed moderate accuracy, indicating its potential for translation into real-life scenarios. To achieve this goal, further model development will require: (1) a large dataset of penile curvature images from real-life patients, and/or (2) an improved segmentation step that can predict shaft masks with similar accuracy in both plastic models and real patients. A few limitations of this study should also be noted. In particular, camera angle and picture quality can impact mask generation and angle calculation, although this process should perform well for images taken from a lateral view under well-lit conditions. Also, segmentation of real-life anatomy is more challenging due to excess dartos, soft tissues, blood, etc. Despite these drawbacks, this study successfully developed a novel and accurate framework for automated penile curvature measurement under controlled conditions.</p>
</sec>
<sec id="s5" sec-type="conclusions"><label>5.</label><title>Conclusion</title>
<p>We devised an innovative AI-based approach to perform high-accuracy automatic measurements of PC. This technique uses deep neural networks to segment the penile shaft from captured images and then employs another deep learning network to determine the curvature angle. These findings are superior to those obtained <italic>via</italic> physical examination by urologists and can be accomplished in a far shorter amount of time. Our findings indicate that AI-based approaches may provide accurate, reliable, and generally accessible methods of measuring PC, which might address several flaws present in current assessment methods. The approach discussed in this article may not yet be ready for clinical application, but represents a significant step towards real-time automated PC monitoring in clinical settings.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability"><title>Data availability statement</title>
<p>The datasets generated during and/or analyzed during the current study are available from the corresponding author upon reasonable request. Requests to access these datasets should be directed to Tariq O. Abbas, tariq2c@hotmail.com.</p>
</sec>
<sec id="s8"><title>Author contributions</title>
<p>TA devised the study concept and assisted SB in writing the manuscript. SB conducted all experiments with the help of MA, SM, and SK. MF helped in system development and manuscript revision. MC supervised the AI tasks and assisted with data analysis and manuscript revision. All authors contributed to the article and approved the submitted version.</p>
</sec>
<sec id="s9" sec-type="funding-information"><title>Funding</title>
<p>The authors declare that this study received funding from Hamad Medical Corporation Medical Research Center &#x0023;20841. The funder was not involved in the study design, collection, analysis, interpretation of data, writing of this article, or the decision to submit it for publication.</p>
</sec>
<ack><title>Acknowledgments</title>
<p>Special thanks to Carlos Villanueva for providing the 3D printed models with pre-defined angulations as used in <xref ref-type="fig" rid="F1">Figures&#x00A0;1</xref>, <xref ref-type="fig" rid="F2">2</xref>, <xref ref-type="fig" rid="F4">4</xref>. Open Access Fund fees were supported by Sidra Medicine, Doha, Qatar.</p>
</ack>
<sec id="s7" sec-type="COI-statement"><title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s11" sec-type="disclaimer"><title>Publisher&#x0027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s10" sec-type="supplementary-material"><title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fped.2023.1149318/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fped.2023.1149318/full&#x0023;supplementary-material</ext-link>.</p>
<supplementary-material id="SD1" content-type="local-data">
<media mimetype="application" mime-subtype="vnd.openxmlformats-officedocument.wordprocessingml.document" xlink:href="Table1.docx"/>
</supplementary-material>
</sec>
<ref-list><title>References</title>
<ref id="B1"><label>1.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Langer</surname><given-names>JC</given-names></name><name><surname>Coplen</surname><given-names>DE</given-names></name></person-group>. <article-title>Circumcision and pediatric disorders of the penis</article-title>. <source>Pediatr Clin N Am</source>. (<year>1998</year>) <volume>45</volume>(<issue>4</issue>):<fpage>801</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1016/S0031-3955(05)70046-8</pub-id></citation></ref>
<ref id="B2"><label>2.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nyir&#x00E1;dy</surname><given-names>P</given-names></name><name><surname>Kelemen</surname><given-names>Z</given-names></name><name><surname>B&#x00E1;nfi</surname><given-names>G</given-names></name><name><surname>Rusz</surname><given-names>A</given-names></name><name><surname>Majoros</surname><given-names>A</given-names></name><name><surname>Romics</surname><given-names>I</given-names></name></person-group>. <article-title>Management of congenital penile curvature</article-title>. <source>J Urol</source>. (<year>2008</year>) <volume>179</volume>(<issue>4</issue>):<fpage>1495</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1016/j.juro.2007.11.059</pub-id></citation></ref>
<ref id="B3"><label>3.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Merriman</surname><given-names>LS</given-names></name><name><surname>Arlen</surname><given-names>AM</given-names></name><name><surname>Broecker</surname><given-names>BH</given-names></name><name><surname>Smith</surname><given-names>EA</given-names></name><name><surname>Kirsch</surname><given-names>AJ</given-names></name><name><surname>Elmore</surname><given-names>JM</given-names></name></person-group>. <article-title>The GMS hypospadias score: assessment of inter-observer reliability and correlation with post-operative complications</article-title>. <source>J Pediatr Urol</source>. (<year>2013</year>) <volume>9</volume>(<issue>6</issue>):<fpage>707</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1016/j.jpurol.2013.04.006</pub-id><pub-id pub-id-type="pmid">23683961</pub-id></citation></ref>
<ref id="B4"><label>4.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abbas</surname><given-names>TO</given-names></name><name><surname>Vallasciani</surname><given-names>S</given-names></name><name><surname>Elawad</surname><given-names>A</given-names></name><name><surname>Elifranji</surname><given-names>M</given-names></name><name><surname>Leslie</surname><given-names>B</given-names></name><name><surname>Elkadhi</surname><given-names>A</given-names></name><etal/></person-group> <article-title>Plate objective scoring tool (POST); an objective methodology for the assessment of urethral plate in distal hypospadias</article-title>. <source>J Pediatr Urol</source>. (<year>2020</year>) <volume>16</volume>(<issue>5</issue>):<fpage>675</fpage>&#x2013;<lpage>82</lpage>. <pub-id pub-id-type="doi">10.1016/j.jpurol.2020.07.043</pub-id><pub-id pub-id-type="pmid">32830060</pub-id></citation></ref>
<ref id="B5"><label>5.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Keays</surname><given-names>MA</given-names></name><name><surname>Dave</surname><given-names>S</given-names></name></person-group>. <article-title>Current hypospadias management: diagnosis, surgical management, and long-term patient-centred outcomes</article-title>. <source>Can Urol Assoc J</source>. (<year>2017</year>) <volume>11</volume>(<issue>1-2Suppl1</issue>):<fpage>S48</fpage>. <pub-id pub-id-type="doi">10.5489/cuaj.4386</pub-id><pub-id pub-id-type="pmid">28265319</pub-id></citation></ref>
<ref id="B6"><label>6.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abbas</surname><given-names>TO</given-names></name></person-group>. <article-title>An objective hypospadias classification system</article-title>. <source>J Pediatr Urol</source>. (<year>2022</year>) <volume>18</volume>(<issue>4</issue>):<fpage>481</fpage>.<comment>e1&#x2013;.e8</comment>. <pub-id pub-id-type="doi">10.1016/j.jpurol.2022.05.001</pub-id></citation></ref>
<ref id="B7"><label>7.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>McCarthy</surname><given-names>L</given-names></name><name><surname>Abbas</surname><given-names>T</given-names></name></person-group>. <article-title>Hypospadias: repair of distal hypospadias</article-title>. <source>BJU Int</source>. (<year>2020</year>). <pub-id pub-id-type="doi">10.18591/BJUIK.0624</pub-id>. <comment>[Epub ahead of print]</comment></citation></ref>
<ref id="B8"><label>8.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Daskalopoulos</surname><given-names>EI</given-names></name><name><surname>Baskin</surname><given-names>L</given-names></name><name><surname>Duckett</surname><given-names>JW</given-names></name><name><surname>Snyder</surname><given-names>HM</given-names><suffix>III</suffix></name></person-group>. <article-title>Congenital penile curvature (chordee without hypospadias)</article-title>. <source>Urology</source>. (<year>1993</year>) <volume>42</volume>(<issue>6</issue>):<fpage>708</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1016/0090-4295(93)90540-Q</pub-id><pub-id pub-id-type="pmid">8256405</pub-id></citation></ref>
<ref id="B9"><label>9.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yachia</surname><given-names>D</given-names></name><name><surname>Beyar</surname><given-names>M</given-names></name><name><surname>Aridogan</surname><given-names>IA</given-names></name><name><surname>Dascalu</surname><given-names>S</given-names></name></person-group>. <article-title>The incidence of congenital penile curvature</article-title>. <source>J Urol</source>. (<year>1993</year>) <volume>150</volume>(<issue>5</issue>):<fpage>1478</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1016/S0022-5347(17)35816-0</pub-id><pub-id pub-id-type="pmid">8411431</pub-id></citation></ref>
<ref id="B10"><label>10.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Salle</surname><given-names>JP</given-names></name><name><surname>Sayed</surname><given-names>S</given-names></name><name><surname>Salle</surname><given-names>A</given-names></name><name><surname>Bagli</surname><given-names>D</given-names></name><name><surname>Farhat</surname><given-names>W</given-names></name><name><surname>Koyle</surname><given-names>M</given-names></name><etal/></person-group> <article-title>Proximal hypospadias: a persistent challenge. Single institution outcome analysis of three surgical techniques over a 10-year period</article-title>. <source>J Pediatr Urol</source>. (<year>2016</year>) <volume>12</volume>(<issue>1</issue>):<fpage>28</fpage>.<comment>e1&#x2013;.e7</comment>. <pub-id pub-id-type="doi">10.1016/j.jpurol.2015.06.011</pub-id></citation></ref>
<ref id="B11"><label>11.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abbas</surname><given-names>TO</given-names></name></person-group>. <article-title>Evaluation of penile curvature in patients with hypospadias; gaps in the current practice and future perspectives</article-title>. <source>J Pediatr Urol</source>. (<year>2021</year>) <volume>18</volume>(<issue>2</issue>):<fpage>151</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1016/j.jpurol.2021.12.015</pub-id><pub-id pub-id-type="pmid">35031224</pub-id></citation></ref>
<ref id="B12"><label>12.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bologna</surname><given-names>RA</given-names></name><name><surname>Noah</surname><given-names>TA</given-names></name><name><surname>Nasrallah</surname><given-names>PF</given-names></name><name><surname>McMahon</surname><given-names>DR</given-names></name></person-group>. <article-title>Chordee: varied opinions and treatments as documented in a survey of the American academy of pediatrics, section of urology</article-title>. <source>Urology</source>. (<year>1999</year>) <volume>53</volume>(<issue>3</issue>):<fpage>608</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1016/S0090-4295(98)00656-6</pub-id><pub-id pub-id-type="pmid">10096392</pub-id></citation></ref>
<ref id="B13"><label>13.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Springer</surname><given-names>A</given-names></name><name><surname>Krois</surname><given-names>W</given-names></name><name><surname>Horcher</surname><given-names>E</given-names></name></person-group>. <article-title>Trends in hypospadias surgery: results of a worldwide survey</article-title>. <source>Eur Urol</source>. (<year>2011</year>) <volume>60</volume>(<issue>6</issue>):<fpage>1184</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1016/j.eururo.2011.08.031</pub-id><pub-id pub-id-type="pmid">21871708</pub-id></citation></ref>
<ref id="B14"><label>14.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Villanueva</surname><given-names>CA</given-names></name></person-group>. <article-title>Ventral penile curvature estimation using an app</article-title>. <source>J Pediatr Urol</source>. (<year>2020</year>) <volume>16</volume>(<issue>4</issue>):<fpage>437</fpage>.<comment>e1&#x2013;.e3</comment>. <pub-id pub-id-type="doi">10.1016/j.jpurol.2020.04.027</pub-id></citation></ref>
<ref id="B15"><label>15.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abbas</surname><given-names>TO</given-names></name><name><surname>Charles</surname><given-names>A</given-names></name><name><surname>Ali</surname><given-names>M</given-names></name><name><surname>Salle</surname><given-names>JLP</given-names></name></person-group>. <article-title>Long-term fate of the incised urethral plate in snodgrass procedure; A real concern does exist</article-title>. <source>Urol Case Rep</source>. (<year>2020</year>) <volume>32</volume>:<fpage>101216</fpage>. <pub-id pub-id-type="doi">10.1016/j.eucr.2020.101216</pub-id><pub-id pub-id-type="pmid">32435593</pub-id></citation></ref>
<ref id="B16"><label>16.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bethell</surname><given-names>G</given-names></name><name><surname>Chhabra</surname><given-names>S</given-names></name><name><surname>Shalaby</surname><given-names>M</given-names></name><name><surname>Corbett</surname><given-names>H</given-names></name><name><surname>Kenny</surname><given-names>S</given-names></name><name><surname>Contributors</surname><given-names>BN</given-names></name><etal/></person-group> <article-title>Parental decisional satisfaction after hypospadias repair in the United Kingdom</article-title>. <source>J Pediatr Urol</source>. (<year>2020</year>) <volume>16</volume>(<issue>2</issue>):<fpage>164</fpage>.<comment>e1&#x2013;.e7</comment>. <pub-id pub-id-type="doi">10.1016/j.jpurol.2020.01.005</pub-id></citation></ref>
<ref id="B17"><label>17.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Villanueva</surname><given-names>CA</given-names></name></person-group>. <article-title>Goniometer not better than unaided visual inspection at estimating ventral penile curvature on plastic models</article-title>. <source>J Pediatr Urol</source>. (<year>2019</year>) <volume>15</volume>(<issue>6</issue>):<fpage>628</fpage>&#x2013;<lpage>33</lpage>. <pub-id pub-id-type="doi">10.1016/j.jpurol.2019.09.020</pub-id><pub-id pub-id-type="pmid">31680019</pub-id></citation></ref>
<ref id="B18"><label>18.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abbas</surname><given-names>TO</given-names></name></person-group>. <article-title>The rising need for preoperative objective quantification of curvature in patients with hypospadias</article-title>. <source>J Pediatr Urol</source>. (<year>2021</year>) <volume>17</volume>(<issue>4</issue>):<fpage>599</fpage>&#x2013;<lpage>600</lpage>. <pub-id pub-id-type="doi">10.1016/j.jpurol.2021.06.028</pub-id><pub-id pub-id-type="pmid">34274234</pub-id></citation></ref>
<ref id="B19"><label>19.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gittes</surname><given-names>R</given-names></name><name><surname>McLaughlin</surname><given-names>A</given-names><suffix>3rd</suffix></name></person-group>. <article-title>Injection technique to induce penile erection</article-title>. <source>Urology</source>. (<year>1974</year>) <volume>4</volume>(<issue>4</issue>):<fpage>473</fpage>&#x2013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1016/0090-4295(74)90025-9</pub-id><pub-id pub-id-type="pmid">4418594</pub-id></citation></ref>
<ref id="B20"><label>20.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ahuja</surname><given-names>AS</given-names></name></person-group>. <article-title>The impact of artificial intelligence in medicine on the future role of the physician</article-title>. <source>PeerJ</source>. (<year>2019</year>) <volume>7</volume>:<fpage>e7702</fpage>. <pub-id pub-id-type="doi">10.7717/peerj.7702</pub-id><pub-id pub-id-type="pmid">31592346</pub-id></citation></ref>
<ref id="B21"><label>21.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ullah</surname><given-names>F</given-names></name><name><surname>Ansari</surname><given-names>SU</given-names></name><name><surname>Hanif</surname><given-names>M</given-names></name><name><surname>Ayari</surname><given-names>MA</given-names></name><name><surname>Chowdhury</surname><given-names>MEH</given-names></name><name><surname>Khandakar</surname><given-names>AA</given-names></name><etal/></person-group> <article-title>Brain MR image enhancement for tumor segmentation using 3D U-Net</article-title>. <source>Sensors</source>. (<year>2021</year>) <volume>21</volume>(<issue>22</issue>):<fpage>7528</fpage>. <pub-id pub-id-type="doi">10.3390/s21227528</pub-id><pub-id pub-id-type="pmid">34833602</pub-id></citation></ref>
<ref id="B22"><label>22.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gul</surname><given-names>S</given-names></name><name><surname>Khan</surname><given-names>MS</given-names></name><name><surname>Bibi</surname><given-names>A</given-names></name><name><surname>Khandakar</surname><given-names>A</given-names></name><name><surname>Ayari</surname><given-names>MA</given-names></name><name><surname>Chowdhury</surname><given-names>ME</given-names></name></person-group>. <article-title>Deep learning techniques for liver and liver tumor segmentation: a review</article-title>. <source>Comput Biol Med</source>. (<year>2022</year>) <volume>147</volume>:<fpage>105620</fpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.105620</pub-id><pub-id pub-id-type="pmid">35667155</pub-id></citation></ref>
<ref id="B23"><label>23.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tahir</surname><given-names>AM</given-names></name><name><surname>Qiblawey</surname><given-names>Y</given-names></name><name><surname>Khandakar</surname><given-names>A</given-names></name><name><surname>Rahman</surname><given-names>T</given-names></name><name><surname>Khurshid</surname><given-names>U</given-names></name><name><surname>Musharavati</surname><given-names>F</given-names></name><etal/></person-group> <article-title>Deep learning for reliable classification of COVID-19, MERS, and SARS from chest x-ray images</article-title>. <source>Cognit Comput</source>. (<year>2022</year>) <volume>14</volume>(<issue>5</issue>):<fpage>1752</fpage>&#x2013;<lpage>72</lpage>. <pub-id pub-id-type="doi">10.1007/s12559-021-09955-1</pub-id><pub-id pub-id-type="pmid">35035591</pub-id></citation></ref>
<ref id="B24"><label>24.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shuja</surname><given-names>J</given-names></name><name><surname>Alanazi</surname><given-names>E</given-names></name><name><surname>Alasmary</surname><given-names>W</given-names></name><name><surname>Alashaikh</surname><given-names>A</given-names></name></person-group>. <article-title>COVID-19 open source data sets: a comprehensive survey</article-title>. <source>Appl Intell</source>. (<year>2021</year>) <volume>51</volume>(<issue>3</issue>):<fpage>1296</fpage>&#x2013;<lpage>325</lpage>. <pub-id pub-id-type="doi">10.1007/s10489-020-01862-6</pub-id></citation></ref>
<ref id="B25"><label>25.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Litjens</surname><given-names>G</given-names></name><name><surname>Kooi</surname><given-names>T</given-names></name><name><surname>Bejnordi</surname><given-names>BE</given-names></name><name><surname>Setio</surname><given-names>AAA</given-names></name><name><surname>Ciompi</surname><given-names>F</given-names></name><name><surname>Ghafoorian</surname><given-names>M</given-names></name><etal/></person-group> <article-title>A survey on deep learning in medical image analysis</article-title>. <source>Med Image Anal</source>. (<year>2017</year>) <volume>42</volume>:<fpage>60</fpage>&#x2013;<lpage>88</lpage>. <pub-id pub-id-type="doi">10.1016/j.media.2017.07.005</pub-id><pub-id pub-id-type="pmid">28778026</pub-id></citation></ref>
<ref id="B26"><label>26.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Eun</surname><given-names>S-J</given-names></name><name><surname>Kim</surname><given-names>J</given-names></name><name><surname>Kim</surname><given-names>KH</given-names></name></person-group>. <article-title>Applications of artificial intelligence in urological setting: a hopeful path to improved care</article-title>. <source>J Exerc Rehabil</source>. (<year>2021</year>) <volume>17</volume>(<issue>5</issue>):<fpage>308</fpage>. <pub-id pub-id-type="doi">10.12965/jer.2142596.298</pub-id><pub-id pub-id-type="pmid">34805018</pub-id></citation></ref>
<ref id="B27"><label>27.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hameed</surname><given-names>B</given-names></name><name><surname>Dhavileswarapu</surname><given-names>S</given-names></name><name><surname>Aiswarya</surname><given-names>V</given-names></name><name><surname>Raza</surname><given-names>SZ</given-names></name><name><surname>Karimi</surname><given-names>H</given-names></name><name><surname>Khanuja</surname><given-names>HS</given-names></name><etal/></person-group> <article-title>Artificial intelligence and its impact on urological diseases and management: a comprehensive review of the literature</article-title>. <source>J Clin Med</source>. (<year>2021</year>) <volume>10</volume>(<issue>9</issue>):<fpage>1864</fpage>. <pub-id pub-id-type="doi">10.3390/jcm10091864</pub-id><pub-id pub-id-type="pmid">33925767</pub-id></citation></ref>
<ref id="B28"><label>28.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname><given-names>J</given-names></name><name><surname>Remulla</surname><given-names>D</given-names></name><name><surname>Nguyen</surname><given-names>JH</given-names></name><name><surname>Liu</surname><given-names>Y</given-names></name><name><surname>Dasgupta</surname><given-names>P</given-names></name><name><surname>Hung</surname><given-names>AJ</given-names></name></person-group>. <article-title>Current status of artificial intelligence applications in urology and their potential to influence clinical practice</article-title>. <source>BJU Int</source>. (<year>2019</year>) <volume>124</volume>(<issue>4</issue>):<fpage>567</fpage>&#x2013;<lpage>77</lpage>. <pub-id pub-id-type="doi">10.1111/bju.14852</pub-id><pub-id pub-id-type="pmid">31219658</pub-id></citation></ref>
<ref id="B29"><label>29.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Abbas</surname><given-names>TO</given-names></name><name><surname>AbdelMoniem</surname><given-names>M</given-names></name><name><surname>Khalil</surname><given-names>I</given-names></name><name><surname>Hossain</surname><given-names>MSA</given-names></name><name><surname>Chowdhury</surname><given-names>ME</given-names></name></person-group>. <comment>Deep Learning based Automatic Quantification of Urethral Plate Quality using the Plate Objective Scoring Tool (POST). arXiv preprint arXiv:220913848. (2022)</comment>.</citation></ref>
<ref id="B30"><label>30.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kel&#x00E2;mi</surname><given-names>A</given-names></name></person-group>. <article-title>Autophotography in evaluation of functional penile disorders</article-title>. <source>Urology</source>. (<year>1983</year>) <volume>21</volume>(<issue>6</issue>):<fpage>628</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1016/0090-4295(83)90210-8</pub-id></citation></ref>
<ref id="B31"><label>31.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mosa</surname><given-names>H</given-names></name><name><surname>Paul</surname><given-names>A</given-names></name><name><surname>Solomon</surname><given-names>E</given-names></name><name><surname>Garriboli</surname><given-names>M</given-names></name></person-group>. <article-title>How accurate is eyeball measurement of curvature? A tool for hypospadias surgery</article-title>. <source>J Pediatr Urol</source>. (<year>2022</year>) <volume>18</volume>(<issue>4</issue>):<fpage>470</fpage>&#x2013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1016/j.jpurol.2022.04.009</pub-id><pub-id pub-id-type="pmid">35534383</pub-id></citation></ref>
<ref id="B32"><label>32.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abbas</surname><given-names>TO</given-names></name><name><surname>AbdelMoniem</surname><given-names>M</given-names></name><name><surname>Chowdhury</surname><given-names>M</given-names></name></person-group>. <article-title>Automated quantification of penile curvature using artificial intelligence</article-title>. <source>Front Artif Intell</source>. (<year>2022</year>) <volume>188</volume>(<issue>5</issue>):<fpage>954497</fpage>. <pub-id pub-id-type="doi">10.3389/frai.2022.954497</pub-id></citation></ref>
<ref id="B33"><label>33.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Bochkovskiy</surname><given-names>A</given-names></name><name><surname>Wang</surname><given-names>C-Y</given-names></name><name><surname>Liao</surname><given-names>H-YM</given-names></name></person-group>. <comment>Yolov4: Optimal speed and accuracy of object detection. arXiv preprint arXiv:200410934. (2020)</comment>.</citation></ref>
<ref id="B34"><label>34.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Wada</surname><given-names>K</given-names></name></person-group>. <comment>Labelme. GitHub (2020) [cited 25-Dec-2022]. v5.1.1: [Image Polygonal Annotation with Python (polygon, rectangle, circle, line, point and image-level flag annotation)]</comment>.</citation></ref>
<ref id="B35"><label>35.</label><citation citation-type="confproc"><person-group person-group-type="editor"><name><surname>Huang</surname><given-names>H</given-names></name><name><surname>Lin</surname><given-names>L</given-names></name><name><surname>Tong</surname><given-names>R</given-names></name><name><surname>Hu</surname><given-names>H</given-names></name><name><surname>Zhang</surname><given-names>Q</given-names></name><name><surname>Iwamoto</surname><given-names>Y</given-names></name><etal/></person-group>, editors. <conf-name>Unet 3&#x002B;: a full-scale connected unet for medical image segmentation</conf-name>. <conf-name>ICASSP 2020-2020 IEEE international conference on acoustics, speech and signal processing (ICASSP)</conf-name> (<year>2020</year>). <publisher-name>IEEE</publisher-name>.</citation></ref>
<ref id="B36"><label>36.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ibtehaz</surname><given-names>N</given-names></name><name><surname>Rahman</surname><given-names>MS</given-names></name></person-group>. <article-title>MultiResUNet: rethinking the U-net architecture for multimodal biomedical image segmentation</article-title>. <source>Neural Netw</source>. (<year>2020</year>) <volume>121</volume>:<fpage>74</fpage>&#x2013;<lpage>87</lpage>. <pub-id pub-id-type="doi">10.1016/j.neunet.2019.08.025</pub-id><pub-id pub-id-type="pmid">31536901</pub-id></citation></ref>
<ref id="B37"><label>37.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhou</surname><given-names>Z</given-names></name><name><surname>Siddiquee</surname><given-names>MMR</given-names></name><name><surname>Tajbakhsh</surname><given-names>N</given-names></name><name><surname>Liang</surname><given-names>J</given-names></name></person-group>. <article-title>Unet&#x002B;&#x002B;: redesigning skip connections to exploit multiscale features in image segmentation</article-title>. <source>IEEE Trans Med Imaging</source>. (<year>2019</year>) <volume>39</volume>(<issue>6</issue>):<fpage>1856</fpage>&#x2013;<lpage>67</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2019.2959609</pub-id><pub-id pub-id-type="pmid">31841402</pub-id></citation></ref>
<ref id="B38"><label>38.</label><citation citation-type="confproc"><person-group person-group-type="editor"><name><surname>He</surname><given-names>K</given-names></name><name><surname>Zhang</surname><given-names>X</given-names></name><name><surname>Ren</surname><given-names>S</given-names></name><name><surname>Sun</surname><given-names>J</given-names></name></person-group>, editors. <conf-name>Deep residual learning for image recognition</conf-name>. <conf-name>Proceedings of the IEEE conference on computer vision and pattern recognition</conf-name> (<year>2016</year>).</citation></ref>
<ref id="B39"><label>39.</label><citation citation-type="confproc"><person-group person-group-type="editor"><name><surname>Huang</surname><given-names>G</given-names></name><name><surname>Liu</surname><given-names>Z</given-names></name><name><surname>Van Der Maaten</surname><given-names>L</given-names></name><name><surname>Weinberger</surname><given-names>KQ</given-names></name></person-group>, editors. <conf-name>Densely connected convolutional networks</conf-name>. <conf-name>Proceedings of the IEEE conference on computer vision and pattern recognition</conf-name> (<year>2017</year>).</citation></ref>
<ref id="B40"><label>40.</label><citation citation-type="confproc"><person-group person-group-type="editor"><name><surname>Szegedy</surname><given-names>C</given-names></name><name><surname>Vanhoucke</surname><given-names>V</given-names></name><name><surname>Ioffe</surname><given-names>S</given-names></name><name><surname>Shlens</surname><given-names>J</given-names></name><name><surname>Wojna</surname><given-names>Z</given-names></name></person-group>, editors. <conf-name>Rethinking the inception architecture for computer vision</conf-name>. <conf-name>Proceedings of the IEEE conference on computer vision and pattern recognition</conf-name> (<year>2016</year>).</citation></ref>
<ref id="B41"><label>41.</label><citation citation-type="confproc"><person-group person-group-type="editor"><name><surname>Tan</surname><given-names>M</given-names></name><name><surname>Le</surname><given-names>Q</given-names></name></person-group>, editors. <conf-name>Efficientnetv2: smaller models and faster training</conf-name>. <conf-name>International conference on machine learning</conf-name> (<year>2021</year>). <publisher-name>PMLR</publisher-name>.</citation></ref>
<ref id="B42"><label>42.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Siapno</surname><given-names>AE</given-names></name><name><surname>Brendan</surname><given-names>CY</given-names></name><name><surname>Daniels</surname><given-names>D</given-names></name><name><surname>Bolagani</surname><given-names>A</given-names></name><name><surname>Kwan</surname><given-names>L</given-names></name><name><surname>Walker</surname><given-names>D</given-names></name><etal/></person-group>. <article-title>Measurement accuracy of 3-dimensional mapping technologies versus standard goniometry for angle assessment</article-title>. <source>J Pediatr Urol</source>. (<year>2020</year>) <volume>16</volume>(<issue>5</issue>):<fpage>547</fpage>&#x2013;<lpage>54</lpage>. <pub-id pub-id-type="doi">10.1016/j.jpurol.2020.08.021</pub-id><pub-id pub-id-type="pmid">32980263</pub-id></citation></ref>
<ref id="B43"><label>43.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fernandez</surname><given-names>N</given-names></name><name><surname>Fl&#x00F3;rez-Valencia</surname><given-names>L</given-names></name><name><surname>Prada</surname><given-names>JG</given-names></name><name><surname>Chua</surname><given-names>M</given-names></name><name><surname>Villanueva</surname><given-names>C</given-names></name></person-group>. <article-title>Standardization of penile angle estimation with a semi-automated algorithm</article-title>. <source>J Pediatr Urol</source>. (<year>2021</year>) <volume>17</volume>(<issue>2</issue>):<fpage>226</fpage>.<comment>e1&#x2013;.e6</comment>. <pub-id pub-id-type="doi">10.1016/j.jpurol.2021.01.006</pub-id></citation></ref>
<ref id="B44"><label>44.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname><given-names>J</given-names></name><name><surname>Sun</surname><given-names>K</given-names></name><name><surname>Cheng</surname><given-names>T</given-names></name><name><surname>Jiang</surname><given-names>B</given-names></name><name><surname>Deng</surname><given-names>C</given-names></name><name><surname>Zhao</surname><given-names>Y</given-names></name><etal/></person-group>. <article-title>Deep high-resolution representation learning for visual recognition</article-title>. <source>IEEE Trans Pattern Anal Mach Intell</source>. (<year>2020</year>) <volume>43</volume>(<issue>10</issue>):<fpage>3349</fpage>&#x2013;<lpage>64</lpage>. <pub-id pub-id-type="doi">10.1109/TPAMI.2020.2983686</pub-id></citation></ref>
<ref id="B45"><label>45.</label><citation citation-type="confproc"><person-group person-group-type="editor"><name><surname>Deng</surname><given-names>J</given-names></name><name><surname>Dong</surname><given-names>W</given-names></name><name><surname>Socher</surname><given-names>R</given-names></name><name><surname>Li</surname><given-names>L-J</given-names></name><name><surname>Li</surname><given-names>K</given-names></name><name><surname>Fei-Fei</surname><given-names>L</given-names></name></person-group>, editors. <conf-name>Imagenet: a large-scale hierarchical image database</conf-name>. <conf-name>2009 IEEE conference on computer vision and pattern recognition</conf-name> (<year>2009</year>). <publisher-name>IEEE</publisher-name>.</citation></ref>
<ref id="B46"><label>46.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abbas</surname><given-names>TO</given-names></name><name><surname>Hatem</surname><given-names>M</given-names></name><name><surname>Chandra</surname><given-names>P</given-names></name></person-group>. <article-title>Plate objective scoring tool: a new preoperative indicator of penile curvature degree in children with distal hypospadias</article-title>. <source>Int J Urol</source>. (<year>2022</year>) <volume>29</volume>(<issue>6</issue>):<fpage>511</fpage>&#x2013;<lpage>5</lpage>. <pub-id pub-id-type="doi">10.1111/iju.14822</pub-id><pub-id pub-id-type="pmid">35229353</pub-id></citation></ref></ref-list>
</back>
</article>