<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Built Environ.</journal-id>
<journal-title>Frontiers in Built Environment</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Built Environ.</abbrev-journal-title>
<issn pub-type="epub">2297-3362</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1026225</article-id>
<article-id pub-id-type="doi">10.3389/fbuil.2022.1026225</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Built Environment</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Automatic assessment of roofs conditions using artificial intelligence (AI) and unmanned aerial vehicles (UAVs)</article-title>
<alt-title alt-title-type="left-running-head">Alzarrad et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fbuil.2022.1026225">10.3389/fbuil.2022.1026225</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Alzarrad</surname>
<given-names>Ammar</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1110621/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Awolusi</surname>
<given-names>Ibukun</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/630626/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Hatamleh</surname>
<given-names>Muhammad T.</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2016902/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Terreno</surname>
<given-names>Saratu</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Department of Civil Engineering</institution>, <institution>Marshall University</institution>, <addr-line>Huntington</addr-line>, <addr-line>WV</addr-line>, <country>United States</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>School of Civil &#x26; Environmental Engineering and Construction Management</institution>, <institution>The University of Texas at San Antonio</institution>, <addr-line>San Antonio</addr-line>, <addr-line>TX</addr-line>, <country>United States</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Department of Civil Engineering</institution>, <institution>The University of Jordan</institution>, <addr-line>Amman</addr-line>, <country>Jordan</country>
</aff>
<aff id="aff4">
<sup>4</sup>
<institution>Department of Civil Engineering and Construction</institution>, <institution>Bradley University</institution>, <addr-line>Peoria</addr-line>, <addr-line>IL</addr-line>, <country>United States</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1030042/overview">Amir Mahdiyar</ext-link>, University of Science Malaysia, Malaysia</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1694998/overview">Salman Riazi Mehdi Riazi</ext-link>, Universiti Sains Malaysia (USM), Malaysia</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/715944/overview">Paul Netscher</ext-link>, Independent researcher, Perth, WA, Australia</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Ammar Alzarrad, <email>alzarrad@marshall.edu</email>
</corresp>
<fn fn-type="other">
<p>This article was submitted to Construction Management, a section of the journal Frontiers in Built Environment</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>14</day>
<month>10</month>
<year>2022</year>
</pub-date>
<pub-date pub-type="collection">
<year>2022</year>
</pub-date>
<volume>8</volume>
<elocation-id>1026225</elocation-id>
<history>
<date date-type="received">
<day>23</day>
<month>08</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>03</day>
<month>10</month>
<year>2022</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2022 Alzarrad, Awolusi, Hatamleh and Terreno.</copyright-statement>
<copyright-year>2022</copyright-year>
<copyright-holder>Alzarrad, Awolusi, Hatamleh and Terreno</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>Building roof inspections must be performed regularly to ensure repairs and replacements are done promptly. These inspections get overlooked on sloped roofs due to the inefficiency of manual inspections and the difficulty of accessing sloped roofs. Walking a roof to inspect each tile is time-consuming, and as the roof slope increases, this difficulty increases the time needed for an inspection. Moreover, there is an intrinsic safety risk involved. Falls from roofs tend to cause severe and expensive injuries. The emergence of new sensing technologies and artificial intelligence (AI) such as high-resolution imagery and deep learning has enabled humans to move beyond the concept of using manual labor in damage assessments. It has brought significant advantages in the field of safety management, and it can be a substitute for the traditional assessment of roofs. This study uses unmanned aerial vehicles (UAVs) and deep learning technology to perform sloped roof inspections effectively, thus eliminating the safety risk involved in traditional manual inspections. This study utilizes UAVs and deep learning to automatically collect and classify roof imagery to identify missing shingles on the roof. The proposed research can help real estate agents, insurance companies, and others make better and more informed decisions about roof conditions. Future research could be refining the model to deal with different types of defects in addition to missing shingles.</p>
</abstract>
<kwd-group>
<kwd>roof inspections</kwd>
<kwd>unmanned aerial vehicles</kwd>
<kwd>artificial intelligence</kwd>
<kwd>convolutional neural network</kwd>
<kwd>deep learning</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1">
<title>Introduction</title>
<p>Rooftops could become obsolete because of the loss of shingles, or they get completely damaged due to storms and hail. There are millions of dollars in property insurance claims filed by homeowners every year, resulting from these damages. There is an inherent delay in the claims process, which can be hectic for customers. To mitigate this problem, insurance companies prefer periodic assessments of rooftops. Periodic assessment is imperative as it can cut down the cost significantly. However, two reasons hinder the periodic assessment process. The first reason is the risk of falling from the roof, and the second is the extra cost due to inaccuracy and human error. Falling from heights is one of the most significant causes of accidents in the construction industry. Construction workers in general and roofers, in particular, are exposed to risk when they get on a roof simply due to gravity and physical limitations such as weight or age that increase the risk of climbing on a roof. According to a study done by the Center for Construction Research and Training (CPWR), fatal falls to a lower level accounted for more than one in three (36.4%) construction deaths in 2020. The study further states that roofs were the primary source of those fatal injuries (<xref ref-type="bibr" rid="B12">Brown et al., 2021</xref>).</p>
<p>The incorporation of Artificial Intelligence (AI) and drones has enabled the performance of high-level operations without the intervention of humans. Along with other benefits, it can efficiently inspect the infrastructure. The overall drone inspection and monitoring market is expected to grow from USD 9.1 billion in 2021 to USD 33.6 billion by 2030, at a compound annual growth rate of 15.7% from 2021 to 2030 (<xref ref-type="bibr" rid="B18">Intelligence, 2022</xref>). Deep learning, a subpart of Artificial Intelligence, has shown promising results when it comes to computer vision applications. In particular, Convolutional Neural Network (CNN) can process high-level drone imagery and perform classification and object detection <italic>via</italic> different algorithms (i.e., YOLO, Resnet-50, VGG-f, and Alexnet) (<xref ref-type="bibr" rid="B22">Mseddi et al., 2021</xref>). Also, it is a modern approach to data classification which has shown recent advancements in unsupervised image classification, in some cases producing results with higher accuracy than humans (<xref ref-type="bibr" rid="B31">Wan et al., 2014</xref>).</p>
<p>The deployment of this category of technologies has not been explored for the effective inspection of sloped roofs with the capacity to reduce and eliminate the safety risks associated with the conventional roof inspection approach. To bridge this gap, this study investigates the implementation of UAVs and deep learning technology for the inspection of sloped roofs through the automatic collection and classification of roof imagery to identify missing shingles on the roof. A residential roof condition assessment method using techniques from deep learning is presented in this research. The proposed method operates on residential roofs and divides the task into two stages; 1) using drones to collect high-resolution roof images; and 2) developing and training a deep learning model, convolutional neural network (CNN), to classify roofs into two categories of good and bad conditions based on missing shingles.</p>
</sec>
<sec id="s2">
<title>Literature review</title>
<p>Many research studies have reported the application of deep learning for image processing, mainly for concrete structures and road condition assessment. This section presents a review of these applications starting with road condition assessment, followed by concrete structure condition assessment, and then wrapping up the review with the sparse application for roof assessment necessitating the need for this study. The first subsection below will summarize the work that has been done in the area of road assessment using Artificial Intelligence methods.</p>
<sec id="s2-1">
<title>Road condition assessment</title>
<p>Many researchers have been working to optimize road condition assessment, an important research area worthy of exploration due, for instance, to the high cost associated with road maintenance and the fact that road infrastructure globally is often not in its best shape. <xref ref-type="bibr" rid="B30">Ukhwah et al. (2019)</xref> proposed a novel approach for asphalt pavement road detection using the YOLO neural network. Different YOLO versions such as Yolo v3, Yolo v3 Tiny, and Yolo v3 SPP are evaluated based on accuracy and mean average precision. The results show that the mAP values of Yolo v3, Yolo v3 Tiny, and Yolo v3 SPP were 83.43%, 79.33%, and 88.93%, with accuracies of 64.45%, 53.26%, and 72.10%, respectively. It takes only 0.04&#xa0;s to detect the image, which signifies the robustness of their proposed models. In a recent study, (<xref ref-type="bibr" rid="B23">Mubashshira et al., 2020</xref>) proposed a new model for crack detection on roads using unsupervised learning of deep learning technology. Color histograms of roads are used for the analysis of road pavements in their study. Afterward, K clustering and OTSU thresholding were employed for segmentation in order to detect the cracks. Results showed that the model performed well in detecting and localizing the damage in the image. In another study, (<xref ref-type="bibr" rid="B29">Sizyakin et al., 2020</xref>) proposed a new model for crack detection on roads using deep learning. In their work, they used the publicly available dataset CRACK500 and trained them on the state-of-the-art object detection algorithm U-net. Moreover, the morphological fitting is used to increase the binary map so the model can detect cracks with higher accuracy. <xref ref-type="bibr" rid="B6">Aravindkumar et al. (2021)</xref> proposed a model for automatic crack detection on roads using the deep learning technique. A total of 3,533 road images were taken as a dataset of roads of Tamil Nadu, India. 
They used the transfer learning technique with the ResNet-152 classifier and the incorporation of a faster region-based convolutional neural network (Faster R-CNN). Images were trained based on a variety of damages and cracks, and the relevant authority is then notified about the locations of these damages. From the results, it was concluded that their model worked effectively.</p>
<p>
<xref ref-type="bibr" rid="B8">Bhat et al. (2020)</xref> proposed a model for crack detection on roads using a deep learning technique. In their work, they exploited various models from traditional CNN and image processing to segmentation. A variety of classifiers were also analyzed; however, based on the results and findings, CNN outperformed all other models due to its high accuracy. <xref ref-type="bibr" rid="B16">Elghaish et al. (2021)</xref> proposed a model for the classification and detection of highway cracks using deep learning. A total of 4,663 images were taken as a dataset, which were classified into subparts as &#x201c;horizontal cracks&#x201d;, &#x201c;horizontal and vertical cracks&#x201d; and &#x201c;diagonal cracks&#x201d;. State-of-the-art object detection algorithms were used and a CNN model was developed in order to increase the accuracy of the proposed model. From the results, it was concluded that the pre-trained GoogLeNet model has a higher accuracy of 89.08%. However, the newly created CNN model exceeded the accuracy of all other algorithms, with a 97.62% accuracy rate, with the use of the Adam optimizer.</p>
<p>
<xref ref-type="bibr" rid="B9">Bhavya et al. (2021)</xref> proposed a model for pothole detection using deep learning classification techniques. Images that were further divided into roads with potholes and roads without potholes were taken. They used a state-of-the-art pre-trained model, ResNet-50, as a classifier. From the results, it was concluded that the model can accurately classify between different kinds of roads and can replace external manpower in assessing road conditions. <xref ref-type="bibr" rid="B26">Ping et al. (2020)</xref> proposed an efficient deep learning model for detecting potholes using state-of-the-art object detection models such as YOLO, SSD, HOG with SVM, and Faster-RCNN. The images were pre-processed and labeled accordingly. Their results showed that YOLO performed better than the other object detectors owing to its higher accuracy and faster computation. <xref ref-type="bibr" rid="B25">Pan et al. (2018)</xref> proposed a new model for pothole detection on roads using deep learning and UAVs. In their study, spectral images were taken using UAVs and, by the use of machine learning algorithms such as support vector machine (SVM) and random forest, data was classified into normal pavements and pavements with potholes and cracks. They evaluated different models in their research and concluded that remote sensing <italic>via</italic> UAVs can offer an alternative to the traditional assessment of roads.</p>
<p>
<xref ref-type="bibr" rid="B7">Arman et al. (2020)</xref> presented a model for the classification and detection of road damage using deep learning models. Images were taken from a smartphone camera in the city of Dhaka. Damages such as potholes, cracks, and raveling were taken into account for this model. In their model, images were fed into Faster R-CNN and R-CNN for object detection, and the support vector machine algorithm of machine learning was used for classification purposes. The result demonstrated that they achieved 98.88% damage detection and classification accuracy. <xref ref-type="bibr" rid="B33">Wang et al. (2018)</xref> proposed a deep learning model for damage detection and classification in road networks. They used the SSD and Faster R-CNN in their work for detection and classification. The results were demonstrated in the IEEE BigData Road Damage Detection Challenge, and according to the results, their model performed better. <xref ref-type="bibr" rid="B4">Alfarrarjeh et al. (2018)</xref> suggested a deep learning model for road damage detection using images taken from a mobile phone. According to their model, they trained their object detector algorithm on various images of damages as defined by the Japan Road Association. Results show that their model performed well despite low imagery resolution and achieved an F1 (Accuracy) score of up to 0.62. <xref ref-type="bibr" rid="B28">Seydi et al. (2020)</xref> proposed a model for the assessment of road network damages caused by an earthquake. They used LIDAR point clouds in their model. The proposed model is based on three steps. In the first step, features of the LIDAR data were extracted using CNN. In the second step, another neural network, a multilayer perceptron (MLP), is used to detect the debris in the data (images). The output from the MLP was fed into another neural network to classify the road segments into blocked and unblocked. Their model performed well and achieved an accuracy of 97%.</p>
<p>
<xref ref-type="bibr" rid="B21">Liu et al. (2020)</xref> proposed a novel approach for detecting damages in roads using a deep learning model. At first, they used a segmentation technique to detect road areas, then the data was fed into the object detection models You Only Look Once (YOLOv4) and Faster Regional Convolutional Neural Network (F-RCNN) for detecting the damages. The result shows that the proposed model achieved good object detection capability in the IEEE Global Road Damage Detection Challenge 2020. <xref ref-type="bibr" rid="B3">Ale et al. (2018)</xref> proposed a faster and more accurate deep learning model for detecting damages to roads. In their model, classification and bounding box regression are done in a single stage as it is much faster than two stages, which are classification in one step and bounding box regression in the second step. They trained the images on several one-stage models and inferred that the RetinaNet model can detect road damages with high accuracy. <xref ref-type="bibr" rid="B10">Bojarczak et al. (2021)</xref> proposed a model for damage diagnosis of railways using a deep learning model and UAVs. They used a fully convolutional network (i.e., semantic segmentation) to locate the railhead&#x2019;s defects. Their model is based on the OpenCV Python library within a TensorFlow work environment. The results showed that their model could accurately locate defects in the railway with an efficiency rate of 81%.</p>
<p>To summarize, this section shows that deep learning can be used successfully to diagnose and identify damage (including potholes and cracks) on asphalt roads and highways. The next subsection will discuss the assessment of concrete structures for automated crack detection using deep learning and other techniques.</p>
</sec>
<sec id="s2-2">
<title>Concrete structures condition assessment</title>
<p>Concrete structure condition assessment has been of interest to the Federal Department of Transportation for many years, especially the assessment of bridge conditions. <xref ref-type="bibr" rid="B24">Otero et al. (2018)</xref> proposed a model for remote sensing of concrete bridge inspection using deep learning technology. In their work, an unsupervised learning approach is used to extract the features, i.e., cracks, from the images. The dataset consisted of different kinds of damages along with a noise factor. After testing the model, they showed that the algorithm can successfully detect cracks in different kinds of images. <xref ref-type="bibr" rid="B14">Chen et al. (2020)</xref> proposed a model for damage detection in concrete bridges using a deep learning model. Their model is based on the technique of transfer learning, in which an existing state-of-the-art object detection algorithm, YOLOv3, is used for object localization. For the extraction of small features, deformable convolution is used. The tests showed that their model is more effective and takes less time in computation.</p>
<p>
<xref ref-type="bibr" rid="B19">Kim et al. (2020)</xref> proposed a deep learning model for the detection of multiple cracks in concrete. Their model used Masked-RCNN; images of cracks, efflorescence, rebar exposure, and spalling were trained using Masked-RCNN. According to the results, their model achieved a precision of 90.4%, which promises the applicability of Masked-RCNN in damage detection. <xref ref-type="bibr" rid="B27">Rajadurai et al. (2021)</xref> proposed a model using deep learning for automated crack detection in concrete. The transfer learning technique was used, and images (with cracks and no cracks) were fed into the AlexNet model. Stochastic gradient descent was used as an optimizer to prevent high loss. Their model showed an accuracy of 99.99% when tested with test images. <xref ref-type="bibr" rid="B13">Cha et al. (2017)</xref> came up with a novel approach to detecting cracks in concrete automatically, and they used a convolutional neural network for their model. The model was trained on 40,000 images with a 256 &#xd7; 256 pixels resolution. Their model outperformed all previous work on image processing and achieved 98% accuracy. Furthermore, they validated their result by testing data of 50 images of 5,888 &#xd7; 3,584-pixel resolution. The result showed that their model could work in real-time situations.</p>
<p>
<xref ref-type="bibr" rid="B15">Desilva et al. (2018)</xref> proposed a model for automatic detection of concrete cracks <italic>via</italic> deep learning and UAVs. A dataset of 3,500 images was taken with different conditions such as daylight intensity, surface finishes, and humidity. The dataset was divided into training and testing with a ratio of 80/20. They used VGG-16 on their dataset, on which the model performed well. The overall accuracy of this model is 92.27%, which shows the potential of deep learning in the detection of concrete cracks. <xref ref-type="bibr" rid="B20">Kumar et al. (2022)</xref> suggested a model for real-time monitoring of cracks in high-rise concrete buildings. A total of 800 RGB images (480 &#xd7; 480 pixels), collected using UAVs, were fed as input into the You Only Look Once (YOLO-3) algorithm. Images were annotated manually using open-source software. Their model outperformed all other previous work, achieving an accuracy of 94.24%, and their model can process an image in 0.033&#xa0;s.</p>
<p>All in all, detecting cracks in concrete automatically has been investigated by many researchers. Deep learning has been used successfully for this purpose. The next subsection will discuss the automation of roof condition assessment using deep learning and other Artificial Intelligent techniques.</p>
</sec>
<sec id="s2-3">
<title>Roof condition assessment</title>
<p>Up to the date of this research, only two papers have been found on the application of deep learning to automatically assess a residential house roof condition. <xref ref-type="bibr" rid="B17">Hezaveh et al. (2021)</xref> proposed a model for the automatic assessment of hail-damaged roofs. The dataset consisted of roof images that were damaged due to hail. In their model, the researchers used Unmanned Air Vehicle (UAV) to capture high-resolution RGB imagery. The images were trained in different types of convolutional networks. They found that their model can accurately identify hail damage on residential roofs. <xref ref-type="bibr" rid="B32">Wang et al. (2019)</xref> presented a residential roof condition assessment method using techniques from deep learning. The proposed method operates on individual roofs and divides the task into two stages: 1) roof segmentation, followed by 2) condition classification of the segmented roof regions. The proposed algorithm has yielded promising results and has outperformed traditional machine learning methods.</p>
<p>Obviously, the utilization of deep learning for roof condition assessment is still lacking and needs more investigation. This paper will focus on missing roof shingles, and it will use the YOLO v5 algorithm for the detection of damaged areas in roofs. The following section will discuss the research methodology in detail.</p>
</sec>
</sec>
<sec sec-type="methods" id="s3">
<title>Methodology</title>
<p>The aim of the study is to develop a deep learning model which can automatically locate and identify missing shingles in the roof. This study explores new possibilities to evaluate and localize the damaged areas in roofs using the computer vision technique. The study utilizes the transfer learning technique of deep learning technology, in which a pre-trained network is taken and fine-tuned until the loss function is at a minimum (<xref ref-type="bibr" rid="B11">Brown and Miller, 2018</xref>). This work incorporates the YOLO v5 algorithm for the detection of missing shingles in roofs. YOLO v5 is a state-of-the-art object detection algorithm that has high accuracy and offers faster computation. YOLO v5 divides input images into grids, and each cell is responsible for object detection. It can effectively extract high and low-level features of input images based on the healthiness of the dataset. The model utilizes a supervised learning technique in which input data is already provided along with the labeled output. Here, a pool of images is taken and annotated using a labeling tool, which is fed into the object detection algorithm (<xref ref-type="bibr" rid="B2">Aguera-Vega et al., 2017</xref>).</p>
<p>As shown in <xref ref-type="fig" rid="F1">Figure 1</xref>, high-resolution RGB images of damaged roofs are captured using a UAV, which are annotated using a labeling toolkit. The dataset is then classified into test and train sets and is further trained on YOLO v5 using the Python library &#x201c;Tensor Flow Object Detection API&#x201d;. TensorFlow is an open-source platform developed by Google, with special emphasis on deep learning models. Once the dataset is trained, its accuracy is checked against the test images for model evaluation (<xref ref-type="bibr" rid="B1">Adams et al., 2014</xref>).</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Methodology of model.</p>
</caption>
<graphic xlink:href="fbuil-08-1026225-g001.tif"/>
</fig>
<p>The whole process utilizes a virtual machine using Google Colab for faster computational purposes. The study is classified into three stages.</p>
<sec id="s3-1">
<title>Stage no. 1: Data acquisition</title>
<p>Gathering healthy data is one of the most important parameters in deep learning model training (<xref ref-type="bibr" rid="B5">Alzarrad et al., 2021</xref>). The researcher suggested utilizing a drone to capture high-resolution images. The dataset acquired is annotated using an open-source tool (labelImg). The ground truth acquired from the labeled dataset is then fed to an algorithm for training.</p>
</sec>
<sec id="s3-2">
<title>Stage no. 2: Training the model</title>
<p>In this step, the transfer learning technique is applied, where the dataset is classified into test and train sets. A pretrained YOLO v5 model is utilized for this purpose. Depending on the size of the dataset, batch size and epochs are then evaluated. &#x201c;Tensor flow object detection API&#x201d; is then used for training and validating the model.</p>
</sec>
<sec id="s3-3">
<title>Stage no. 3: Model evaluation</title>
<p>In this stage, the model is evaluated based on the accuracy of the prediction. The model is then checked to see if predictions are made accurately. Random test set images are then fed to the model to check whether it accurately localizes the regions of interest in the images.</p>
</sec>
</sec>
<sec id="s4">
<title>Case study</title>
<p>To illustrate an implementation of the presented model, a case study is used to verify and validate the model&#x2019;s robustness. The researchers used a DJI Matrice 300 RTK drone equipped with a Zenmuse H20T, which has the capacity to zoom up to 20x (<xref ref-type="fig" rid="F2">Figure 2</xref>), to collect roof images for two different houses in Hurricane, West Virginia.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Collecting data using DJI Matrice 300 RTK drone.</p>
</caption>
<graphic xlink:href="fbuil-08-1026225-g002.tif"/>
</fig>
<p>The drone covered each roof by means of manually composing and capturing mainly oblique aerial images. The individual roof inspection images were carefully composed by means of &#x201c;first person view&#x201d; (FPV); a facility which presents the drone operator with a real-time view of the scene as it is being captured by the on-board camera. To ensure completeness, a methodical sequence was followed in a flight that generally went around the roof in a counterclockwise fashion. Given the complex design and considerable height of the roof profiles (the highest point of the roof being some 28&#xa0;ft above ground level) the inspection by air certainly saved considerable costs in time and money, and generally reduced the risk of injury and the potential for disturbance to the occupants. <xref ref-type="fig" rid="F3">Figure 3</xref> below shows the most common defects, namely cracked or slipped roof tiles, identifiable on the manually composed aerial images.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Common defects identified on the manually composed aerial images.</p>
</caption>
<graphic xlink:href="fbuil-08-1026225-g003.tif"/>
</fig>
<p>A total of three hundred and fifty (350) photos were collected from the first house and used to train the model. Two hundred (200) photos were collected from the second house and used to validate the model accuracy. All photos were annotated using an open-source tool (labelImg). To find and remove duplicate and near-duplicate images in the image dataset, the researchers used FiftyOne, an open-source ML developer tool. FiftyOne provides a method to compute the uniqueness of every image in a dataset, which results in a score for every image indicating how unique the contents of the image are with respect to all other images. Images with a low uniqueness value are potential duplicates that the researchers explored to remove if needed.</p>
<p>
<xref ref-type="fig" rid="F4">Figure 4A</xref> shows a sample of the photos that have been used to train the model. <xref ref-type="fig" rid="F4">Figure 4B</xref> shows a sample of the photos that have been used to validate the model. The accuracy of the model using training data was 72%, while the accuracy of the model using the validation photos from the second house was 81%. The model did not pick up 19% of the defects, mainly because of poor photo quality or defects being too small. The model did not detect a defect when there was no defect (no false-positive detections).</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>
<bold>(A)</bold> (left) shows the accuracy of the model for the training photos, which is 72%. <bold>(B)</bold> (right) shows the accuracy of the model for validation photos which is 81%.</p>
</caption>
<graphic xlink:href="fbuil-08-1026225-g004.tif"/>
</fig>
<p>The model is capable of detecting missing shingles only and no other defects have been investigated for this stage of the research. The researchers tested the model&#x2019;s accuracy by comparing the model&#x2019;s prediction <italic>vs</italic>. humans&#x2019; prediction. A sample of the images that have been used to validate the model was sent to a graduate student. The student went through 100 randomly selected photos and categorized them as damaged or undamaged. The researchers compared the student&#x2019;s prediction results with the model&#x2019;s results and found that the model accuracy is good. The model accuracy turned out to be 0.81 (81%), indicating that 81 correct predictions were made out of 100 total examples. Further, to fully evaluate the effectiveness of the model, the researchers examine the model&#x2019;s precision. Precision quantifies the number of positive class predictions that actually belong to the positive class (i.e., how many good condition photos have been classified as good conditions). The model precision was 0.86, as shown in <xref ref-type="fig" rid="F5">Figure 5</xref>.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>The model precision.</p>
</caption>
<graphic xlink:href="fbuil-08-1026225-g005.tif"/>
</fig>
<p>Further, the researchers examine the model F1 score. The F1 score is the harmonic mean of precision and recall, combining both into a single measure of the model&#x2019;s predictive performance. An F1 score is considered perfect when it is 1, while the model is a total failure when it is 0. The F1 score for this model was 0.832, which is good as shown in <xref ref-type="fig" rid="F6">Figure 6</xref>.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>The F1 score for the model.</p>
</caption>
<graphic xlink:href="fbuil-08-1026225-g006.tif"/>
</fig>
</sec>
<sec sec-type="conclusion" id="s5">
<title>Conclusion</title>
<p>As important as it is to maintain the integrity of roofs in residential buildings, the routine inspection required to achieve this by ascertaining the characteristics and recommending the need for repairs or replacements is usually a daunting task, particularly, when it comes to sloped roofs. The level of inefficiencies and worker injuries experienced in current roof inspection practices expose the weaknesses in traditional inspection methods. The results of this research show that the implementation of deep learning techniques for processing imagery data has the potential to help mitigate or eliminate these challenges. In this study, the researchers used a transfer learning methodology by implementing YOLOv5 to automatically identify damaged areas specifically missing shingles on residential roofs. The model which was manually tested with a real case study shows a high level of performance indicating the potential of this technique for the automatic assessment of roofs. The model developed in the study after training and validation was found to have an accuracy of approximately 81% and a precision of 86% despite the limited amount of dataset. Although the relatively low amount of data used in the modeling is a limitation of this study, the level of accuracy and precision achieved further reinforces the potency of the methodology adopted and the model developed for the roof assessment. The use of this model can significantly benefit roofers and insurance companies for their timely decisions. This study is a steppingstone toward high-end roof damage assessment; with a large amount of dataset, the model accuracy could be improved. Lastly, for future research, the model can be converted to TensorFlow Lite to allow its use on mobile and embedded devices such as Android and iOS smartphones and the Raspberry Pi.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec id="s7">
<title>Ethics statement</title>
<p>Written informed consent was obtained from the individual for the publication of any potentially identifiable images or data included in this article.</p>
</sec>
<sec id="s8">
<title>Author contributions</title>
<p>All authors listed have made a substantial, direct and intellectual contribution to the work, and approved it for publication.</p>
</sec>
<sec id="s9">
<title>Funding</title>
<p>The research leading to the publication of this article was partially supported by the Department of Civil Engineering at Marshall University in Huntington, West Virginia, United States of America.</p>
</sec>
<sec sec-type="COI-statement" id="s10">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s11">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Adams</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Levitan</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Friedland</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>High resolution imagery collection for postdisaster studies utilizing unmanned aircraft systems (UAS)</article-title>. <source>Photogramm. Eng. remote Sens.</source> <volume>80</volume> (<issue>12</issue>), <fpage>1161</fpage>&#x2013;<lpage>1168</lpage>. <pub-id pub-id-type="doi">10.14358/pers.80.12.1161</pub-id> </citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aguera-Vega</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Carvajal-Ramirez</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Martinez-Carricondo</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Accuracy of digital surface models and orthophotos derived from unmanned aerial vehicle photogrammetry</article-title>. <source>J. Surv. Eng.</source> <volume>143</volume> (<issue>2</issue>), <fpage>04016025</fpage>. <pub-id pub-id-type="doi">10.1061/(asce)su.1943-5428.0000206</pub-id> </citation>
</ref>
<ref id="B3">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Ale</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Road damage detection using RetinaNet</article-title>,&#x201d; in <conf-name>Proceedings of the 2018 IEEE International Conference on Big Data (Big Data)</conf-name>, <conf-loc>WA, USA</conf-loc>, <conf-date>December 2018</conf-date>. </citation>
</ref>
<ref id="B4">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Alfarrarjeh</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Trivedi</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>S. H.</given-names>
</name>
<name>
<surname>Shahabi</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>A deep learning approach for road damage detection from smartphone images</article-title>,&#x201d; in <conf-name>Proceedings of the 2018 IEEE International Conference on Big Data (Big Data)</conf-name>, <conf-loc>WA, USA</conf-loc>, <conf-date>December 2018</conf-date>. </citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Alzarrad</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Emanuels</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Imtiaz</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Akbar</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Automatic assessment of buildings location fitness for solar panels installation using drones and neural network</article-title>. <source>CivilEng</source> <volume>2</volume> (<issue>4</issue>), <fpage>1052</fpage>&#x2013;<lpage>1064</lpage>. <pub-id pub-id-type="doi">10.3390/civileng2040056</pub-id> </citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aravindkumar</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Varalakshmi</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Alagappan</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Automatic road surface crack detection using Deep Learning Techniques</article-title>. <source>Artif. Intell. Technol.</source> <volume>806</volume>, <fpage>37</fpage>&#x2013;<lpage>44</lpage>. <pub-id pub-id-type="doi">10.1007/978-981-16-6448-9_4</pub-id> </citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Arman</surname>
<given-names>M. S.</given-names>
</name>
<name>
<surname>Hasan</surname>
<given-names>M. M.</given-names>
</name>
<name>
<surname>Sadia</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Shakir</surname>
<given-names>A. K.</given-names>
</name>
<name>
<surname>Sarker</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Himu</surname>
<given-names>F. A.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Detection and classification of road damage using R-CNN and faster R-CNN: A deep learning approach</article-title>. <source>Cyber Secur. Comput. Sci.</source> <volume>325</volume>, <fpage>730</fpage>&#x2013;<lpage>741</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-52856-0_58</pub-id> </citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bhat</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Naik</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Gaonkar</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Sawant</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Aswale</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Shetgaonkar</surname>
<given-names>P.</given-names>
</name>
</person-group>.<year>2020</year> <article-title>A survey on road crack detection techniques</article-title>. <conf-name>Proceedings of the 2020 International Conference on Emerging Trends in Information Technology and Engineering (Ic-ETITE)</conf-name>, <conf-date>February 2020</conf-date>. <conf-loc>Vellore, India</conf-loc>: <pub-id pub-id-type="doi">10.1109/ic-etite47903.2020.67</pub-id> </citation>
</ref>
<ref id="B9">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Bhavya</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Sharmila</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Sai Sadhvi</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Prasanna</surname>
<given-names>C. M.</given-names>
</name>
<name>
<surname>Ganesan</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>Pothole detection using deep learning</article-title>,&#x201d; in <source>Smart technologies in data science and communication</source> (<publisher-loc>Singapore</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>233</fpage>&#x2013;<lpage>243</lpage>. <pub-id pub-id-type="doi">10.1007/978-981-16-1773-7_19</pub-id> </citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bojarczak</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Lesiak</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>UAVs in rail damage image diagnostics supported by deep-learning networks</article-title>. <source>Open Eng.</source> <volume>11</volume> (<issue>1</issue>), <fpage>339</fpage>&#x2013;<lpage>348</lpage>. <pub-id pub-id-type="doi">10.1515/eng-2021-0033</pub-id> </citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Brown</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Miller</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>The use of unmanned aerial vehicles for sloped roof inspections &#x2013; considerations and constraints</article-title>. <source>J. Facil. Manag. Educ. Res.</source> <volume>2</volume> (<issue>1</issue>), <fpage>12</fpage>&#x2013;<lpage>18</lpage>. <pub-id pub-id-type="doi">10.22361/jfmer/93832</pub-id> </citation>
</ref>
<ref id="B12">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Brown</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Harris</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Brooks</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Sue Dong</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2021</year>). <source>Data bulletin. Fatal injury trends in the construction industry</source>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://www.cpwr.com/wp-content/uploads/DataBulletin-February-2021.pdf">https://www.cpwr.com/wp-content/uploads/DataBulletin-February-2021.pdf</ext-link>
</comment>. </citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cha</surname>
<given-names>Y.-J.</given-names>
</name>
<name>
<surname>Kang</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Damage detection with an autonomous UAV using Deep Learning</article-title>. <source>Sensors Smart Struct. Technol. Civ. Mech. Aerosp. Syst.</source> <volume>10598</volume>, <fpage>1059804</fpage>. <pub-id pub-id-type="doi">10.1117/12.2295961</pub-id> </citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Ye</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Bridge damage detection and recognition based on Deep Learning</article-title>. <source>J. Phys. Conf. Ser.</source> <volume>1626</volume> (<issue>1</issue>), <fpage>012151</fpage>. <pub-id pub-id-type="doi">10.1088/1742-6596/1626/1/012151</pub-id> </citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Desilva</surname>
<given-names>W. R.</given-names>
</name>
<name>
<surname>Lucena</surname>
<given-names>D. S.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Concrete cracks detection based on deep learning image classification</article-title>. <source>Proceedings</source> <volume>2</volume> (<issue>8</issue>), <fpage>489</fpage>. <pub-id pub-id-type="doi">10.3390/icem18-05387</pub-id> </citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Elghaish</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Talebi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Abdellatef</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Matarneh</surname>
<given-names>S. T.</given-names>
</name>
<name>
<surname>Hosseini</surname>
<given-names>M. R.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Developing a new deep learning CNN model to detect and classify highway cracks</article-title>. <source>J. Eng. Des. Technol.</source> <volume>20</volume>, <fpage>993</fpage>&#x2013;<lpage>1014</lpage>. <comment>ahead-of-print(ahead-of-print)</comment>. <pub-id pub-id-type="doi">10.1108/jedt-04-2021-0192</pub-id> </citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hezaveh</surname>
<given-names>M. M.</given-names>
</name>
<name>
<surname>Kanan</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Salvaggio</surname>
<given-names>C.</given-names>
</name>
</person-group>, <year>2017</year>. <article-title>Roof damage assessment using deep learning</article-title>. <conf-name>Proceedings of the 2017 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)</conf-name>, <conf-date>October 2017</conf-date>. <conf-loc>DC, USA</conf-loc>: <pub-id pub-id-type="doi">10.1109/aipr.2017.8457946</pub-id> </citation>
</ref>
<ref id="B18">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Intelligence</surname>
<given-names>I.</given-names>
</name>
</person-group> (<year>2022</year>). <source>Drone market outlook in 2022: Industry growth trends, market stats and forecast. Business Insider</source>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://www.businessinsider.com/drone-industry-analysis-market-trends-growth-forecasts">https://www.businessinsider.com/drone-industry-analysis-market-trends-growth-forecasts</ext-link>
</comment>. </citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kim</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Cho</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Automated multiple concrete damage detection using instance segmentation deep learning model</article-title>. <source>Appl. Sci.</source> <volume>10</volume> (<issue>22</issue>), <fpage>8008</fpage>. <pub-id pub-id-type="doi">10.3390/app10228008</pub-id> </citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kumar</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Batchu</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Swamy</surname>
<given-names>S., N.</given-names>
</name>
<name>
<surname>Kota</surname>
<given-names>S. R.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Real-time concrete damage detection using deep learning for high rise structures</article-title>. <source>IEEE Access</source> <volume>9</volume>, <fpage>112312</fpage>&#x2013;<lpage>112331</lpage>. <pub-id pub-id-type="doi">10.1109/access.2021.3102647</pub-id> </citation>
</ref>
<ref id="B21">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>Deep Network for road damage detection</article-title>,&#x201d; in <conf-name>Proceedings of the 2020 IEEE International Conference on Big Data (Big Data)</conf-name>, <conf-loc>GA, USA</conf-loc>, <conf-date>December 2020</conf-date>. </citation>
</ref>
<ref id="B22">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Mseddi</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Sedrine</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Attia</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>Yolov5 based visual localization for Autonomous Vehicles</article-title>,&#x201d; in <conf-name>Proceedings of the 2021 29th European Signal Processing Conference (EUSIPCO)</conf-name>, <conf-loc>Dublin, Ireland</conf-loc>, <conf-date>August 2021</conf-date>. </citation>
</ref>
<ref id="B23">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Mubashshira</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Azam</surname>
<given-names>M. M.</given-names>
</name>
<name>
<surname>Masudul Ahsan</surname>
<given-names>S. M.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>An unsupervised approach for road surface crack detection</article-title>,&#x201d; in <conf-name>Proceedings of the 2020 IEEE Region 10 Symposium (TENSYMP)</conf-name>, <conf-loc>Dhaka, Bangladesh</conf-loc>, <conf-date>June 2020</conf-date>. <pub-id pub-id-type="doi">10.1109/tensymp50017.2020.9231023</pub-id> </citation>
</ref>
<ref id="B24">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Otero</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Moyou</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Peter</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Otero</surname>
<given-names>C. E.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Towards a remote sensing system for railroad bridge inspections: A concrete crack detection component</article-title>,&#x201d; in <conf-name>Proceedings of the SoutheastCon 2018</conf-name>, <conf-loc>FL, USA</conf-loc>, <conf-date>April 2018</conf-date>. <pub-id pub-id-type="doi">10.1109/secon.2018.8478856</pub-id> </citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pan</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Cervone</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Detection of asphalt pavement potholes and cracks based on the unmanned aerial vehicle multispectral imagery</article-title>. <source>IEEE J. Sel. Top. Appl. Earth Obs. Remote Sens.</source> <volume>11</volume> (<issue>10</issue>), <fpage>3701</fpage>&#x2013;<lpage>3712</lpage>. <pub-id pub-id-type="doi">10.1109/jstars.2018.2865528</pub-id> </citation>
</ref>
<ref id="B26">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Ping</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Gao</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>A deep learning approach for street pothole detection</article-title>,&#x201d; in <conf-name>Proceedings of the 2020 IEEE Sixth International Conference on Big Data Computing Service and Applications (BigDataService)</conf-name>, <conf-loc>Oxford, UK</conf-loc>, <conf-date>August 2020</conf-date>. <pub-id pub-id-type="doi">10.1109/bigdataservice49289.2020.00039</pub-id> </citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rajadurai</surname>
<given-names>R.-S.</given-names>
</name>
<name>
<surname>Kang</surname>
<given-names>S.-T.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Automated vision-based crack detection on concrete surfaces using deep learning</article-title>. <source>Appl. Sci.</source> <volume>11</volume> (<issue>11</issue>), <fpage>5229</fpage>. <pub-id pub-id-type="doi">10.3390/app11115229</pub-id> </citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Seydi</surname>
<given-names>S. T.</given-names>
</name>
<name>
<surname>Rastiveis</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>A deep learning framework for roads network damage assessment using POST-EARTHQUAKE Lidar Data</article-title>. <source>Int. Arch. Photogramm. Remote Sens. Spat. Inf. Sci.</source> <volume>XLII-4/W18</volume>, <fpage>955</fpage>&#x2013;<lpage>961</lpage>. <pub-id pub-id-type="doi">10.5194/isprs-archives-xlii-4-w18-955-2019</pub-id> </citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sizyakin</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Voronin</surname>
<given-names>V. V.</given-names>
</name>
<name>
<surname>Gapon</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Pi&#x17e;urica</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>A deep learning approach to crack detection on road surfaces</article-title>. <source>Artif. Intell. Mach. Learn. Def. Appl. II</source> <volume>11543</volume>, <fpage>1</fpage>&#x2013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1117/12.2574131</pub-id> </citation>
</ref>
<ref id="B30">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Ukhwah</surname>
<given-names>E. N.</given-names>
</name>
<name>
<surname>Yuniarno</surname>
<given-names>E. M.</given-names>
</name>
<name>
<surname>Suprapto</surname>
<given-names>Y. K.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Asphalt pavement pothole detection using Deep Learning method based on Yolo Neural Network</article-title>,&#x201d; in <conf-name>Proceedings of the 2019 International Seminar on Intelligent Technology and Its Applications (ISITIA)</conf-name>, <conf-loc>Surabaya, Indonesia</conf-loc>, <conf-date>August 2019</conf-date>. <pub-id pub-id-type="doi">10.1109/isitia.2019.8937176</pub-id> </citation>
</ref>
<ref id="B31">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Wan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Hoi</surname>
<given-names>S. C.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2014</year>). &#x201c;<article-title>Deep learning for content-based image retrieval</article-title>,&#x201d; in <conf-name>Proceedings of the 22nd ACM International Conference on Multimedia</conf-name>, <conf-loc>Boca Raton, FL</conf-loc>, <conf-date>January 2014</conf-date>. <pub-id pub-id-type="doi">10.1145/2647868.2654948</pub-id> </citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zou</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Qi</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Autonomous damage segmentation and measurement of glazed tiles in historic buildings via Deep Learning</article-title>. <source>Computer-Aided Civ. Infrastructure Eng.</source> <volume>35</volume> (<issue>3</issue>), <fpage>277</fpage>&#x2013;<lpage>291</lpage>. <pub-id pub-id-type="doi">10.1111/mice.12488</pub-id> </citation>
</ref>
<ref id="B33">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>Y. J.</given-names>
</name>
<name>
<surname>Ding</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Kan</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Lu</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Deep proposal and detection networks for road damage detection and classification</article-title>,&#x201d; in <conf-name>Proceedings of the 2018 IEEE International Conference on Big Data (Big Data)</conf-name>, <conf-loc>WA, USA</conf-loc>, <conf-date>December 2018</conf-date>. <pub-id pub-id-type="doi">10.1109/bigdata.2018.8622599</pub-id> </citation>
</ref>
</ref-list>
</back>
</article>