<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Sens.</journal-id>
<journal-title>Frontiers in Sensors</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Sens.</abbrev-journal-title>
<issn pub-type="epub">2673-5067</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">654357</article-id>
<article-id pub-id-type="doi">10.3389/fsens.2021.654357</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Sensors</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Using Deep Learning Neural Network in Artificial Intelligence Technology to Classify Beef Cuts</article-title>
<alt-title alt-title-type="left-running-head">GC et&#x20;al.</alt-title>
<alt-title alt-title-type="right-running-head">Artificial Intelligence Classify Beef Cuts</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>GC</surname>
<given-names>Sunil</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Saidul Md</surname>
<given-names>Borhan</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhang</surname>
<given-names>Yu</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1324800/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Reed</surname>
<given-names>Demetris</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Ahsan</surname>
<given-names>Mostofa</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Berg</surname>
<given-names>Eric</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1011847/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Sun</surname>
<given-names>Xin</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/958444/overview"/>
</contrib>
</contrib-group>
<aff id="aff1">
<label>
<sup>1</sup>
</label>Department of Agricultural and Biosystems Engineering, North Dakota State University, <addr-line>Fargo</addr-line>, <addr-line>ND</addr-line>, <country>United&#x20;States</country>
</aff>
<aff id="aff2">
<label>
<sup>2</sup>
</label>College of Agricultural and Natural Resource Science, Sul Ross State University, <addr-line>Alpine</addr-line>, <addr-line>TX</addr-line>, <country>United&#x20;States</country>
</aff>
<aff id="aff3">
<label>
<sup>3</sup>
</label>Department of Computer Science, North Dakota State University, <addr-line>Fargo</addr-line>, <addr-line>ND</addr-line>, <country>United&#x20;States</country>
</aff>
<aff id="aff4">
<label>
<sup>4</sup>
</label>Department of Animal Sciences, North Dakota State University, <addr-line>Fargo</addr-line>, <addr-line>ND</addr-line>, <country>United&#x20;States</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1126460/overview">Maria Fernanda Silva</ext-link>, Universidad Nacional de Cuyo, Argentina</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/533289/overview">Ver&#xf3;nica Montes-Garc&#xed;a</ext-link>, Universit&#xe9; de Strasbourg, France</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1152467/overview">Shekh Md Mahmudul Islam</ext-link>, University of Dhaka, Bangladesh</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Xin Sun, <email>xin.sun@ndsu.edu</email>
</corresp>
</author-notes>
<pub-date pub-type="epub">
<day>15</day>
<month>06</month>
<year>2021</year>
</pub-date>
<pub-date pub-type="collection">
<year>2021</year>
</pub-date>
<volume>2</volume>
<elocation-id>654357</elocation-id>
<history>
<date date-type="received">
<day>16</day>
<month>01</month>
<year>2021</year>
</date>
<date date-type="accepted">
<day>05</day>
<month>05</month>
<year>2021</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2021 GC, Saidul Md, Zhang, Reed, Ahsan, Berg and Sun.</copyright-statement>
<copyright-year>2021</copyright-year>
<copyright-holder>GC, Saidul Md, Zhang, Reed, Ahsan, Berg and Sun</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these&#x20;terms.</p>
</license>
</permissions>
<abstract>
<p>The objective of this research was to evaluate the deep learning neural network in artificial intelligence (AI) technologies to rapidly classify seven different beef cuts (bone in rib eye steak, boneless rib eye steak, chuck steak, flank steak, New York strip, short rib, and tenderloin). Color images of beef samples were acquired from a laboratory-based computer vision system and collected from the Internet (Google Images) platforms. A total of 1,113 beef cut images were used as training, validation, and testing data subsets for this project. The model developed from the deep learning neural network algorithm was able to classify certain beef cuts (flank steak and tenderloin) up to 100% accuracy. Two pretrained convolutional neural network (CNN) models Visual Geometry Group (VGG16) and Inception ResNet V2 were used to train, validate, and test these models in classifying beef cut images. An image augmentation technique was incorporated in the convolutional neural network models for avoiding the overfitting problems, which demonstrated an improvement in the performance of the image classifier model. The VGG16 model outperformed the Inception ResNet V2 model. The VGG16 model coupled with data augmentation technique was able to achieve the highest accuracy of 98.6% on 116 test images, whereas Inception ResNet V2 accomplished a maximum accuracy of 95.7% on the same test images. Based on the performance metrics of both models, deep learning technology evidently showed a promising effort for beef cuts recognition in the meat science industry.</p>
</abstract>
<kwd-group>
<kwd>beef cuts</kwd>
<kwd>classification</kwd>
<kwd>deep learning</kwd>
<kwd>neural network</kwd>
<kwd>artificial intelligence</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1">
<title>Highlights</title>
<p>
<list list-type="simple">
<list-item>
<p>1. Using TensorFlow deep learning neural network to classify beef&#x20;cuts.</p>
</list-item>
<list-item>
<p>2. Study of the artificial intelligence application in the meat science industry.</p>
</list-item>
<list-item>
<p>3. Validation of a prediction model with high prediction accuracy (up to 100%) for the beef cuts category.</p>
</list-item>
</list>
</p>
</sec>
<sec id="s2">
<title>Introduction</title>
<p>Modern consumers are becoming more interested in the production story for the foods they select and put on their dinner plate. Information regarding the source of the food, nutrition, and product quality has become an important purchase decision factor as household incomes increase. For example, beef quality has been an important factor evidenced by consumers&#x2019; willingness to pay a premium for tender steaks (<xref ref-type="bibr" rid="B40">Lusk et&#x20;al., 2001</xref>; <xref ref-type="bibr" rid="B32">Kukowski et&#x20;al., 2005</xref>). Other studies have shown that nutrient and meat quality profiles are related to muscle fiber composition and the proximate composition of meat cuts (<xref ref-type="bibr" rid="B29">Jung et&#x20;al., 2015</xref>; <xref ref-type="bibr" rid="B30">Jung et&#x20;al., 2016</xref>).</p>
<p>Efforts have been made to profile individual beef muscles&#x2019; nutrition and palatability characteristics (<xref ref-type="bibr" rid="B27">Jeremiah et&#x20;al., 2003</xref>). Providing profile characteristics for different meat cuts allows the consumer to make a more informed purchase decision. <xref ref-type="bibr" rid="B48">Seggern and Gwartney (2005)</xref> identified muscles in the chuck and round that had the potential to be a value-added cut. Several new beef cuts were identified, including the flat iron, chuck eye steak, and Denver cut. The success of these innovative retail sub-primal cuts resulted in increased revenue for the beef industry by adding value to previously underutilized cuts of meat that often ended up as trim for ground beef. Cuts such as the flat iron steak became more and more popular, and it became increasingly apparent that consumers were not familiar with the new names and had difficulty identifying them in the retail case. Consumers can educate themselves regarding the different beef cuts by using charts produced by the U.S. Cattlemen&#x2019;s Beef Board and National Cattlemen&#x2019;s Beef Association that are available online or at the point of purchase. However, modern consumers use information from multiple sources regarding nutrition information for their healthy cooking methods and to identify the correct cuts that match the nutrition and palatability expectations. Obtaining accurate information is often time-consuming, and consumers are often directed to wrong information due to the lack of available information on beef cuts. Therefore, a fast, accurate objective technology is needed to recognize beef cuts information, so the consumers can obtain useful nutrition information for their health. In addition, the meatpacking industry can use this kind of novel technology to put correct cuts/nutrition information on the meat package.</p>
<p>Artificial intelligence (AI) has been used to recognize different targets such as text/words, expression of disease, food identification, and identity authentication system (<xref ref-type="bibr" rid="B16">Curtis, 1987</xref>; <xref ref-type="bibr" rid="B9">Anwar and Ahmad, 2016</xref>; <xref ref-type="bibr" rid="B10">Bai, 2017</xref>; <xref ref-type="bibr" rid="B11">Buss, 2018</xref>; <xref ref-type="bibr" rid="B36">Liu et&#x20;al., 2018</xref>; <xref ref-type="bibr" rid="B57">Sun et&#x20;al., 2018</xref>; <xref ref-type="bibr" rid="B28">Jia et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B26">Islam et&#x20;al., 2019</xref>). Known for being efficient, accurate, consistent, and cost-effective, AI suits the meat industry&#x2019;s rapid mass production (<xref ref-type="bibr" rid="B39">Liu et&#x20;al., 2017</xref>). Recent studies showed that AI technology has great potential to detect marbling in beef and pork (<xref ref-type="bibr" rid="B15">Chmiel et&#x20;al., 2012</xref>; <xref ref-type="bibr" rid="B36">Liu et&#x20;al., 2018</xref>), fresh color of pork (<xref ref-type="bibr" rid="B56">Sun et&#x20;al., 2018</xref>), tenderness of beef (<xref ref-type="bibr" rid="B55">Sun et&#x20;al., 2012</xref>), and grading of beef fat color (<xref ref-type="bibr" rid="B13">Chen et&#x20;al., 2010</xref>). Moreover, <xref ref-type="bibr" rid="B47">Schmidhuber (2014)</xref> provided an overview of many deep learning neural networks used for pattern recognition relevant for several domains such as facial recognition (<xref ref-type="bibr" rid="B46">Russakovsky et&#x20;al., 2014</xref>), disaster recognition (<xref ref-type="bibr" rid="B38">Liu and Wu, 2016</xref>), and voice recognition (<xref ref-type="bibr" rid="B64">Wu et&#x20;al., 2018</xref>; <xref ref-type="bibr" rid="B68">You et&#x20;al., 2018</xref>). 
Moreover, deep learning has been applied to sheep breeding classification (<xref ref-type="bibr" rid="B2">Abu et&#x20;al., 2019</xref>), food classification (<xref ref-type="bibr" rid="B24">Hnoohom and Yuenyong, 2018</xref>), bacon classification (<xref ref-type="bibr" rid="B65">Xiao et&#x20;al., 2019</xref>), classification of species in meat (<xref ref-type="bibr" rid="B7">Al-Sarayreh et&#x20;al., 2020</xref>), and the farming and food industries. The CNN is a popular deep learning tool, which has been used widely in classification problems. The most significant advantage of CNN is its automatic learning ability from an input image without feature extraction (<xref ref-type="bibr" rid="B23">Hinton et&#x20;al., 2006</xref>), but CNN requires a larger image data set to train the model from scratch (<xref ref-type="bibr" rid="B31">Krizhevsky et&#x20;al., 2017</xref>). To overcome this situation, the transfer learning technique has been used in which a pretrained model is used in a new problem. Both VGG16 and Inception Res Network are the two popular CNN architecture models, which use the transfer learning approach to solve the image classification problems.</p>
<p>The ImageNet competition winner VGG16 model is also a CNN proposed by K. Simonyan and A. Zisserman from the University of Oxford in the article &#x201c;Very Deep Convolutional Networks for Large Scale Image Recognition.&#x201d; It makes an improvement over AlexNet by replacing 5&#x20;&#xd7; 5 kernel size by 3&#x20;&#xd7; 3&#x20;kernel-sized filters one after another. Practically, a stack of 5&#x20;&#xd7; 5 kernel is related to two 3&#x20;&#xd7; 3 kernels, and a 7&#x20;&#xd7; 7 kernel is equivalent to three 3&#x20;&#xd7; 3 kernels. In short, VGG16&#x2019;s nonlinear transformation increases the ability of a CNN to learn features in a better way. In the convolutional structure of VGGNet, a (1 &#xd7; 1) convolutional kernel is used, and without affecting the input and output dimensions, nonlinear transformation is introduced to increase the efficiency of a network and reduce calculations. During the training process, training is performed at the low-level layers, and then the weights of ImageNet are used to initialize the complex models that follow in order to speed up the convergence of training.</p>
<p>Another CNN-based model is the inception network, which is widely used in the field of artificial intelligence (<xref ref-type="bibr" rid="B17">Deng et&#x20;al., 2010</xref>; <xref ref-type="bibr" rid="B62">Too et&#x20;al., 2019</xref>). CNN classifiers perform better with a deep layer but face issues with overfitting, smaller kernel, and vanishing gradient. Inception networks reduce these problems with multiple filter sizes in the same level that is addressed as &#x201c;wider,&#x201d; rather than &#x201c;deeper&#x201d; in the case of neural network architecture. It performs convolution of an input with multiple different size filters such as 1&#x20;&#xd7; 1, 3&#x20;&#xd7; 3, and 5&#x20;&#xd7; 5 (<xref ref-type="bibr" rid="B59">Szegedy et&#x20;al., 2017</xref>). Inception ResNet V2 modifies the Inception network and reduces the computational cost with hyperparameter tuning of three major blocks.</p>
<p>To avoid the overfitting problem in limited image data sets, image augmentation technique could be applied to the image data set before feeding the image into the Inception Network and VGG16 architecture (<xref ref-type="bibr" rid="B42">Olsen et&#x20;al., 2019</xref>). The image augmentation technique is widely used to expand or enlarge existing image data sets artificially using different processing methods. To build a robust image classifier using very little training data, image augmentation is required to boost the performance of neural networks (<xref ref-type="bibr" rid="B4">Ahsan et&#x20;al., 2019</xref>). Widely used augmentations are random rotation, shifts, shear, flips, zooming, filtering, etc. Random rotation replicates the original&#x20;image by rotation in between 0 and 360&#xb0;. Horizontal and&#x20;vertical shift move the pixels of original images by 2-dimensional direction. Flipping indicates reversing image data by row and columns. Zooming randomly zooms different parts of the input image. Different types of filtering help generate images from low light to brighter, low contrast to high contrast, and various saturation levels (<xref ref-type="bibr" rid="B49">Shijie et&#x20;al., 2017</xref>). We can also use different types of domain-specific functional processing and create augmented images to better perform image classifiers. Image augmentation is highly recommended for object detection, but the right processing choice is important for a robust model with a limited number of training input. An inappropriate selection of an augmentation technique can have a detrimental effect on the classifier. Multiple image augmentation techniques are preinstalled in the TensorFlow library (<xref ref-type="bibr" rid="B61">TensorFlow, nd</xref>) and there are functionalities to add user-defined techniques (<xref ref-type="bibr" rid="B52">Sokolova and Lapalme, 2009</xref>).</p>
<p>In 2016, the Google<sup>TM</sup> Brain team released a new deep learning neural network open-source software package called TensorFlow (<xref ref-type="bibr" rid="B1">Abadi et&#x20;al., 2016</xref>). This free, open-source deep learning algorithm library provides an effective, fast, and accurate source of artificial intelligence for industry applications (<xref ref-type="bibr" rid="B70">Zhang and Kagen, 2017</xref>; <xref ref-type="bibr" rid="B22">Han et&#x20;al., 2018</xref>; <xref ref-type="bibr" rid="B14">Chen and Gong, 2019</xref>; <xref ref-type="bibr" rid="B44">Qin et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B54">Suen et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B63">V&#xe1;zquez-Canteli et&#x20;al., 2019</xref>). Furthermore, the TensorFlow backend model could be deployed to the mobile application and IoT devices using TensorFlow Lite (tflite) (<xref ref-type="bibr" rid="B21">Google Inc., nd</xref>), which is lean and fast to get real-time results. Some literature has used the tflite model file in a mobile application in their research (<xref ref-type="bibr" rid="B60">Tarale and Desai 2020</xref>; <xref ref-type="bibr" rid="B43">Pandya et&#x20;al., 2020</xref>). The CNN deep learning technologies using TensorFlow have not yet been applied for the classification of meat&#x20;cuts.</p>
<p>The objective of this study was to develop a beef cut classification system based on off-the-shelf TensorFlow deep learning neural network coupled with the image augmentation technique, to measure prediction performance with images acquired in varying lighting and background conditions and processing levels, and to provide fast, accurate beef cuts information to the consumers and meat industry.</p>
</sec>
<sec sec-type="materials|methods" id="s3">
<title>Materials and Methods</title>
<sec id="s3-1">
<title>Beef Cuts Image Collection and Acquisition</title>
<p>A total of seven different types of retail beef cuts (<xref ref-type="fig" rid="F1">Figure&#x20;1</xref>) were used in the experiment, including rib steak, bone-in (IMPS 1103); rib eye steak, lip-On, boneless (IMPS 1112A); chuck eye roll steak (IMPS 1116D); flank steak (IMPS 193); strip loin steak, boneless (IMPS 1180); short ribs, bone-in (IMPS 1123); and tenderloin steak, center-cut, skinned (IMPS 1190B). All images used for training and testing the TensorFlow deep learning neural network were obtained from available online image libraries, except for the boneless rib eye steaks, which were obtained from our existing image library (<xref ref-type="table" rid="T1">Table&#x20;1</xref>). Images for all seven beef cuts were obtained from various online and laboratory pictures with different backgrounds to simulate different environments that consumers would face to recognize the different beef cuts (<xref ref-type="fig" rid="F1">Figure&#x20;1</xref>). A total of 1113 image sets were randomly divided into training (80%), testing (10%), and validation (10%) data subsets. We strongly believe that a pretrained model with an unknown data set will produce a close and convincing result. One of the major practicalities of using transfer learning is using a pretrained model&#x2019;s weights, which carries out information on millions of images from ImageNet (<xref ref-type="bibr" rid="B46">Russakovsky et&#x20;al., 2014</xref>). This process not only consumes less time to train, validate, and test the model but also improves the overall prediction and classification accuracy (<xref ref-type="bibr" rid="B67">Yim et&#x20;al., 2017</xref>).</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Samples of beef cuts images for the TensorFlow deep learning algorithm. <bold>(A)</bold> Rib steak, bone in (IMPS 1103); <bold>(B)</bold> lip-on, boneless (IMPS 1112A); <bold>(C)</bold> chuck eye roll steak (IMPS 1116D); <bold>(D)</bold> flank steak (IMPS 193); <bold>(E)</bold> strip loin steak, boneless (IMPS 1180); <bold>(F)</bold> short ribs, bone in (IMPS 1123); <bold>(G)</bold> tenderloin steak, center-cut, skinned (IMPS 1190B). Picture Source: CATTLEMEN&#x0027;S BEEF BOARD AND NATIONAL CATTLEMEN&#x0027;S BEEF ASSOCIATION. <ext-link ext-link-type="uri" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="https://www.beefitswhatsfordinner.com">https://www.beefitswhatsfordinner.com</ext-link>
</p>
</caption>
<graphic xlink:href="fsens-02-654357-g001.tif"/>
</fig>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Beef cuts image category and quantity.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Beef retail cuts</th>
<th align="center">IMPS<xref ref-type="table-fn" rid="Tfn1">
<sup>a</sup>
</xref> number</th>
<th align="center">Number of beef cut images</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Rib steak, bone-in</td>
<td align="char" char=".">1103</td>
<td align="char" char=".">70</td>
</tr>
<tr>
<td align="left">Rib eye steak, lip-on, boneless</td>
<td align="char" char=".">1112A</td>
<td align="char" char=".">355</td>
</tr>
<tr>
<td align="left">Chuck eye roll steak</td>
<td align="char" char=".">1116D</td>
<td align="char" char=".">112</td>
</tr>
<tr>
<td align="left">Flank steak</td>
<td align="char" char=".">193</td>
<td align="char" char=".">104</td>
</tr>
<tr>
<td align="left">Strip loin steak, boneless</td>
<td align="char" char=".">1180</td>
<td align="char" char=".">193</td>
</tr>
<tr>
<td align="left">Short ribs, bone-in</td>
<td align="char" char=".">1123</td>
<td align="char" char=".">138</td>
</tr>
<tr>
<td align="left">Tenderloin steak, center-cut, skinned</td>
<td align="char" char=".">1190B</td>
<td align="char" char=".">141</td>
</tr>
<tr>
<td align="left">Total</td>
<td align="left"/>
<td align="char" char=".">1113</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn id="Tfn1">
<label>a</label>
<p>IMPS &#x3d; Institutional Meat Purchase Specifications.</p>
</fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s3-2">
<title>Incorporation of Image Augmentation to VGG16 and Inception ResNet V2 Architecture to Classify the Beef Cuts</title>
<p>An existing neural network model to perform a similar sorting task was used for the initial beef cut classification (<xref ref-type="fig" rid="F2">Figure&#x20;2</xref>). By using the lower layer of an already trained neural network for the image classification (commonly referred to as &#x201c;transfer learning&#x201d;) (<xref ref-type="bibr" rid="B12">Chang et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B45">Rawat and Wang 2017</xref>), the training of the new network became considerably faster and required fewer images. A previous study demonstrated that transfer learning combined with the image augmentation technique has increased the classification accuracy (<xref ref-type="bibr" rid="B4">Ahsan et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B50">Shorten and Khoshgoftaar, 2019</xref>). Therefore, the image augmentation technique has been applied to develop a VGG16 and Inception ResNet V2 model in beef cuts image data&#x20;sets (<xref ref-type="fig" rid="F3">Figure&#x20;3</xref>).</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Reusing existing deep neural network (DNN) model (including input, hidden, and output layers) for a similar task, which results in a new deep neural network model with the new adjusted weights value in hidden 3 and output layer (transfer learning).</p>
</caption>
<graphic xlink:href="fsens-02-654357-g002.tif"/>
</fig>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Flowchart showing different steps of DNN model development.</p>
</caption>
<graphic xlink:href="fsens-02-654357-g003.tif"/>
</fig>
<p>VGG16 (<xref ref-type="fig" rid="F4">Figure&#x20;4</xref>) and Inception ResNet V2 (<xref ref-type="fig" rid="F5">Figure&#x20;5</xref>) architecture were used to develop a meat classification model due to their strong performance on highly variable data sets and their availability or sources on Keras (an open-source software library for the artificial neural network) and TensorFlow backend. Besides this, it is easy to convert the model developed by this technique into the TensorFlow Lite (tflite) for the developing meat cut classification system. TensorFlow, Keras application program interface (API), and python libraries were used for image augmentation, VGG16 and Inception ResNet V2 model training, and testing and validation. Before initiating the training step of the Inception ResNet V2 and VGG16, the image augmentation technique was applied to the input data set using Keras ImageDataGenerator API, which helps boost the model performance. ImageDataGenerator API generates more images in the data sets after the application of rescale, shear, shift, vertical flip, rotation, zoom, and horizontal flip. For this, rescale, shear_range, height_shift_range, vertical_flip, rotation_range, width_shift_range, zoom_range, and horizontal_flip values were set to 1./255,0.2,0.1, True, 20,0.1,0.2, and True, respectively, for training data generation, whereas only the rescale value was set to 1./255 for validation image generation.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>VGG16 standard architecture used in the experiments. The yellow highlighted cells are pooling layers where maxpooling happens, and the last green cell represents the softmax activation function right after three fully connected dense layers. Other cells represent different convolution layers (S. <xref ref-type="bibr" rid="B37">Liu and Deng 2016</xref>).</p>
</caption>
<graphic xlink:href="fsens-02-654357-g004.tif"/>
</fig>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Inception ResNet V2 architecture adopted from <xref ref-type="bibr" rid="B58">Szegedy et&#x20;al., 2016</xref>. This is an upside down flow diagram of standard Inception ResNet V2 representing different convolution layer and filter. (<xref ref-type="bibr" rid="B58">Szegedy et&#x20;al. 2016</xref>).</p>
</caption>
<graphic xlink:href="fsens-02-654357-g005.tif"/>
</fig>
<p>After image augmentation, the VGG16 model and Inception ResNet V2 model were developed to detect seven types of beef cuts. The last three fully connected layers (<xref ref-type="fig" rid="F4">Figure&#x20;4</xref>) were followed by a softmax function (function which squashed the final layer&#x2019;s activations/logits into the range [0, 1] layers) to predict the multiclass labels (<xref ref-type="bibr" rid="B51">Simonyan and Zisserman 2014</xref>; <xref ref-type="bibr" rid="B4">Ahsan et&#x20;al., 2019</xref>). Every convolutional layer in the VGG16 is followed by a Relu (rectified linear unit), which is one of the most widely used functions in neural networks to join Conv layers. It rescales the negative numbers from zero to the maximum positive number. Normalization was not used since it was not affecting the accuracy significantly. The input image started with 224&#x20;&#xd7; 224 as the output of image augmentation and then converted into three channels of RGB images, which were then processed into two hidden layers of 64 weights. Later, maxpooling reduced the sample size from 256 to 112, and again, this process was followed by another two convolutional layers with weights of 128. These weights kept increasing until they reached 512. Each convolutional layer is followed by maxpooling layers. At the end of the network, categorical cross-entropy with a softmax function, also called softmax loss, was used. The adaptive moment estimation (Adam) optimizer was used to adjust the weights and reduce the overfitting. The Adam optimizer is one of the fastest stochastic gradient descent optimizers, which calculates every parameter&#x2019;s learning rate first and then changes and stores the momentum changes (<xref ref-type="bibr" rid="B35">Li et&#x20;al., 2004</xref>; <xref ref-type="bibr" rid="B69">Zhang et&#x20;al., 2018</xref>; <xref ref-type="bibr" rid="B5">Ahsan and Nygard, 2020</xref>). 
Similarly, the Inception ResNet V2 model was developed without the application of normalization. Furthermore, the Adam optimizer was used to compare the performance with VGG16.</p>
</sec>
<sec id="s3-3">
<title>Mobile-Based Deep Learning Classification System Using TensorFlow Lite Model</title>
<p>The beef cuts data set was divided into the training, validation, and testing data subsets in order to build and evaluate a supervised deep learning model. Among these steps, the validation is the most important component of building a supervised model (<xref ref-type="bibr" rid="B66">Xu and Goodacre, 2018</xref>) because a model&#x2019;s performance is primarily judged based on validation data sets. In this classification model, an input data set was provided with names of the steak as desired output corresponding to a particular steak image as input data. For training the model, seven categories of steak images with proper labels were used. The training process was performed on 50 epochs with 32 steps per epoch for both models, which produced a model with low error and high accuracy. We set the batch size to 27 after multiple trials and errors, and total epochs per batch were set to 32 for both models. The initial learning rate was fine-tuned and adjusted based on the feedback of training accuracy during a learning event (<xref ref-type="bibr" rid="B19">Fan et&#x20;al., 2019</xref>). The validation batch size was set to 10 for both approaches. <xref ref-type="table" rid="T2">Table&#x20;2</xref> shows the steak type with their corresponding number of images used in training, validation, and testing with percentage of accuracy for the VGG16 and Inception ResNet V2 model. The training, testing, and validation were performed on a local machine (HP Omen 15t Laptop) with specification of 32 gigabytes of random-access memory (RAM), Core i9-9880H processor, and a GeForce RTX-2080 8&#xa0;GB GPU consisting of 2,944 compute unified device architecture (CUDA) cores. The Google Cloud Platform (GCP) was used to validate these experiments using a similar setup and data set. 
The Tesla p100 16&#xa0;GB GPU of GCP produced very close results, with a standard deviation of 0.001, which is negligible for reproducibility of the machine learning models (<xref ref-type="bibr" rid="B34">Learning and Zheng, 2015</xref>).</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Performance of VGG16 and Inception ResNet V2 models with varying numbers and types of steak used in the testing phase along with corresponding accuracies (%).</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Steak type</th>
<th align="center">Training</th>
<th align="center">Validation</th>
<th align="center">Testing</th>
<th align="center">Val/test ratio (%)</th>
<th align="center">Train ratio (%)</th>
<th align="center">Val &#x2b; train &#x2b; test</th>
<th align="center">Random testing</th>
<th align="center">Testing accuracy of VGG16 (%)</th>
<th align="center">Testing accuracy of Inception ResNet V2 (%)</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Bone in rib eye steak</td>
<td align="char" char=".">56</td>
<td align="char" char=".">7</td>
<td align="char" char=".">7</td>
<td align="char" char=".">10</td>
<td align="char" char=".">80</td>
<td align="char" char=".">70</td>
<td align="char" char=".">10</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
</tr>
<tr>
<td align="left">Boneless rib eye steak</td>
<td align="char" char=".">283</td>
<td align="char" char=".">36</td>
<td align="char" char=".">36</td>
<td align="char" char=".">10</td>
<td align="char" char=".">80</td>
<td align="char" char=".">355</td>
<td align="char" char=".">10</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
</tr>
<tr>
<td align="left">Chuck steak</td>
<td align="char" char=".">89</td>
<td align="char" char=".">11</td>
<td align="char" char=".">12</td>
<td align="char" char=".">10</td>
<td align="char" char=".">79</td>
<td align="char" char=".">112</td>
<td align="char" char=".">10</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
</tr>
<tr>
<td align="left">Flank steak</td>
<td align="char" char=".">83</td>
<td align="char" char=".">10</td>
<td align="char" char=".">11</td>
<td align="char" char=".">10</td>
<td align="char" char=".">80</td>
<td align="char" char=".">104</td>
<td align="char" char=".">10</td>
<td align="char" char=".">100</td>
<td align="char" char=".">90</td>
</tr>
<tr>
<td align="left">New York strip</td>
<td align="char" char=".">154</td>
<td align="char" char=".">19</td>
<td align="char" char=".">20</td>
<td align="char" char=".">10</td>
<td align="char" char=".">80</td>
<td align="char" char=".">193</td>
<td align="char" char=".">10</td>
<td align="char" char=".">100</td>
<td align="char" char=".">90</td>
</tr>
<tr>
<td align="left">Short rib</td>
<td align="char" char=".">110</td>
<td align="char" char=".">13</td>
<td align="char" char=".">15</td>
<td align="char" char=".">9</td>
<td align="char" char=".">80</td>
<td align="char" char=".">138</td>
<td align="char" char=".">10</td>
<td align="char" char=".">90</td>
<td align="char" char=".">90</td>
</tr>
<tr>
<td align="left">Tenderloin</td>
<td align="char" char=".">112</td>
<td align="char" char=".">14</td>
<td align="char" char=".">15</td>
<td align="char" char=".">10</td>
<td align="char" char=".">79</td>
<td align="char" char=".">141</td>
<td align="char" char=".">10</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>In order to make the model faster, the VGG16 and Inception ResNet V2 models in the Keras H5 format were converted into a TensorFlow Lite (tflite extension file). This format saves and preserves network architecture and configuration (which specifies what layers the model contains and how they are connected), a set of weight values, an optimizer, and a set of losses and metrics. To convert an H5 file format into the tflite format, the TensorFlow TFLiteConverter API was used. Finally, the tflite model obtained was deployed on a mobile application, which was developed using a cross-platform framework called React Native.</p>
</sec>
</sec>
<sec sec-type="results" id="s4">
<title>Results</title>
<p>In this research, the selected VGG16 model with data augmentation technique combination was able to achieve the highest accuracy of 98.57% on 116 test images. However, the training accuracy reached 100% during experiments using 887 training and 110 validation images. The training process was performed on 50 epochs, and the loss factor was optimized by the Adam optimizer effectively from the first epoch. Since we set the steps per epoch to 32, the training process helped to reach the global minimum after 10 epochs. Based on the decrement of categorical cross-entropy, the prediction from the softmax function was aligned with the actual class label. <xref ref-type="fig" rid="F6">Figure&#x20;6</xref> shows that the VGG16 training loss reached the lowest point during epoch 47 and the validation loss was lowest during epoch 45. Though the accuracy is the most intuitive performance measurement to observe the model&#x2019;s prediction ratio, the precision score is a better indicator of the model&#x2019;s robustness (<xref ref-type="bibr" rid="B18">Denton et&#x20;al., 2016</xref>; <xref ref-type="bibr" rid="B3">Ahsan et&#x20;al., 2018</xref>; <xref ref-type="bibr" rid="B20">Gomes et&#x20;al., 2018</xref>). High precision indicates the model has low false-positive rates. From <xref ref-type="fig" rid="F7">Figure&#x20;7</xref>, we can observe that the false-positive rates of the VGG16 are very low on average. <xref ref-type="table" rid="T3">Table&#x20;3</xref> shows that sensitivity (recall) is relatively higher than the standard 50%. Also, the F1 score consistently follows the accuracy and precision and was always more than&#x20;94%.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>Loss and accuracy graph for the VGG16 model. This result is based on 50 epochs and the steps per epoch were set to&#x20;32.</p>
</caption>
<graphic xlink:href="fsens-02-654357-g006.tif"/>
</fig>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>VGG16 accuracy, F-1 score, precision, recall and false positive measures on test&#x20;data.</p>
</caption>
<graphic xlink:href="fsens-02-654357-g007.tif"/>
</fig>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Average test classification accuracy, F1 score, recall rate, precision, and false-positive rate for both Inception ResNet and VGG16.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th rowspan="2" align="left">Beef cuts</th>
<th colspan="2" align="center">Accuracy (%)</th>
<th colspan="2" align="center">F1 score (%)</th>
<th colspan="2" align="center">Precision (%)</th>
<th colspan="2" align="center">Recall (%)</th>
<th colspan="2" align="center">False positive (%)</th>
</tr>
<tr>
<th align="center">Inception</th>
<th align="center">VGG16</th>
<th align="center">Inception</th>
<th align="center">VGG16</th>
<th align="center">Inception</th>
<th align="center">VGG16</th>
<th align="center">Inception</th>
<th align="center">VGG16</th>
<th align="center">Inception</th>
<th align="center">VGG16</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Bone in rib eye steak</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">0</td>
<td align="char" char=".">0</td>
</tr>
<tr>
<td align="left">Boneless rib eye steak</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">0</td>
<td align="char" char=".">0</td>
</tr>
<tr>
<td align="left">Chuck steak</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">9.1</td>
<td align="char" char=".">0</td>
</tr>
<tr>
<td align="left">Flank steak</td>
<td align="char" char=".">90</td>
<td align="char" char=".">100</td>
<td align="char" char=".">94.7</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">90</td>
<td align="char" char=".">100</td>
<td align="char" char=".">0</td>
<td align="char" char=".">0</td>
</tr>
<tr>
<td align="left">New York strip</td>
<td align="char" char=".">90</td>
<td align="char" char=".">100</td>
<td align="char" char=".">94.7</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">90</td>
<td align="char" char=".">100</td>
<td align="char" char=".">0</td>
<td align="char" char=".">9.1</td>
</tr>
<tr>
<td align="left">Tenderloin</td>
<td align="char" char=".">90</td>
<td align="char" char=".">90</td>
<td align="char" char=".">94.7</td>
<td align="char" char=".">94.7</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">90</td>
<td align="char" char=".">90</td>
<td align="char" char=".">0</td>
<td align="char" char=".">0</td>
</tr>
<tr>
<td align="left">Short rib</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">100</td>
<td align="char" char=".">16.7</td>
<td align="char" char=".">0</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The Inception ResNet V2 was also implemented with the same parameter setup as the VGG16 on a similar data set. The early epochs showed promising results, but on average, the model did not perform better than VGG16. The accuracy and loss graph of Inception ResNet V2 (<xref ref-type="fig" rid="F8">Figure&#x20;8</xref>) showed inconsistency at different stages during the training process. Also, the false-positive rates and F1 score indicate that it failed to accurately predict all the Flank steak, New York strip, and tenderloin classes&#x2019; images. The primary assumption is that since the kernel size of Inception ResNet V2 is larger than that of VGG16, it failed to localize detailed information in some inputs. The highest accuracy of Inception ResNet V2 on the testing data set was documented as 95.71% (<xref ref-type="fig" rid="F8">Figure&#x20;8</xref>), slightly lower than the VGG16 accuracy (98.57%) for the same data set. The detailed comparison of both models&#x2019; key performance indicators (KPI) can be observed in <xref ref-type="table" rid="T3">Table&#x20;3</xref>.</p>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption>
<p>Loss and accuracy graph for Inception ResNet V2. This result is based on 50 epochs and the steps per epoch were set to&#x20;32.</p>
</caption>
<graphic xlink:href="fsens-02-654357-g008.tif"/>
</fig>
<p>The TensorFlow deep learning neural network showed a great potential in recognizing and classifying beef cuts with reasonably good accuracy. Accuracy is the most intuitive performance measure and a simple way to observe the prediction ratio but is often misleading when the data set does not have symmetric false negatives and false positives (Powers, 2011). To further investigate our VGG16 model, we measured the ratio of correctly predicted positive observations as the precision score. High precision indicates low false-positive rates, which can be observed on the accuracy metrics graph (<xref ref-type="fig" rid="F9">Figure&#x20;9</xref>). The graphs also showed that the recall (sensitivity) is always above the standard value of 0.5 (<xref ref-type="bibr" rid="B3">Ahsan et&#x20;al., 2018</xref>). Since our class distribution is uneven, the F1 score is a useful metric to measure performance. Except for the boneless rib eye steak, every class has a very good weighted average of precision and recall, which implies that our model is practical and reusable.</p>
<fig id="F9" position="float">
<label>FIGURE 9</label>
<caption>
<p>Inception ResNet V2 accuracy, F-1 score, precision, recall, and false positive measures on test&#x20;data.</p>
</caption>
<graphic xlink:href="fsens-02-654357-g009.tif"/>
</fig>
</sec>
<sec sec-type="discussion" id="s5">
<title>Discussion</title>
<p>Artificial intelligence (AI) utilizing the deep learning algorithm has the potential to accurately classify different retail cuts of beef. Previously, researchers have successfully classified meat adulteration with better accuracy using support vector machine (SVM) and CNN from hyperspectral images (HSI) (<xref ref-type="bibr" rid="B6">Al-Sarayreh et&#x20;al., 2018</xref>). The feature extraction technique and model complexity were adequate for only HSI. However, these technologies were not applicable to compressed images such as images from the cellular phone, digital camera, and Internet as input. Object detection is also applied for meat cut traceability using radio frequency identification (RFID) and physical tagging, which seem promising for blockchain technology (<xref ref-type="bibr" rid="B33">Larsen et&#x20;al., 2014</xref>). Other than computer vision algorithms, different machine learning techniques have been used by researchers to classify meat cuts; these involve extensive feature extraction processes and are often hard to generalize. Some research has used a lot of noninvasive <italic>in vivo</italic> data which are collected from different categories to predict the meat cuts using the artificial neural network (ANN) and multiple linear regression (MLR) (<xref ref-type="bibr" rid="B8">Alves et&#x20;al., 2019</xref>). The ANN is proven useful for lean tissue detection in the early research application of AI in meat cut using a hybrid image segmentation technique to produce RMSE as low as 0.044 (<xref ref-type="bibr" rid="B25">Hwang et&#x20;al., 1997</xref>). These results are still not reusable as the sample size is only 40, which is very low for a neural network to be trained.</p>
<p>The beef cuts image classification system in this study was inspired by the convolutional neural network architecture based on the transfer learning approach (<xref ref-type="bibr" rid="B2">Abu et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B4">Ahsan et&#x20;al., 2019</xref>). An end-to-end open-source machine learning platform developed by the Google research team called TensorFlow was used in this study. The TensorFlow deep learning library has many advantages in that it is an open-source system that is easily accessible to the public. Therefore, the present study results can be incorporated into the open-source repository and made available to everyone interested in classifying beef cuts. A free-access and off-the-shelf deep learning neural network with AI technology was improved by incorporating the image augmentation technique and evaluated to classify seven different types of beef cut images quickly. Both the VGG16 and Inception ResNet V2 architectures coupled with image augmentation techniques, namely, rotation, flip, and shift, using TensorFlow and Keras libraries were able to successfully identify and classify the beef cut correctly over 96% of the time. This study demonstrated that higher classification accuracy can be achieved using the pretrained CNN model coupled with the image augmentation technique in beef cuts classification. VGG16 (98.6%) outperformed the Inception ResNet V2 (95.7%) model in terms of classification accuracy.</p>
<p>This research is the harbinger of an efficient AI-based meat cut system, which is merely depicted as a prototype model. Higher classification accuracy and easy deployment of the AI model in the backend application program interface (API) for any type of application (web or mobile) have proven the significance of AI in meat classification. The deep learning model developed in this research has shown the potential to be used in a phone application to provide consumers with a real-time beef cut recognition tool in meat industries. Therefore, the model developed for beef cuts classification was converted into a tflite format and deployed in a mobile application. Later, some of the random images from Google were tested in the mobile application. <xref ref-type="fig" rid="F10">Figure&#x20;10</xref> shows the beef cuts classification part on a mobile application. Based on the type of beef cut identified, the mobile application provides recipe information for the consumer. <xref ref-type="bibr" rid="B53">Statista (2016)</xref> noted that by 2020, the number of global smart phone users would reach 2.87 billion. Thus, anyone with a smartphone with Internet access will have access to this beef cut classification tool through a phone application platform. The seven beef cuts selected for this research were identified as the most popular beef cuts sold at local retail markets. Future classification training could be added to the model that includes additional beef retail cuts like those available in print or online from the beef cuts guide maintained by the National Cattlemen&#x27;s Beef Association (<xref ref-type="bibr" rid="B41">National Cattlemen&#x2019;s Beef Association 2012</xref>).</p>
<fig id="F10" position="float">
<label>FIGURE 10</label>
<caption>
<p>Mobile-based beef cut classification system using the TensorFlow Lite deep learning model. <bold>(A)</bold> Capturing the beef cuts images using a mobile camera. <bold>(B)</bold> Beef cut classification results for boneless rib eye steak from the mobile application.</p>
</caption>
<graphic xlink:href="fsens-02-654357-g010.tif"/>
</fig>
</sec>
</body>
<back>
<sec id="s6">
<title>Data Availability Statement</title>
<p>The raw data supporting the conclusion of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec id="s7">
<title>Author Contributions</title>
<p>SG: main writer and original draft preparation; BS: review and editing; YZ: AI algorithm supervising and editing; DR: meat sample preparing and analysis; MA: AI algorithm testing and data analysis; EB: funding acquisition and editing; XS: project PI and editing.</p>
</sec>
<sec id="s8">
<title>Funding</title>
<p>This study was funded by the North Dakota Beef Commission (project number FAR0027501) and North Dakota State Board of Agricultural Research and Education (project number FARG090370).</p>
</sec>
<sec sec-type="COI-statement" id="s9">
<title>Conflict of Interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Abadi</surname>
<given-names>Mart&#xed;n</given-names>
</name>
<name>
<surname>Agarwal</surname>
<given-names>Ashish</given-names>
</name>
<name>
<surname>Paul</surname>
<given-names>Barham</given-names>
</name>
<name>
<surname>Brevdo</surname>
<given-names>Eugene</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Zhifeng</given-names>
</name>
<name>
<surname>Craig</surname>
<given-names>Citro</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). <source>&#x201c;TensorFlow: Large-Scale Machine Learning on Heterogeneous Distributed Systems,&#x201d; March</source>. <pub-id pub-id-type="doi">10.1145/2951913.2976746</pub-id> <ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/1603.04467">https://arxiv.org/abs/1603.04467</ext-link>. </citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Abu</surname>
<given-names>Jwade</given-names>
</name>
<name>
<surname>Sanabel</surname>
</name>
<name>
<surname>Guzzomi</surname>
<given-names>Andrew</given-names>
</name>
<name>
<surname>Ajmal Mian</surname>
</name>
</person-group> (<year>2019</year>). <article-title>On Farm Automatic Sheep Breed Classification Using Deep Learning</article-title>. <source>Computers and Electronics in Agriculture</source> <volume>167</volume> (<issue>December</issue>). <pub-id pub-id-type="doi">10.1016/j.compag.2019.105055</pub-id> </citation>
</ref>
<ref id="B3">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Ahsan</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Gomes</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Denton</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>SMOTE Implementation on Phishing Data to Enhance Cybersecurity</article-title>. <conf-name>IEEE International Conference on Electro Information Technology</conf-name> (<publisher-loc>Washington, DC</publisher-loc>: <publisher-name>IEEE Computer Society</publisher-name>), <fpage>531</fpage>&#x2013;<lpage>36</lpage>. <pub-id pub-id-type="doi">10.1109/EIT.2018.8500086</pub-id>
<comment>2018-May</comment>: </citation>
</ref>
<ref id="B4">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Ahsan</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Gomes</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Denton</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Application of a Convolutional Neural Network Using Transfer Learning for Tuberculosis Detection</article-title>,&#x201d; In <conf-name>IEEE International Conference on Electro Information Technology</conf-name> (<publisher-name>IEEE Computer Society</publisher-name>), <fpage>427</fpage>&#x2013;<lpage>33</lpage>. <pub-id pub-id-type="doi">10.1109/EIT.2019.8833768</pub-id> </citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ahsan</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Nygard</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>Convolutional Neural Networks with LSTM for Intrusion Detection</article-title>.&#x201d; <comment>In</comment>. <pub-id pub-id-type="doi">10.29007/j35r</pub-id> </citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Al-Sarayreh</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Reis</surname>
<given-names>M. M.</given-names>
</name>
<name>
<surname>Yan</surname>
<given-names>W. Q.</given-names>
</name>
<name>
<surname>Klette</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Detection of Red-Meat Adulteration by Deep Spectral-Spatial Features in Hyperspectral Images</article-title>. <source>J.&#x20;Imaging</source> <volume>4</volume>, <fpage>63</fpage>. <pub-id pub-id-type="doi">10.3390/jimaging4050063</pub-id> </citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Al-Sarayreh</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Reis</surname>
<given-names>M. M.</given-names>
</name>
<name>
<surname>Yan</surname>
<given-names>W. Q.</given-names>
</name>
<name>
<surname>Klette</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Potential of Deep&#x20;Learning and Snapshot Hyperspectral Imaging for Classification of Species in Meat</article-title>. <source>Food Control</source> <volume>117</volume> (<issue>November</issue>). <pub-id pub-id-type="doi">10.1016/j.foodcont.2020.107332</pub-id> </citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Alves</surname>
<given-names>A. A. C.</given-names>
</name>
<name>
<surname>Chaparro Pinzon</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Costa</surname>
<given-names>R. M. d. C</given-names>
</name>
<name>
<surname>Silva</surname>
<given-names>M. S. d.</given-names>
</name>
<name>
<surname>Vieira</surname>
<given-names>E. H. M.</given-names>
</name>
<name>
<surname>Mendon&#xe7;a</surname>
<given-names>I. B. d.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>Multiple Regression and Machine Learning Based Methods for Carcass Traits and Saleable Meat Cuts Prediction Using Non-Invasive <italic>in Vivo</italic> Measurements in Commercial Lambs</article-title>. <source>Small Ruminant Research</source> <volume>171</volume>, <fpage>49</fpage>&#x2013;<lpage>56</lpage>. <pub-id pub-id-type="doi">10.1016/j.smallrumres.2018.12.008</pub-id> </citation>
</ref>
<ref id="B9">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Anwar</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Ahmad</surname>
<given-names>S. S.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Use of Artificial Intelligence in Medical Sciences</article-title>. <conf-name>2016. Vision 2020: Innovation Management, Development Sustainability, And Competitive Economic Growth</conf-name>, <volume>Vols I - Vii</volume>, <fpage>415</fpage>-<lpage>422</lpage>. </citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bai</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>English speech recognition based on artificial intelligence</article-title>. <source>Agro Food Industry Hi-Tech</source> <volume>28</volume>, <fpage>2259</fpage>&#x2013;<lpage>2263</lpage>. </citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Buss</surname>
<given-names>D</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Food Companies Get Smart About Artificial Intelligence</article-title>. <source>Food Technol-Chicago</source> <volume>72</volume> (<issue>7</issue>), <fpage>26</fpage>&#x2013;<lpage>41</lpage>. </citation>
</ref>
<ref id="B12">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Chang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Han</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Chang</surname>
<given-names>H.-j.</given-names>
</name>
<name>
<surname>Park</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>A Method for Classifying Medical Images Using Transfer Learning: A Pilot Study on Histopathology of Breast Cancer</article-title>. In <conf-name>2017 IEEE 19th International Conference on E-Health Networking, Applications and Services (Healthcom)</conf-name>, <fpage>1</fpage>&#x2013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1109/HealthCom.2017.8210843</pub-id> </citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Qin</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Tang</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Color Grading of Beef Fat by Using Computer Vision and Support Vector Machine</article-title>. <source>Computers and Electronics in Agriculture</source> <volume>70</volume>, <fpage>27</fpage>&#x2013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.1016/j.compag.2009.08.006</pub-id> </citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Gong</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>9 Discrimination of breast tumors in ultrasonic images using an ensemble classifier based on TensorFlow framework with feature selection</article-title>. <source>Journal of Investigative Medicine</source> <volume>67</volume> (<issue>Suppl 1</issue>), <fpage>A3 LP</fpage>&#x2013;<lpage>A3</lpage>. <pub-id pub-id-type="doi">10.1136/jim-2019-000994.9</pub-id> </citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chmiel</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Slowinski</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Dasiewicz</surname>
<given-names>Krzysztof</given-names>
</name>
<name>
<surname>Florowski</surname>
<given-names>T</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Application of a Computer Vision System to Classify Beef as Normal or Dark, Firm, and Dry</article-title>. <source>Journal of Animal Science</source> <volume>90</volume> (<issue>November</issue>), <fpage>4126</fpage>&#x2013;<lpage>30</lpage>. <pub-id pub-id-type="doi">10.2527/jas2011-502210.2527/jas.2011-5022</pub-id> </citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Curtis</surname>
<given-names>J.&#x20;W.</given-names>
</name>
</person-group> (<year>1987</year>). <article-title>Robotics and Artificial-Intelligence for the Food-Industry</article-title>. <source>Food Technol-Chicago</source> <volume>41</volume> (<issue>12</issue>), <fpage>62</fpage>&#x2013;<lpage>64</lpage>. </citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Deng</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Dong</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Socher</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>L.-J.</given-names>
</name>
<name>
<surname>Kai Li</surname>
<given-names>Kai</given-names>
</name>
<name>
<surname>Li Fei-Fei</surname>
<given-names>Li</given-names>
</name>
</person-group>. (<year>2009</year>). &#x201c;<article-title>ImageNet: A Large-Scale Hierarchical Image Database</article-title>.&#x201d; <comment>In</comment>. <pub-id pub-id-type="doi">10.1109/cvpr.2009.5206848</pub-id> </citation>
</ref>
<ref id="B18">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Denton</surname>
<given-names>A. M.</given-names>
</name>
<name>
<surname>Ahsan</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Franzen</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Nowatzki</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Multi-Scalar Analysis of Geospatial Agricultural Data for Sustainability</article-title>. In <conf-name>Proceedings - 2016 IEEE International Conference on Big Data, Big Data</conf-name>, <volume>2016</volume>
<publisher-name>(Washington, DC, USA: Institute of Electrical and Electronics Engineers Inc.)</publisher-name>, <fpage>2139</fpage>&#x2013;<lpage>46</lpage>. <pub-id pub-id-type="doi">10.1109/BigData.2016.7840843</pub-id> </citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fan</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Zheng</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Deep Topology Network: A Framework Based on Feedback Adjustment Learning Rate for Image Classification</article-title>. <source>Advanced Engineering Informatics</source> <volume>42</volume> (<issue>October</issue>), <fpage>100935</fpage>. <pub-id pub-id-type="doi">10.1016/j.aei.2019.100935</pub-id> </citation>
</ref>
<ref id="B20">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Gomes</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Ahsan</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Denton</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Random Forest Classifier in SDN Framework for User-Based Indoor Localization</article-title>
<conf-name>IEEE International Conference on Electro Information Technology</conf-name>
<publisher-name>IEEE Computer Society</publisher-name>, <fpage>537</fpage>&#x2013;<lpage>42</lpage>. <pub-id pub-id-type="doi">10.1109/EIT.2018.8500111</pub-id> </citation>
</ref>
<ref id="B21">
<citation citation-type="web">
<collab>Google Inc.</collab> (<year>n.d</year>). <article-title>TensorFlow Lite</article-title>. <ext-link ext-link-type="uri" xlink:href="https://www.tensorflow.org/lite">https://www.tensorflow.org/lite</ext-link> (<comment>Accessed October 29, 2020</comment>). </citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Han</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ren</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Du</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Ye</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Using the TensorFlow Deep Neural Network to Classify Mainland China Visitor Behaviours in Hong Kong from Check-in Data</article-title>. <source>Ijgi</source> <volume>7</volume>, <fpage>158</fpage>. <pub-id pub-id-type="doi">10.3390/ijgi7040158</pub-id> </citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hinton</surname>
<given-names>G. E.</given-names>
</name>
<name>
<surname>Osindero</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Teh</surname>
<given-names>Y.-W.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>A Fast Learning Algorithm for Deep Belief Nets</article-title>. <source>Neural Computation</source> <volume>18</volume> (<issue>7</issue>), <fpage>1527</fpage>&#x2013;<lpage>1554</lpage>. <pub-id pub-id-type="doi">10.1162/neco.2006.18.7.1527</pub-id> </citation>
</ref>
<ref id="B24">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Hnoohom</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Yuenyong</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Thai Fast Food Image Classification Using Deep Learning</article-title>. <conf-name>1st International ECTI Northern Section Conference on Electrical, Electronics, Computer and Telecommunications Engineering, ECTI-NCON</conf-name>, <volume>2018</volume>. (<publisher-loc>Chiang Rai, Thailand: Institute of Electrical and Electronics Engineers Inc</publisher-loc>), <fpage>116</fpage>&#x2013;<lpage>19</lpage>. <pub-id pub-id-type="doi">10.1109/ECTI-NCON.2018.8378293</pub-id> </citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hwang</surname>
<given-names>Heon</given-names>
</name>
<name>
<surname>Park</surname>
<given-names>Bosoon</given-names>
</name>
<name>
<surname>Nguyen</surname>
<given-names>Minh</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Yud Ren</given-names>
</name>
</person-group> (<year>1997</year>). <article-title>Hybrid Image Processing for Robust Extraction of Lean Tissue on Beef Cut Surfaces</article-title>. <source>Computers and Electronics in Agriculture</source>. <pub-id pub-id-type="doi">10.1016/s0168-1699(97)01321-5</pub-id> <pub-id pub-id-type="doi">10.1109/icce.1997.625891</pub-id> </citation>
</ref>
<ref id="B26">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Islam</surname>
<given-names>S. M. M.</given-names>
</name>
<name>
<surname>Rahman</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Prasad</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Boric-Lubecke</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Lubecke</surname>
<given-names>V. M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Identity Authentication System using a Support Vector Machine (SVM) on Radar Respiration Measurements</article-title>. <conf-name>93rd ARFTG Microwave Measurement Conference: Measurement Challenges for the Upcoming RF and Mm-Wave Communications and Sensing Systems, ARFTG 2019</conf-name>. <pub-id pub-id-type="doi">10.1109/ARFTG.2019.8739240</pub-id> </citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jeremiah</surname>
<given-names>L. E.</given-names>
</name>
<name>
<surname>Dugan</surname>
<given-names>M. E. R.</given-names>
</name>
<name>
<surname>Aalhus</surname>
<given-names>J.&#x20;L.</given-names>
</name>
<name>
<surname>Gibson</surname>
<given-names>L. L.</given-names>
</name>
</person-group> (<year>2003</year>). <article-title>Assessment of the Chemical and Cooking Properties of the Major Beef Muscles and Muscle Groups</article-title>. <source>Meat Science</source> <volume>65</volume> (<issue>3</issue>), <fpage>985</fpage>&#x2013;<lpage>992</lpage>. <pub-id pub-id-type="doi">10.1016/S0309-1740(02)00308-X</pub-id> </citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jia</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Qu</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Baranowski</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Burke</surname>
<given-names>L. E.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>H.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). &#x201c;<article-title>Automatic Food Detection in Egocentric Images Using Artificial Intelligence Technology</article-title>.&#x201d; <source>Public Health Nutr.</source> <volume>22</volume> (<issue>7</issue>): <fpage>1</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1017/S1368980018000538</pub-id> </citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jung</surname>
<given-names>E.-Y.</given-names>
</name>
<name>
<surname>Hwang</surname>
<given-names>Y.-H.</given-names>
</name>
<name>
<surname>Joo</surname>
<given-names>S.-T.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Chemical Components and Meat Quality Traits Related to Palatability of Ten Primal Cuts from Hanwoo Carcasses</article-title>. <source>Korean Journal for Food Science of Animal Resources</source> <volume>35</volume> (<issue>6</issue>), <fpage>859</fpage>&#x2013;<lpage>866</lpage>. <pub-id pub-id-type="doi">10.5851/kosfa.2015.35.6.859</pub-id> </citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jung</surname>
<given-names>E.-Y.</given-names>
</name>
<name>
<surname>Hwang</surname>
<given-names>Y.-H.</given-names>
</name>
<name>
<surname>Joo</surname>
<given-names>S.-T.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>The Relationship between Chemical Compositions, Meat Quality, and Palatability of the 10 Primal Cuts from Hanwoo Steer</article-title>. <source>Korean Journal for Food Science of Animal Resources</source> <volume>36</volume> (<issue>2</issue>), <fpage>145</fpage>&#x2013;<lpage>51</lpage>. <pub-id pub-id-type="doi">10.5851/kosfa.2016.36.2.145</pub-id> </citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Krizhevsky</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Sutskever</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Hinton</surname>
<given-names>G. E.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>ImageNet Classification with Deep Convolutional Neural Networks</article-title>. <source>Commun. ACM</source> <volume>60</volume> (<issue>6</issue>), <fpage>84</fpage>&#x2013;<lpage>90</lpage>. <pub-id pub-id-type="doi">10.1145/3065386</pub-id> </citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kukowski</surname>
<given-names>A. C.</given-names>
</name>
<name>
<surname>Maddock</surname>
<given-names>R. J.</given-names>
</name>
<name>
<surname>Wulf</surname>
<given-names>D. M.</given-names>
</name>
<name>
<surname>Fausti</surname>
<given-names>S. W.</given-names>
</name>
<name>
<surname>Taylor</surname>
<given-names>G. L.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>Evaluating Consumer Acceptability and Willingness to Pay for Various Beef Chuck Muscles1</article-title>. <source>Journal of Animal Science</source> <volume>83</volume> (<issue>11</issue>), <fpage>2605</fpage>&#x2013;<lpage>2610</lpage>. <pub-id pub-id-type="doi">10.2527/2005.83112605x</pub-id> </citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Larsen</surname>
<given-names>A. B. L.</given-names>
</name>
<name>
<surname>Hviid</surname>
<given-names>M. S.</given-names>
</name>
<name>
<surname>J&#xf8;rgensen</surname>
<given-names>M. E.</given-names>
</name>
<name>
<surname>Larsen</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Dahl</surname>
<given-names>A. L.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Vision-Based Method for Tracking Meat Cuts in Slaughterhouses</article-title>. <source>Meat Science</source> <volume>96</volume>, <fpage>366</fpage>&#x2013;<lpage>372</lpage>. <pub-id pub-id-type="doi">10.1016/j.meatsci.2013.07.023</pub-id> </citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zheng</surname>
<given-names>Alice</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Evaluating Machine Learning Models: A Beginner&#x2019;s Guide to Key Concepts and Pitfalls</article-title>. <source>O&#x2019;Reilly</source>. </citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Ogihara</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>A comparative study of feature selection and multiclass classification methods for tissue classification based on gene expression</article-title>. <source>Bioinformatics</source> <volume>20</volume>, <fpage>2429</fpage>&#x2013;<lpage>2437</lpage>. <pub-id pub-id-type="doi">10.1093/bioinformatics/bth267</pub-id> </citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>J.-H.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Young</surname>
<given-names>J.&#x20;M.</given-names>
</name>
<name>
<surname>Bachmeier</surname>
<given-names>L. A.</given-names>
</name>
<name>
<surname>Newman</surname>
<given-names>D. J.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Predicting Pork Loin Intramuscular Fat Using Computer Vision System</article-title>. <source>Meat Science</source> <volume>143</volume> (<issue>September</issue>), <fpage>18</fpage>&#x2013;<lpage>23</lpage>. <pub-id pub-id-type="doi">10.1016/j.meatsci.2018.03.020</pub-id> </citation>
</ref>
<ref id="B37">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Deng</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Very Deep Convolutional Neural Network Based Image Classification Using Small Training Sample Size</article-title>. In <conf-name>Proceedings - 3rd IAPR Asian Conference on Pattern Recognition, ACPR 2015</conf-name>. <pub-id pub-id-type="doi">10.1109/ACPR.2015.7486599</pub-id> </citation>
</ref>
<ref id="B38">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Geological Disaster Recognition on Optical Remote Sensing Images Using Deep Learning</article-title>. In <source>Procedia Computer Science</source>, <volume>91</volume>, <fpage>566</fpage>&#x2013;<lpage>575</lpage>. (<publisher-name>Elsevier B.V</publisher-name>). <pub-id pub-id-type="doi">10.1016/j.procs.2016.07.144</pub-id> </citation>
</ref>
<ref id="B39">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Pu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>D.-W.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Hyperspectral Imaging Technique for Evaluating Food Quality and Safety during Various Processes: A Review of Recent Applications</article-title>, <source>Trends in Food Science and Technology</source>, <volume>69</volume>, <fpage>25</fpage>&#x2013;<lpage>35</lpage>. (<publisher-name>Elsevier Ltd</publisher-name>). <pub-id pub-id-type="doi">10.1016/j.tifs.2017.08.013</pub-id> </citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lusk</surname>
<given-names>J.&#x20;L.</given-names>
</name>
<name>
<surname>Fox</surname>
<given-names>J.&#x20;A.</given-names>
</name>
<name>
<surname>Schroeder</surname>
<given-names>T. C.</given-names>
</name>
<name>
<surname>Mintert</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Koohmaraie</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Koohmaraie</surname>
<given-names>Mohammad</given-names>
</name>
</person-group> (<year>2001</year>). <article-title>In&#x2010;Store Valuation of Steak Tenderness</article-title>. <source>American Journal of Agricultural Economics</source> <volume>83</volume> (<issue>3</issue>), <fpage>539</fpage>&#x2013;<lpage>550</lpage>. <pub-id pub-id-type="doi">10.1111/0002-9092.00176</pub-id> </citation>
</ref>
<ref id="B41">
<citation citation-type="other">
<collab>National Cattlemen&#x27;s Beef Association</collab> (<year>2012</year>). <comment>Value-added cuts</comment>.</citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Olsen</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Konovalov</surname>
<given-names>D. A.</given-names>
</name>
<name>
<surname>Philippa</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Ridd</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Wood</surname>
<given-names>J.&#x20;C.</given-names>
</name>
<name>
<surname>Johns</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>DeepWeeds: A Multiclass Weed Species Image Dataset for Deep Learning</article-title>. <source>Sci. Rep.</source> <volume>9</volume> (<issue>1</issue>), <fpage>2058</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-018-38343-3</pub-id> </citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pandya</surname>
<given-names>M. D.</given-names>
</name>
<name>
<surname>Jardosh</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Thakkar</surname>
<given-names>A.R.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>An Early-Stage Classification of Lung Nodules by an Android Based Application Using Deep Convolution Neural Network with Cost-Sensitive Loss Function and Progressive Scaling Approach</article-title>. <source>Ijatcse</source> <volume>9</volume> (<issue>2</issue>), <fpage>1316</fpage>&#x2013;<lpage>1323</lpage>. <pub-id pub-id-type="doi">10.30534/ijatcse/2020/63922020</pub-id> </citation>
</ref>
<ref id="B44">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Qin</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Lei</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Kang</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Simulating and Predicting of Hydrological Time Series Based on TensorFlow Deep Learning</article-title>. <source>Pol. J.&#x20;Environ. Stud.</source> <volume>28</volume> (<issue>2</issue>), <fpage>795</fpage>&#x2013;<lpage>802</lpage>. <pub-id pub-id-type="doi">10.15244/pjoes/81557</pub-id> </citation>
</ref>
<ref id="B45">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rawat</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Deep Convolutional Neural Networks for Image Classification: A Comprehensive Review</article-title>. <source>Neural Computation</source> <volume>29</volume> (<issue>9</issue>), <fpage>2352</fpage>&#x2013;<lpage>2449</lpage>. <pub-id pub-id-type="doi">10.1162/neco_a_00990</pub-id> </citation>
</ref>
<ref id="B46">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Russakovsky</surname>
<given-names>Olga</given-names>
</name>
<name>
<surname>Jia</surname>
<given-names>Deng</given-names>
</name>
<name>
<surname>Su</surname>
<given-names>Hao</given-names>
</name>
<name>
<surname>Krause</surname>
<given-names>Jonathan</given-names>
</name>
<name>
<surname>Satheesh</surname>
<given-names>Sanjeev</given-names>
</name>
<name>
<surname>Ma</surname>
<given-names>Sean</given-names>
</name>
<etal/>
</person-group> (<year>2014</year>). <article-title>ImageNet Large Scale Visual Recognition Challenge</article-title> (<comment>September</comment>). <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/1409.0575">http://arxiv.org/abs/1409.0575</ext-link>.</citation>
</ref>
<ref id="B47">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Schmidhuber</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Deep Learning in Neural Networks: An Overview</article-title>. <source>Neural Networks</source> <volume>61</volume>, <fpage>85</fpage>&#x2013;<lpage>117</lpage>. <pub-id pub-id-type="doi">10.1016/j.neunet.2014.09.003</pub-id> </citation>
</ref>
<ref id="B48">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Seggern</surname>
<given-names>D. D. V.</given-names>
</name>
<name>
<surname>Calkins</surname>
<given-names>C. R.</given-names>
</name>
<name>
<surname>Johnson</surname>
<given-names>D. D.</given-names>
</name>
<name>
<surname>Brickler</surname>
<given-names>J.&#x20;E.</given-names>
</name>
<name>
<surname>Gwartney</surname>
<given-names>B. L.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>Muscle Profiling: Characterizing the Muscles of the Beef Chuck and Round</article-title>. in <source>Meat Science</source> (<publisher-name>Elsevier</publisher-name>), <volume>71</volume>, <fpage>39</fpage>&#x2013;<lpage>51</lpage>. <pub-id pub-id-type="doi">10.1016/j.meatsci.2005.04.010</pub-id> </citation>
</ref>
<ref id="B49">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Shijie</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ping</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Peiyi</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Siping</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Research on Data Augmentation for Image Classification Based on Convolution Neural Networks</article-title>. In <conf-name>Proceedings - 2017 Chinese Automation Congress, CAC 2017</conf-name>. <pub-id pub-id-type="doi">10.1109/CAC.2017.8243510</pub-id> </citation>
</ref>
<ref id="B50">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shorten</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Khoshgoftaar</surname>
<given-names>T. M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>A Survey on Image Data Augmentation for Deep Learning</article-title>. <source>J.&#x20;Big Data</source> <volume>6</volume> (<issue>1</issue>), <fpage>60</fpage>. <pub-id pub-id-type="doi">10.1186/s40537-019-0197-0</pub-id> </citation>
</ref>
<ref id="B51">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Simonyan</surname>
<given-names>Karen</given-names>
</name>
<name>
<surname>Zisserman</surname>
<given-names>Andrew</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Very Deep Convolutional Networks for Large-Scale Image Recognition</article-title>. <conf-name>3rd International Conference on Learning Representations, ICLR 2015 - Conference Track Proceedings, September</conf-name>. <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/1409.1556">http://arxiv.org/abs/1409.1556</ext-link>. </citation>
</ref>
<ref id="B52">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sokolova</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Lapalme</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>A Systematic Analysis of Performance Measures for Classification Tasks</article-title>. <source>Information Processing &#x26; Management</source> <volume>45</volume>, <fpage>427</fpage>&#x2013;<lpage>437</lpage>. <pub-id pub-id-type="doi">10.1016/j.ipm.2009.03.002</pub-id> </citation>
</ref>
<ref id="B53">
<citation citation-type="journal">
<collab>Statista</collab> (<year>2016</year>). <article-title>Number of smartphone users worldwide from 2014 to 2020</article-title>. <comment>(in billions)</comment>. </citation>
</ref>
<ref id="B54">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Suen</surname>
<given-names>H.-Y.</given-names>
</name>
<name>
<surname>Hung</surname>
<given-names>K.-E.</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>C.-L.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>TensorFlow-Based Automatic Personality Recognition Used in Asynchronous Video Interviews</article-title>. <source>IEEE Access</source> <volume>7</volume>, <fpage>61018</fpage>&#x2013;<lpage>61023</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2019.2902863</pub-id> </citation>
</ref>
<ref id="B55">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sun</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>K. J.</given-names>
</name>
<name>
<surname>Maddock-Carlin</surname>
<given-names>K. R.</given-names>
</name>
<name>
<surname>Anderson</surname>
<given-names>V. L.</given-names>
</name>
<name>
<surname>Lepper</surname>
<given-names>A. N.</given-names>
</name>
<name>
<surname>Schwartz</surname>
<given-names>C. A.</given-names>
</name>
<etal/>
</person-group> (<year>2012</year>). <article-title>Predicting Beef Tenderness Using Color and Multispectral Image Texture Features</article-title>. <source>Meat Science</source> <volume>92</volume> (<issue>4</issue>), <fpage>386</fpage>&#x2013;<lpage>393</lpage>. <pub-id pub-id-type="doi">10.1016/j.meatsci.2012.04.030</pub-id> </citation>
</ref>
<ref id="B56">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sun</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Young</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>J.&#x20;H.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Newman</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Predicting Pork Color Scores Using Computer Vision and Support Vector Machine Technology</article-title>. <source>Meat and Muscle Biology</source> <volume>2</volume> (<issue>January</issue>), <fpage>296</fpage>. <pub-id pub-id-type="doi">10.22175/mmb2018.06.0015</pub-id> </citation>
</ref>
<ref id="B57">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sun</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Young</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>J.-H.</given-names>
</name>
<name>
<surname>Newman</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Prediction of Pork Loin Quality Using Online Computer Vision System and Artificial Intelligence Model</article-title>. <source>Meat Science</source> <volume>140</volume> (<issue>June</issue>), <fpage>72</fpage>&#x2013;<lpage>77</lpage>. <pub-id pub-id-type="doi">10.1016/j.meatsci.2018.03.005</pub-id> </citation>
</ref>
<ref id="B58">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Szegedy</surname>
<given-names>Christian</given-names>
</name>
<name>
<surname>Ioffe</surname>
<given-names>Sergey</given-names>
</name>
<name>
<surname>Vanhoucke</surname>
<given-names>Vincent</given-names>
</name>
<name>
<surname>Alemi</surname>
<given-names>Alex</given-names>
</name>
</person-group>. (<year>2016</year>). &#x201c;<article-title>Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning</article-title>.&#x201d; <pub-id pub-id-type="doi">10.1109/cvpr.2016.308</pub-id> </citation>
</ref>
<ref id="B59">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Szegedy</surname>
<given-names>Christian</given-names>
</name>
<name>
<surname>Ioffe</surname>
<given-names>Sergey</given-names>
</name>
<name>
<surname>Vanhoucke</surname>
<given-names>Vincent</given-names>
</name>
<name>
<surname>Alemi</surname>
<given-names>Alexander A.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning</article-title>. <conf-name>31st AAAI Conference on Artificial Intelligence, AAAI 2017</conf-name>.</citation>
</ref>
<ref id="B60">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tarale</surname>
<given-names>S. P.</given-names>
</name>
<name>
<surname>Desai</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Android Application for Recognition of Indian Origin Agricultural Products</article-title>. <source>Advances in Intelligent Systems and Computing</source> <volume>1154</volume>, <fpage>309</fpage>&#x2013;<lpage>323</lpage>. <pub-id pub-id-type="doi">10.1007/978-981-15-4032-5_29</pub-id> </citation>
</ref>
<ref id="B61">
<citation citation-type="web">
<collab>TensorFlow</collab> (<year>n.d</year>). <ext-link ext-link-type="uri" xlink:href="https://www.tensorflow.org/">https://www.tensorflow.org/</ext-link> (<comment>Accessed January 4, 2021</comment>).</citation>
</ref>
<ref id="B62">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Too</surname>
<given-names>E. C.</given-names>
</name>
<name>
<surname>Yujian</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Njuki</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yingchun</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>A Comparative Study of Fine-Tuning Deep Learning Models for Plant Disease Identification</article-title>. <source>Computers and Electronics in Agriculture</source> <volume>161</volume>, <fpage>272</fpage>&#x2013;<lpage>279</lpage>. <pub-id pub-id-type="doi">10.1016/j.compag.2018.03.032</pub-id> </citation>
</ref>
<ref id="B63">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>V&#xe1;zquez-Canteli</surname>
<given-names>J.&#x20;R.</given-names>
</name>
<name>
<surname>Ulyanin</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>K&#xe4;mpf</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Nagy</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Nagy</surname>
<given-names>Zolt&#xe1;n</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Fusing TensorFlow with Building Energy Simulation for Intelligent Energy Management in Smart Cities</article-title>. <source>Sustainable Cities and Society</source> <volume>45</volume> (<issue>February</issue>), <fpage>243</fpage>&#x2013;<lpage>257</lpage>. <pub-id pub-id-type="doi">10.1016/j.scs.2018.11.021</pub-id> </citation>
</ref>
<ref id="B64">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Soraghan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Lowit</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Di-Caterina</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2018</year>). <source>A Deep Learning Method for Pathological Voice Detection Using Convolutional Deep Belief Networks</source>. <pub-id pub-id-type="doi">10.21437/Interspeech.2018-1351</pub-id> </citation>
</ref>
<ref id="B65">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Xiao</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Dong</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Xing</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Research on the Method of Hyperspectral and Image Deep Features for Bacon Classification</article-title>. <conf-name>Proceedings of the 31st Chinese Control and Decision Conference, CCDC 2019</conf-name> (<publisher-loc>Nanchang, China</publisher-loc>: <publisher-name>Institute of Electrical and Electronics Engineers Inc.</publisher-name>), <fpage>4682</fpage>&#x2013;<lpage>86</lpage>. <pub-id pub-id-type="doi">10.1109/CCDC.2019.8832581</pub-id> </citation>
</ref>
<ref id="B66">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Goodacre</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>On Splitting Training and Validation Set: A Comparative Study of Cross-Validation, Bootstrap and Systematic Sampling for Estimating the Generalization Performance of Supervised Learning</article-title>. <source>J.&#x20;Anal. Test.</source> <volume>2</volume> (<issue>3</issue>), <fpage>249</fpage>&#x2013;<lpage>262</lpage>. <pub-id pub-id-type="doi">10.1007/s41664-018-0068-2</pub-id> </citation>
</ref>
<ref id="B67">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Yim</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Joo</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Bae</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>A Gift from Knowledge Distillation: Fast Optimization, Network Minimization and Transfer Learning</article-title>.&#x201d; In <conf-name>Proceedings - 30th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017</conf-name>. <pub-id pub-id-type="doi">10.1109/CVPR.2017.754</pub-id> </citation>
</ref>
<ref id="B68">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>You</surname>
<given-names>S. D.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>C.-H.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>W.-K.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Comparative Study of Singing Voice Detection Based on Deep Neural Networks and Ensemble Learning</article-title>. <source>Hum. Cent. Comput. Inf. Sci.</source> <volume>8</volume> (<issue>1</issue>), <fpage>34</fpage>. <pub-id pub-id-type="doi">10.1186/s13673-018-0158-1</pub-id> </citation>
</ref>
<ref id="B69">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Isola</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Efros</surname>
<given-names>A. A.</given-names>
</name>
<name>
<surname>Shechtman</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>O.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>The Unreasonable Effectiveness of Deep Features as a Perceptual Metric</article-title>.&#x201d; In <conf-name>Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition</conf-name>. <pub-id pub-id-type="doi">10.1109/CVPR.2018.00068</pub-id> </citation>
</ref>
<ref id="B70">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>Y. C.</given-names>
</name>
<name>
<surname>Kagen</surname>
<given-names>A. C.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Machine Learning Interface for Medical Image Analysis</article-title>. <source>J.&#x20;Digit Imaging</source> <volume>30</volume> (<issue>5</issue>), <fpage>615</fpage>&#x2013;<lpage>621</lpage>. <pub-id pub-id-type="doi">10.1007/s10278-016-9910-0</pub-id> </citation>
</ref>
</ref-list>
</back>
</article>