<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Mar. Sci.</journal-id>
<journal-title>Frontiers in Marine Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Mar. Sci.</abbrev-journal-title>
<issn pub-type="epub">2296-7745</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fmars.2024.1356356</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Marine Science</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Deep learning-based phenotype classification of three ark shells: <italic>Anadara kagoshimensis</italic>, <italic>Tegillarca granosa</italic>, and <italic>Anadara broughtonii</italic>
</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Kim</surname>
<given-names>Eiseul</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2638280"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Yang</surname>
<given-names>Seung-Min</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/670287"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Cha</surname>
<given-names>Jae-Eun</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2607785"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Jung</surname>
<given-names>Dae-Hyun</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2177292"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Kim</surname>
<given-names>Hae-Yeong</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/599409"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Institute of Life Sciences &amp; Resources, Kyung Hee University</institution>, <addr-line>Yongin</addr-line>, <country>Republic of Korea</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Department of Food Science and Biotechnology, Kyung Hee University</institution>, <addr-line>Yongin</addr-line>, <country>Republic of Korea</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Department of Smart Farm Science, Kyung Hee University</institution>, <addr-line>Yongin</addr-line>, <country>Republic of Korea</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>Edited by: Haiyong Zheng, Ocean University of China, China</p>
</fn>
<fn fn-type="edited-by">
<p>Reviewed by: Luigia Donnarumma, University of Naples Parthenope, Italy</p>
<p>Zhiguo Dong, Jiangsu Ocean University, China</p>
</fn>
<fn fn-type="corresp" id="fn001">
<p>*Correspondence: Dae-Hyun Jung, <email xlink:href="mailto:daehyun@khu.ac.kr">daehyun@khu.ac.kr</email>; Hae-Yeong Kim, <email xlink:href="mailto:hykim@khu.ac.kr">hykim@khu.ac.kr</email>
</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>08</day>
<month>04</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>11</volume>
<elocation-id>1356356</elocation-id>
<history>
<date date-type="received">
<day>15</day>
<month>12</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>26</day>
<month>03</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2024 Kim, Yang, Cha, Jung and Kim</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Kim, Yang, Cha, Jung and Kim</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>The rapid and accurate classification of aquatic products is crucial for ensuring food safety, production efficiency, and economic benefits. However, traditional manual methods for classifying ark shell species based on phenotype are time-consuming and inefficient, especially during peak seasons when the demand is high and labor is scarce. This study aimed to develop a deep learning model for the automated identification and classification of three commercially important ark shells (<italic>Tegillarca granosa</italic>, <italic>Anadara broughtonii</italic>, and <italic>Anadara kagoshimensis</italic>) from images. The ark shells were collected and identified using a polymerase chain reaction method developed in a previous study, and a total of 1,400 images were categorized into three species. Three convolutional neural network (CNN) models, Visual Geometry Group Network (VGGnet), Inception-Residual Network (ResNet), and SqueezeNet, were then applied to two different classification sets, Set-1 (four bivalve species) and Set-2 (three ark shell species). Our results showed that SqueezeNet demonstrated the highest accuracy during the training phase for both classification sets, whereas Inception-ResNet exhibited superior accuracy during the validation phase. Similar results were obtained after developing a third classification set (Set-3) to classify six categories by combining Set-1 and Set-2. Overall, the developed CNN-based classification model exhibited a performance comparable or superior to that presented in previous studies and can provide a theoretical basis for bivalve classification, thereby contributing to improved food safety, production efficiency, and economic benefits in the aquatic products industry.</p>
</abstract>
<kwd-group>
<kwd>
<italic>Anadara kagoshimensis</italic>
</kwd>
<kwd>
<italic>Tegillarca granosa</italic>
</kwd>
<kwd>
<italic>Anadara broughtonii</italic>
</kwd>
<kwd>convolutional neural networks</kwd>
<kwd>food fraud</kwd>
<kwd>image classification</kwd>
</kwd-group>
<counts>
<fig-count count="6"/>
<table-count count="1"/>
<equation-count count="2"/>
<ref-count count="30"/>
<page-count count="10"/>
<word-count count="4680"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-in-acceptance</meta-name>
<meta-value>Ocean Observation</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>Ark shells belong to the phylum Mollusca, class Bivalvia, order Arcida, and family Arcidae. Among the various ark shell species, granular ark (<italic>Tegillarca granosa</italic> (Linnaeus, 1758)), broughton&#x2019;s ribbed ark (<italic>Anadara broughtonii</italic> (Schrenck, 1867)), and half-crenate ark (<italic>Anadara kagoshimensis</italic> (Tokunaga, 1906)) are currently commercially important for the fishery industry (<xref ref-type="bibr" rid="B15">Lee et&#xa0;al., 2022</xref>) and can be found throughout the Indo-Pacific region. These ark shell species, which live burrowed into sand or mud mainly within the intertidal zone at a depth of 1 to 2 m, play a crucial role in the community structure of coastal ecosystems and as critical economic resources for the fishery and aquaculture industries (<xref ref-type="bibr" rid="B30">Zhao et&#xa0;al., 2017</xref>). They are one of the most popular marine bivalves among consumers given their rich flavor and substantial nutritional benefits. In fact, they provide high-quality protein and vitamins, are low in fat, and contain a considerable amount of iron, which helps prevent anemia (<xref ref-type="bibr" rid="B28">Zha et&#xa0;al., 2022</xref>). The worldwide production of ark shells has been estimated to be approximately 591,000 tons per year, representing a value of nearly $600 million (<xref ref-type="bibr" rid="B13">Kong et&#xa0;al., 2020</xref>). In Korea, the production of ark shells was reported to exceed 12,600 tons in 2019 (<ext-link ext-link-type="uri" xlink:href="https://www.mof.go.kr/">https://www.mof.go.kr/</ext-link>). Considering their ecological and economic importance as marine bivalves, they have been the subject of research efforts.</p>
<p>Globally, the aquaculture industry has continued to heavily rely on human judgment, manual labor, and environmental factors, or at most, employs partially automated or mechanized systems (<xref ref-type="bibr" rid="B2">Benjakul and Saetang, 2022</xref>). In response to these challenges, various strategies have been proposed to digitize the sector (<xref ref-type="bibr" rid="B22">Simonyan and Zisserman, 2015</xref>). Particularly in the distribution stage of aquatic resources, image recognition technology has been employed to leverage smartphones in identifying fish species or assessing their status based on image data (<xref ref-type="bibr" rid="B27">Yang et&#xa0;al., 2021</xref>; <xref ref-type="bibr" rid="B12">Knausg&#xe5;rd et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B16">Li et&#xa0;al., 2023</xref>). Traditional image recognition technology has been used to perform contouring and indexing based on the unique characteristics of the object being classified (<xref ref-type="bibr" rid="B4">Deep and Dash, 2019</xref>). One of the popular techniques used to categorize aquatic resources is the polygon approximation algorithm, which involves selecting the start and end points of a segment and deciding whether to include a dominant point. Although this approach has allowed for the classification of aquatic resources with distinct outline characteristics, such as fish, shellfish, and starfish, it has some limitations when distinguishing detailed species within the fish or shellfish categories (<xref ref-type="bibr" rid="B25">Villon et&#xa0;al., 2018</xref>). Since 2010, the field of artificial intelligence has seen rapid advancements, particularly with regard to deep learning algorithms used in image processing (<xref ref-type="bibr" rid="B18">Rasheed, 2021</xref>). Considering their superior performance and broad applicability, deep learning algorithms have been ubiquitously employed across various industry sectors. 
Numerous examples of their applications can also be found in the fisheries industry (<xref ref-type="bibr" rid="B27">Yang et&#xa0;al., 2021</xref>; <xref ref-type="bibr" rid="B20">Saleh et&#xa0;al., 2022</xref>). Typically, the datasets used for artificial intelligence models that classify aquatic resources, particularly fish and shellfish, primarily focus on resources with characteristics distinct enough to be identified visually by humans (<xref ref-type="bibr" rid="B29">Zhang et&#xa0;al., 2023</xref>). However, for specific granular ark breeds that exhibit three similar phenotypes, a targeted model development focusing specifically on these three types could be considered more appropriate than incorporating them into the existing shellfish classification dataset. Traditionally, experts have relied on the shape and count of radial ribs to visually differentiate these species. However, this method is not only challenging in achieving precise differentiation but also labor-intensive, requiring accurate classification assessment.</p>
<p>Therefore, the primary objective of this study was to validate whether imaging-based classification, in conjunction with genetic analysis, could be used for the differentiation of three distinct ark shell species. More precisely, this study employed a deep learning model, underpinned by a convolutional neural network (CNN) architecture, to classify the three species of ark shells, subsequently comparing the efficacies of the applied models to determine the most proficient one. To accomplish this, a verification group comprising three species of ark shells and four other bivalves was classified with the intent of developing an image classification model. We then determined the performance of the most efficient model and, ultimately, sought to corroborate the differentiation of the three ark shell species through polymerase chain reaction (PCR) testing. This approach aimed to not only enhance the accuracy and efficiency of ark shell classification but also contribute to the broader field of mollusk research and biodiversity conservation.</p>
</sec>
<sec id="s2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="s2_1">
<label>2.1</label>
<title>Sample collection and image acquisition</title>
<p>Reference specimens of granular ark (<italic>Tegillarca granosa</italic>), broughton&#x2019;s ribbed ark (<italic>Anadara broughtonii</italic>), half-crenate ark (<italic>Anadara kagoshimensis</italic>), scallop (<italic>Patinopecten yessoensis</italic>), venus mactra (<italic>Mactra veneriformis</italic>), and venus clam (<italic>Cyclina sinensis</italic>) were obtained from the National Institute of Biological Resource (Incheon, Korea). The granular ark, broughton&#x2019;s ribbed ark, and half-crenate ark, scallop, venus mactra, and venus clam samples were collected from fish farms, fish auction markets, and fish markets across Korea. The bivalve species chosen for the experiment were of excellent quality, with no discernible flaws or damage. We opted to use RGB images obtained by smartphone considering their wide availability across various stages in the bivalve industry (<xref ref-type="bibr" rid="B10">Jayasundara et&#xa0;al., 2023</xref>). To ensure better generalization, two smartphones, an iPhone 11 Pro Max and a Samsung Galaxy S20+, with different cameras were used. The specifications of the smartphone camera used were as follows: dimensions (iPhone, 1,440 &#xd7; 1,440; Galaxy, 1,440 &#xd7; 3,200), resolution (iPhone, 96 dpi; Galaxy, 525 ppi), ISO time (iPhone, 100; Galaxy, 100), f-stop (iPhone, f/1.6; Galaxy, f/1.8), and exposure time (iPhone, 1/60 s; Galaxy, 1/60 s). To ensure image consistency and prevent shadows, a background surface was used by fixing the camera at 50 cm above the bivalve samples during the image acquisition process.</p>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Identification of three ark shells</title>
<p>The ark shells selected for the experiment were identified using a specific PCR method to accurately classify the samples. First, the shells of the ark shells were removed, after which the genomic DNA of the edible portion was extracted using DNeasy Blood &amp; Tissue Kit (Qiagen, Hilden, Germany) according to the manufacturer&#x2019;s instructions. Thereafter, <italic>T</italic>. <italic>granosa</italic>, <italic>A</italic>. <italic>broughtonii</italic>, and <italic>A</italic>. <italic>kagoshimensis</italic> were detected using ultrafast PCR with specific primers developed in our previous study (<xref ref-type="bibr" rid="B15">Lee et&#xa0;al., 2022</xref>). The primer sequences used for ultrafast PCR are shown in <xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Table&#xa0;1</bold>
</xref>. Based on genetic analysis, each image of ark shell was classified according to species.</p>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>Bivalves image dataset</title>
<p>When preparing the image dataset for the detailed species classification of ark shells, we initially gathered datasets for four bivalve species, namely scallop, venus mactra, venus clam (species that resemble ark shells), and ark shells, in order to develop a model that could classify these species. Within this framework, we regarded the three species of ark shells as a single data group. The primary dataset used for classifying these four types of bivalves was collected from the top view of the specimens, as depicted in <xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>. Building upon the results derived from the bivalve classification dataset, image data of specimens identified as ark shell were acquired for further classification into the three detailed ark shell species, as depicted in the following <xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>. A distinctive physical feature of the ark shells is the presence of radial ribs, which resemble fan-shaped grooves (<xref ref-type="fig" rid="f2">
<bold>Figure&#xa0;2</bold>
</xref>). We aimed to examine the conventional method of ark shell classification based on the count of these radial ribs. To accomplish this, the number of lines in 100 samples of each detailed species was counted.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>The classification dataset intended for testing the deep learning model applied in this study. Representative images of the dataset for the entire bivalves (scallop, venus mactra, venus clam, and ark shells: in the case of ark clam, all three detailed species are included).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-11-1356356-g001.tif"/>
</fig>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Phenotype characteristics of <italic>Anadara kagoshimensis</italic> (top), <italic>Anadara broughtonii</italic> (middle), and <italic>Tegillarca granosa</italic> (bottom).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-11-1356356-g002.tif"/>
</fig>
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>Development of deep learning models for image classification</title>
<sec id="s2_4_1">
<label>2.4.1</label>
<title>CNN models: visual geometry group network</title>
<p>
<xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3</bold>
</xref> depicts the characteristics and inner workings of the VGG16 model. Accordingly, the model commences with an input layer that accommodates an image of a shape (224, 224, 3). The architecture then utilizes 13 convolutional layers designed to extract intricate features from the input images. Though small, the 3 &#xd7; 3 filters capture localized spatial correlations present within the image data, effectively simplifying the complexity of the image. Nonlinearity, a critical aspect of deep learning networks, is introduced by applying the Rectified Linear Unit activation function after each convolution operation. This process allows the network to model and learn more complex patterns within the data. Max pooling, a downsampling operation, is performed along the spatial dimensions of the image (width and height) through five layers within the model. Not all convolutional layers are followed by max pooling, thereby preserving certain high-resolution features. After the final max pooling layer, the architecture encompasses two fully connected layers, each possessing 4,096 nodes. These layers further model nonlinear combinations of high-level features derived from the output of the convolutional layers. Essentially, these fully connected layers function as classifiers that can be utilized for definitive classification. The architecture concludes with a softmax activation layer comprising 1,000 nodes, one for each possible image class within the model. The softmax function, which represents a probability distribution over the varied possible outcomes, delivers the final class prediction for the given input image.</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>The structure of <bold>(A)</bold> VGGnet-19, <bold>(B)</bold> Inception-Resnet, and <bold>(C)</bold> Squeezenet used to develop the image classification model.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-11-1356356-g003.tif"/>
</fig>
</sec>
<sec id="s2_4_2">
<label>2.4.2</label>
<title>CNN models: Inception-ResNet</title>
<p>Inception-ResNet, a distinctive architecture within the CNN framework, seamlessly integrates the salient features of two pivotal networks, namely Inception and ResNet (<xref ref-type="bibr" rid="B7">He et&#xa0;al., 2016</xref>; <xref ref-type="bibr" rid="B24">Szegedy et&#xa0;al., 2017</xref>). The Inception-ResNet architecture amalgamates the advantages of Inception (efficient processing of images at multiple scales) and ResNet (ease of training deep networks) architectures. This is achieved by introducing residual connections within the Inception architecture. The Inception-ResNet architecture can be characterized as a series of stacked Inception modules, each supplemented with a shortcut connection that links the module&#x2019;s input to its output. Owing to this hybrid combination, the Inception-ResNet architecture has been considered an extraordinarily potent model for various computer vision tasks that is capable of efficiently processing images across diverse scales, courtesy of the incorporated Inception architecture, while ensuring relative ease in training, even for significantly deep networks, due to the integrated ResNet architecture (<xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3</bold>
</xref>).</p>
</sec>
<sec id="s2_4_3">
<label>2.4.3</label>
<title>CNN models: SqueezeNet</title>
<p>SqueezeNet was designed to reduce the number of parameters and amount of memory required to store the model without sacrificing accuracy (<xref ref-type="bibr" rid="B14">Koonce, 2021</xref>; <xref ref-type="bibr" rid="B21">Sayed et&#xa0;al., 2021</xref>), which can be achieved through several strategies.</p>
<p>Use of 1 &#xd7; 1 filters: These filters have fewer parameters than 3 &#xd7; 3 filters and can be used to reduce and increase the number of channels in the network.</p>
<p>Decrease in number of input channels to 3 &#xd7; 3 filters: SqueezeNet decreases the number of input channels to 3 &#xd7; 3 filters, which are more computationally expensive than 1 &#xd7; 1 filters. This is accomplished through squeeze layers, which reduce the depth of the network using 1 &#xd7; 1 filters.</p>
<p>Downsampling late in the network: Downsampling is a technique used to reduce the spatial dimensions of the data. In SqueezeNet, downsampling is performed late in the network to ensure that the convolutional layers have large activation maps, which can increase the classification accuracy.</p>
<p>The basic building block of SqueezeNet is the Fire module, which consists of a squeeze layer followed by an expand layer (<xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3</bold>
</xref>). The squeeze layer reduces the number of input channels using 1 &#xd7; 1 filters, whereas the expand layer increases the number of channels using a combination of 1 &#xd7; 1 and 3 &#xd7; 3 filters. Overall, SqueezeNet is an efficient and compact network that is ideal for circumstances in which memory and computational resources are limited but high accuracy is still required.</p>
</sec>
</sec>
<sec id="s2_5">
<label>2.5</label>
<title>Model performance evaluation method</title>
<p>Based on the classification objectives, the proposed models were categorized into Classification Set-1, Classification Set-2, and Classification Set-3. Set-1 was designed to classify four types of bivalves (scallop, venus mactra, venus clam, and ark shells), whereas Set-2 was specifically engineered to distinguish between three species of the ark shells (granular ark, broughton&#x2019;s ribbed ark, and half-crenate ark). Finally, Set-3 was designed to amalgamate the classification capabilities of Set-1 and Set-2, thereby aiming to classify a total of six classes, encompassing the three broader bivalve categories and the three specific ark shells species. The development of these models holds the potential to significantly enhance the accuracy and efficiency of bivalve species classification tasks (<xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>).</p>
<p>The evaluation of the developed models in this study is a crucial component in ensuring their performance and reliability. Classification accuracy is the most straightforward evaluation metric. Accuracy is a metric that accounts for the situation in which the model infers two classification labels and predicts true as true and false as false, which can be expressed as <xref ref-type="disp-formula" rid="eq1">Equation&#xa0;(1)</xref>:</p>
<disp-formula id="eq1">
<label>(1)</label>
<mml:math display="block" id="M1">
<mml:mrow>
<mml:mi mathvariant="bold-italic">A</mml:mi>
<mml:mi mathvariant="bold-italic">c</mml:mi>
<mml:mi mathvariant="bold-italic">c</mml:mi>
<mml:mi mathvariant="bold-italic">u</mml:mi>
<mml:mi mathvariant="bold-italic">r</mml:mi>
<mml:mi mathvariant="bold-italic">a</mml:mi>
<mml:mi mathvariant="bold-italic">c</mml:mi>
<mml:mi mathvariant="bold-italic">y</mml:mi>
<mml:mo mathvariant="bold">=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi mathvariant="bold-italic">T</mml:mi>
<mml:mi mathvariant="bold-italic">P</mml:mi>
<mml:mo mathvariant="bold">+</mml:mo>
<mml:mi mathvariant="bold-italic">T</mml:mi>
<mml:mi mathvariant="bold-italic">N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="bold-italic">T</mml:mi>
<mml:mi mathvariant="bold-italic">P</mml:mi>
<mml:mo mathvariant="bold">+</mml:mo>
<mml:mi mathvariant="bold-italic">F</mml:mi>
<mml:mi mathvariant="bold-italic">N</mml:mi>
<mml:mo mathvariant="bold">+</mml:mo>
<mml:mi mathvariant="bold-italic">F</mml:mi>
<mml:mi mathvariant="bold-italic">P</mml:mi>
<mml:mo mathvariant="bold">+</mml:mo>
<mml:mi mathvariant="bold-italic">T</mml:mi>
<mml:mi mathvariant="bold-italic">N</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>True Positive (TP) predicts the answer that is actually true as true (correct answer). False Positive (FP) predicts the answer that is actually false as true (wrong answer). False Negative (FN) predicts the answer that is actually true as false (wrong answer). True Negative (TN) predicts the answer that is actually false as false (correct answer).</p>
<p>The F1 score is a statistic that combines the classification precision and recall rate into a single measure. Here, the harmonic mean, rather than the arithmetic mean, is used. This ensures that the F1 score remains low when either precision or recall is close to 0. The equation for the F1 score is given as <xref ref-type="disp-formula" rid="eq2">Equation (2)</xref>:</p>
<disp-formula id="eq2">
<label>(2)</label>
<mml:math display="block" id="M2">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="bold-italic">F</mml:mi>
<mml:mn mathvariant="bold">1</mml:mn>
</mml:msub>
<mml:mo mathvariant="bold">=</mml:mo>
<mml:mn mathvariant="bold">2</mml:mn>
<mml:mo mathvariant="bold">&#xb7;</mml:mo>
<mml:mfrac>
<mml:mn mathvariant="bold">1</mml:mn>
<mml:mrow>
<mml:mfrac>
<mml:mn mathvariant="bold">1</mml:mn>
<mml:mrow>
<mml:mi mathvariant="bold-italic">r</mml:mi>
<mml:mi mathvariant="bold-italic">e</mml:mi>
<mml:mi mathvariant="bold-italic">c</mml:mi>
<mml:mi mathvariant="bold-italic">a</mml:mi>
<mml:mi mathvariant="bold-italic">l</mml:mi>
<mml:mi mathvariant="bold-italic">l</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mo mathvariant="bold">+</mml:mo>
<mml:mfrac>
<mml:mn mathvariant="bold">1</mml:mn>
<mml:mrow>
<mml:mi mathvariant="bold-italic">p</mml:mi>
<mml:mi mathvariant="bold-italic">r</mml:mi>
<mml:mi mathvariant="bold-italic">e</mml:mi>
<mml:mi mathvariant="bold-italic">c</mml:mi>
<mml:mi mathvariant="bold-italic">i</mml:mi>
<mml:mi mathvariant="bold-italic">s</mml:mi>
<mml:mi mathvariant="bold-italic">i</mml:mi>
<mml:mi mathvariant="bold-italic">o</mml:mi>
<mml:mi mathvariant="bold-italic">n</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfrac>
<mml:mo mathvariant="bold">=</mml:mo>
<mml:mn mathvariant="bold">2</mml:mn>
<mml:mo mathvariant="bold">&#xb7;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi mathvariant="bold-italic">p</mml:mi>
<mml:mi mathvariant="bold-italic">r</mml:mi>
<mml:mi mathvariant="bold-italic">e</mml:mi>
<mml:mi mathvariant="bold-italic">c</mml:mi>
<mml:mi mathvariant="bold-italic">i</mml:mi>
<mml:mi mathvariant="bold-italic">s</mml:mi>
<mml:mi mathvariant="bold-italic">i</mml:mi>
<mml:mi mathvariant="bold-italic">o</mml:mi>
<mml:mi mathvariant="bold-italic">n</mml:mi>
<mml:mo mathvariant="bold">&#xb7;</mml:mo>
<mml:mi mathvariant="bold-italic">r</mml:mi>
<mml:mi mathvariant="bold-italic">e</mml:mi>
<mml:mi mathvariant="bold-italic">c</mml:mi>
<mml:mi mathvariant="bold-italic">a</mml:mi>
<mml:mi mathvariant="bold-italic">l</mml:mi>
<mml:mi mathvariant="bold-italic">l</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="bold-italic">p</mml:mi>
<mml:mi mathvariant="bold-italic">r</mml:mi>
<mml:mi mathvariant="bold-italic">e</mml:mi>
<mml:mi mathvariant="bold-italic">c</mml:mi>
<mml:mi mathvariant="bold-italic">i</mml:mi>
<mml:mi mathvariant="bold-italic">s</mml:mi>
<mml:mi mathvariant="bold-italic">i</mml:mi>
<mml:mi mathvariant="bold-italic">o</mml:mi>
<mml:mi mathvariant="bold-italic">n</mml:mi>
<mml:mo mathvariant="bold">+</mml:mo>
<mml:mi mathvariant="bold-italic">r</mml:mi>
<mml:mi mathvariant="bold-italic">e</mml:mi>
<mml:mi mathvariant="bold-italic">c</mml:mi>
<mml:mi mathvariant="bold-italic">a</mml:mi>
<mml:mi mathvariant="bold-italic">l</mml:mi>
<mml:mi mathvariant="bold-italic">l</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>The models were trained using a substantial dataset of bivalve images and validated using a separate, unseen set of images to ensure an unbiased evaluation. Performance was tested on not only an individual level (Classification Set-1 and Classification Set-2) but also a comprehensive level (Classification Set-3), providing insights into specific and generalized model performance. Notably, the models are not evaluated based solely on these metrics. Qualitative analysis of the predictions, through visual inspection of correctly and incorrectly classified images, can also contribute to the overall assessment of the models&#x2019; performance. This comprehensive evaluation methodology ensures the development of reliable and robust classification models that can function effectively on real data samples.</p>
</sec>
</sec>
<sec id="s3" sec-type="results">
<label>3</label>
<title>Results</title>
<sec id="s3_1">
<label>3.1</label>
<title>Classification of three ark shells by molecular technology</title>
<p>This study collected three ark shells (<italic>T</italic>. <italic>granosa</italic>, <italic>A</italic>. <italic>broughtonii</italic>, and <italic>A</italic>. <italic>kagoshimensis</italic>), which were identified using an ultrafast PCR method developed in our previous study, in order to build a deep learning model with accurate data. The ultrafast PCR method was applied to ark shells, with each primer showing an amplification plot for each sample (<xref ref-type="fig" rid="f4">
<bold>Figure&#xa0;4</bold>
</xref>). The Ct values of the amplified products from each target species were 18.51, 23.11, and 22.62 for granular ark, half-crenate ark, and Broughton&#x2019;s ribbed ark, respectively. The specific band was also amplified in the electrophoresis image (<xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Figure&#xa0;1</bold>
</xref>). A total of 1,400 images were categorized into three species (<italic>T</italic>. <italic>granosa</italic>, <italic>A</italic>. <italic>broughtonii</italic>, and <italic>A</italic>. <italic>kagoshimensis</italic>) and used to validate deep learning.</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Amplification plot for identifying <bold>(A)</bold> <italic>Tegillarca granosa</italic>, <bold>(B)</bold> <italic>Anadara broughtonii</italic>, and <bold>(C)</bold> <italic>Anadara kagoshimensis</italic>.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-11-1356356-g004.tif"/>
</fig>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>CNN classification performance</title>
<sec id="s3_2_1">
<label>3.2.1</label>
<title>Results for classification set-1</title>
<p>This study initially compared the accuracy of the classification of the four bivalves (scallop, venus mactra, venus clam, and ark shells) with the learning and verification accuracy of three CNN models. The results of each deep learning model applied to Classification Set-1 are presented in <xref ref-type="fig" rid="f5">
<bold>Figure&#xa0;5</bold>
</xref>, with the left side delineating the performance metrics obtained from the training data and the right side delineating the corresponding metrics obtained from the test data. This structured presentation of results facilitates a comprehensive and comparative analysis of the model&#x2019;s performance across training and testing phases (<xref ref-type="fig" rid="f5">
<bold>Figure&#xa0;5A</bold>
</xref>, left), showcasing the changes in accuracy over iterations for three distinct CNN architectures: VGGnet, Inception-ResNet, and SqueezeNet. The accuracy results obtained during the training phase were as follows: VGGnet, 94.32%; Inception-ResNet, 96.55%; SqueezeNet, 97.23% (the highest reported).</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Results of each deep learning model for <bold>(A)</bold> Classification Set-1, <bold>(B)</bold> Classification Set-2, and <bold>(C)</bold> Classification Set-3. The left and right sides represent training data and test data, respectively.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-11-1356356-g005.tif"/>
</fig>
<p>To verify the reliability of the developed model, the test set was utilized for inference and the validation accuracy was subsequently calculated. During this validation phase, VGGnet, Inception-ResNet, and SqueezeNet achieved an accuracy of 91.12%, 95.41%, and 91.03%, respectively (<xref ref-type="fig" rid="f5">
<bold>Figure&#xa0;5A</bold>
</xref>, right). These results confirmed that all three models demonstrated promising performance, with SqueezeNet exhibiting the highest training accuracy and Inception-ResNet showing superior accuracy during the validation phase. This comprehensive performance evaluation provides valuable insights into the models&#x2019; capabilities.</p>
</sec>
<sec id="s3_2_2">
<label>3.2.2</label>
<title>Results for classification set-2</title>
<p>Classification Set-2, which was tailored to classify three detailed ark shell species, showed changes in accuracy over iterations for the three utilized CNN models, namely VGGnet, Inception-ResNet, and SqueezeNet, as demonstrated by its training performance in <xref ref-type="fig" rid="f5">
<bold>Figure&#xa0;5B</bold>
</xref> (left). The accuracy values attained by VGGnet, Inception-ResNet, and SqueezeNet during the training phase were 93.22%, 93.51%, and 97.11%, respectively, with SqueezeNet outperforming the other two CNN models. Following the approach undertaken with Classification Set-1, the developed Classification Set-2 was also put through an inference process with the test set to ascertain its validation accuracy. The accuracy results for VGGnet, Inception-ResNet, and SqueezeNet obtained during this validation phase were 95.05%, 94.01%, and 97.78%, respectively. These outcomes affirm the proficiency of all three models, with SqueezeNet demonstrating the highest accuracy during both the training and validation phases. This extensive performance evaluation highlights the effectiveness of the models while also emphasizing the potential areas for enhancement in future iterations.</p>
</sec>
<sec id="s3_2_3">
<label>3.2.3</label>
<title>Results for classification set-3</title>
<p>Classification Set-3 was developed to classify six classes established by combining the classification classes in Classification Set-1 and Classification Set-2. As exhibited in <xref ref-type="fig" rid="f5">
<bold>Figure&#xa0;5C</bold>
</xref> (left), the performance of Classification Set-3 during training displayed an evolution in accuracy across iterations for the three distinct CNN models, VGGnet, Inception-ResNet, and SqueezeNet. The accuracy attained by VGGnet, Inception-ResNet, and SqueezeNet during the training phase were 89.91%, 92.48%, and 91.75%, respectively.</p>
<p>Similar to its predecessors, Classification Set-3 underwent an inference process with the test set to establish its validation accuracy. During the validation phase, VGGnet, Inception-ResNet, and SqueezeNet achieved an accuracy of 90.23%, 93.67%, and 89.16%, respectively. These results underscore the promising performance of all three models, with Inception-ResNet achieving the highest accuracy in both the training and validation phases.</p>
</sec>
<sec id="s3_2_4">
<label>3.2.4</label>
<title>F1 score value comparison result</title>
<p>In the evaluation of our deep learning model, accuracy and F1 scores were considered as key performance metrics. Although accuracy is a common measure for model performance, it can be misleading in cases where the dataset is imbalanced given that it does not consider the distribution of false positives and false negatives. Therefore, we also utilized the F1 score, which is a more robust measure for imbalanced datasets, given that it considers false positives and false negatives by calculating the harmonic mean of precision and recall. The SqueezeNet model performed slightly better than the VGGnet and Inception-ResNet models on Classification Set-1 and Set-2, with F1 scores of 0.91 and 0.89, respectively (<xref ref-type="table" rid="T1">
<bold>Table&#xa0;1</bold>
</xref>). However, on Classification Set-3, the Inception-ResNet model outperformed the other two models with an F1 score of 0.91. Despite the relatively small differences in F1 scores, they can be significant depending on the specific application and the requirements for model performance.</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>F1 scores of the validation sets of deep learning models.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Case (in validation&#xa0;set)</th>
<th valign="middle" align="left">VGGnet</th>
<th valign="middle" align="left">Inception-ResNet</th>
<th valign="middle" align="left">SqueezeNet</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Classification set-1</td>
<td valign="middle" align="left">0.84</td>
<td valign="middle" align="left">0.88</td>
<td valign="middle" align="left">0.91</td>
</tr>
<tr>
<td valign="middle" align="left">Classification set-2</td>
<td valign="middle" align="left">0.88</td>
<td valign="middle" align="left">0.88</td>
<td valign="middle" align="left">0.89</td>
</tr>
<tr>
<td valign="middle" align="left">Classification set-3</td>
<td valign="middle" align="left">0.85</td>
<td valign="middle" align="left">0.91</td>
<td valign="middle" align="left">0.84</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Classification of ark shells based on radial rib count</title>
<p>In traditional methods, detailed species of ark shells have been conventionally distinguished based on the count of the radial ribs. To investigate the difference in the number of radial ribs among the three species, the radial ribs in a sample size of 100 ark shells were counted. The half-crenate ark had an average radial rib count of 28.9 &#xb1; 1.92, Broughton&#x2019;s ribbed ark exhibited 32.01 &#xb1; 1.89 ribs on average, and the granular ark presented an average count of 17.88 &#xb1; 1.23 radial ribs. <xref ref-type="fig" rid="f6">
<bold>Figure&#xa0;6</bold>
</xref> illustrates the distribution of radial rib count for each class. Notably, overlaps in distribution were noted between the half-crenate ark and Broughton&#x2019;s ribbed ark, whereas the granular ark was distinctly differentiated. Although this distinction can be somewhat discerned visually in the images, definitively distinguishing between the half-crenate ark and Broughton&#x2019;s ribbed ark based on visual information alone requires a high level of expertise and judgment.</p>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>Distribution of radial rib count for the half-crenate ark, broughton&#x2019;s ribbed ark, and granular ark species. The black dot represents outliers in the data. The median is shown by a line inside the box. The standard deviation is not visually represented in the box plot.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-11-1356356-g006.tif"/>
</fig>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<label>4</label>
<title>Discussion</title>
<p>Over the past few decades, consumer demands on the verification of the authenticity of aquatic products, detection of adulteration, and implementation of stricter controls on these issues have increased (<xref ref-type="bibr" rid="B19">Ren et&#xa0;al., 2023</xref>). Manual classification of ark shells based on phenotype cannot satisfy the demand created by shellfish production areas during peak seasons given the lack of labor (<xref ref-type="bibr" rid="B6">Ge et&#xa0;al., 2022</xref>). Moreover, this problem is compounded by the considerably limited corporate development due to rising labor costs and soaring product prices (<xref ref-type="bibr" rid="B5">Feng et&#xa0;al., 2021</xref>). With the demand for quality and efficiently produced aquatic products, more efficient and accurate approaches in sorting fish and shellfish are needed to improve the level of production automation (<xref ref-type="bibr" rid="B5">Feng et&#xa0;al., 2021</xref>). In recent years, computer and artificial intelligence technologies have developed rapidly to the point where computer vision has been widely applied in numerous fields of industrial production, including automotive, electrical machinery, food, logistics, and manufacturing industries (<xref ref-type="bibr" rid="B9">Jalal et&#xa0;al., 2020</xref>; <xref ref-type="bibr" rid="B23">Singh et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B11">Kim et&#xa0;al., 2023</xref>). By using computer vision to identify, locate, and subsequently sort scallops, production efficiency can be improved while ensuring the quality of the aquatic products.</p>
<p>This approach has been applied for the automated evaluation of aquatic products because deep learning networks have a strong capacity for learning and can extract deeper information from images acquired in the environment (<xref ref-type="bibr" rid="B7">He et&#xa0;al., 2016</xref>; <xref ref-type="bibr" rid="B17">Mukhiddinov et&#xa0;al., 2022</xref>). <xref ref-type="bibr" rid="B10">Jayasundara et&#xa0;al. (2023)</xref> presented two neural network architectures to classify the quality grading of the Indian Sardinella and the Yellowfin Tuna using images (<xref ref-type="bibr" rid="B10">Jayasundara et&#xa0;al., 2023</xref>). Moreover, <xref ref-type="bibr" rid="B26">Vo et&#xa0;al. (2020)</xref> used the pre-trained Mask-RCNN model to determine the various attributes of lobsters, such as size, weight, and color, to achieve automated grading of the lobsters (<xref ref-type="bibr" rid="B26">Vo et&#xa0;al., 2020</xref>). More interestingly, given the outstanding classification performance of deep learning, researchers have applied the same to the gender classification of aquatic animals. For example, <xref ref-type="bibr" rid="B3">Cui et&#xa0;al. (2020)</xref> proposed an improved deep CNN model that can classify Chinese mitten crabs according to gender using images at an accuracy of 99% (<xref ref-type="bibr" rid="B3">Cui et&#xa0;al., 2020</xref>).</p>
<p>It is well established that several types of aquatic products share very close similarities among their species, making it difficult to distinguish them based on their morphological characteristics (<xref ref-type="bibr" rid="B16">Li et&#xa0;al., 2023</xref>). Although the external characteristics of aquatic products are recognizable, distinguishing them based on these characteristics is a time-consuming process. Given the high morphological similarity of fish species, <xref ref-type="bibr" rid="B1">Banan et&#xa0;al. (2020)</xref> developed a deep NN model for the identification of four carp species, common carp (<italic>Cyprinus carpio</italic> Linnaeus, 1758), grass carp (<italic>Ctenopharyngodon idella</italic> (Valenciennes, 1844)), bighead carp (<italic>Hypophthalmichthys nobilis</italic> (Richardson, 1845)), and silver carp (<italic>Hypophthalmichthys molitrix</italic> (Valenciennes, 1844)) (<xref ref-type="bibr" rid="B1">Banan et&#xa0;al., 2020</xref>).</p>
<p>Although deep learning technology has achieved remarkable results in the image classification of aquatic products, species classification remains a major challenge, especially for shellfish species that are very morphologically similar (<xref ref-type="bibr" rid="B1">Banan et&#xa0;al., 2020</xref>). In general, Broughton&#x2019;s ribbed ark has more radial ribs than the half-crenate ark. However, smaller specimens of Broughton&#x2019;s ribbed ark may have a number of radial ribs similar to those of the half-crenate ark. In this context, one of the challenging aspects is that it is difficult to distinguish between the three types of ark shells based on the number of radial ribs alone. As shown in <xref ref-type="fig" rid="f6">
<bold>Figure&#xa0;6</bold>
</xref>, the average counts of radial ribs for the half-crenate ark and Broughton&#x2019;s ribbed ark are similar, and there is an overlap in their distributions, making visual discrimination alone challenging. Due to this morphological similarity, distinguishing between these ark shell species based solely on radial rib counts may not be definitive, and it may require a high level of expertise and judgment. Therefore, it can be concluded that a more accurate and effective method is needed to differentiate ark shell species with similar morphology.</p>
<p>For this reason, we developed a deep learning model for the automated identification and classification of three ark shells based on obtained images, thereby overcoming concerns regarding time consumption and inefficiency associated with traditional identification methods. Our experimental results showed that ark shells and other species of bivalves were classified at an accuracy of 95.30%, while the three types of ark shells were classified at an accuracy of 92.4%. Similar to ark shells, squid species share considerable morphological similarities. As such, <xref ref-type="bibr" rid="B8">Hu et&#xa0;al. (2020)</xref> proposed an efficient deconvolutional neural network to classify three squid species based on images, with the test sample achieving an accuracy of 85.7% (<xref ref-type="bibr" rid="B8">Hu et&#xa0;al., 2020</xref>). Our experimental results showed that the classification performance of the CNN model was comparable to or better than that presented in previous studies and that the developed method can be applied to other bivalves that share similar morphological characteristics among their species. Our CNN-based model that classifies images of three ark shells can provide a theoretical basis for bivalve classification and enable the tracking of the entire production process of ark shells from catching to selling with the support of big data, which is useful for improving food safety, production efficiency, and economic benefits.</p>
</sec>
<sec id="s5" sec-type="data-availability">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Material</bold>
</xref>. Further inquiries can be directed to the corresponding authors.</p>
</sec>
<sec id="s6" sec-type="ethics-statement">
<title>Ethics statement</title>
<p>Ethical approval was not required for this study involving animals, as per local legislation and institutional guidelines, because the experiments were conducted only on species that are commonly used as food and intended for human consumption.</p>
</sec>
<sec id="s7" sec-type="author-contributions">
<title>Author contributions</title>
<p>EK: Conceptualization, Data curation, Writing &#x2013; original draft. S-MY: Investigation, Visualization, Writing &#x2013; review &amp; editing. J-EC: Writing &#x2013; review &amp; editing. D-HJ: Investigation, Methodology, Writing &#x2013; original draft. H-YK: Conceptualization, Funding acquisition, Supervision, Writing &#x2013; review &amp; editing.</p>
</sec>
</body>
<back>
<sec id="s8" sec-type="funding-information">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research, authorship, and/or publication of this article. This research was supported by a grant (22193MFDS471) from the Ministry of Food and Drug Safety in 2023.</p>
</sec>
<sec id="s9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
<p>The author(s) declared that they were an editorial board member of Frontiers, at the time of submission. This had no impact on the peer review process and the final decision.</p>
</sec>
<sec id="s10" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s11" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fmars.2024.1356356/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fmars.2024.1356356/full#supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="DataSheet_1.docx" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Banan</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Nasiri</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Taheri-Garavand</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Deep learning-based appearance features extraction for automated carp species identification</article-title>. <source>Aquac. Eng.</source> <volume>89</volume>, <elocation-id>102053</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.aquaeng.2020.102053</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Benjakul</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Saetang</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Development of loop-mediated isothermal amplification (LAMP) assays for the rapid authentication of three swimming crab species</article-title>. <source>Foods</source> <volume>11</volume>, <elocation-id>2247</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/foods11152247</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cui</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Pan</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Zou</surname> <given-names>X.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>A gender classification method for Chinese mitten crab using deep convolutional neural network</article-title>. <source>Multimed. Tools Appl.</source> <volume>79</volume>, <fpage>7669</fpage>&#x2013;<lpage>7684</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11042-019-08355-w</pub-id>
</citation>
</ref>
<ref id="B4">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Deep</surname> <given-names>B. V.</given-names>
</name>
<name>
<surname>Dash</surname> <given-names>R.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Underwater fish species recognition using deep learning techniques</article-title>,&#x201d; in <conf-name>2019 6th International Conference on Signal Processing and Integrated Networks, SPIN 2019</conf-name>. <fpage>665</fpage>&#x2013;<lpage>669</lpage> (<publisher-loc>Noida, India</publisher-loc>: <publisher-name>IEEE</publisher-name>). doi:&#xa0;<pub-id pub-id-type="doi">10.1109/SPIN.2019.8711657</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Feng</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Tao</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>E. J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Classification of shellfish recognition based on improved faster R-CNN framework of deep learning</article-title>. <source>Math. Probl. Eng.</source> <volume>2021</volume>, <fpage>1</fpage>&#x2013;<lpage>10</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1155/2021/1966848</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ge</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Dai</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Zhu</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>R.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>A deep learning model applied to optical image target detection and recognition for the identification of underwater biostructures</article-title>. <source>Machines</source> <volume>10</volume>, <elocation-id>809</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/machines10090809</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>He</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Ren</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2016</year>). &#x201c;<article-title>Deep residual learning for image recognition</article-title>,&#x201d; in <conf-name>Proc. IEEE Comput. Soc. Conf. Comput. Vis. Pattern Recognit</conf-name>, , <conf-date>2016-Decem</conf-date>. <fpage>770</fpage>&#x2013;<lpage>778</lpage>. <publisher-loc>(Las Vegas, Nevada, USA)</publisher-loc>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/CVPR.2016.90</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hu</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Zhou</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>W.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>A rapid, low-cost deep learning system to classify squid species and evaluate freshness based on digital images</article-title>. <source>Fish. Res.</source> <volume>221</volume>, <elocation-id>105376</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.fishres.2019.105376</pub-id>
</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jalal</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Salman</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Mian</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Shortis</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Shafait</surname> <given-names>F.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Fish detection and species classification in underwater environments using deep learning with temporal information</article-title>. <source>Ecol. Inform.</source> <volume>57</volume>, <elocation-id>101088</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ecoinf.2020.101088</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jayasundara</surname> <given-names>J. M. V. D. B.</given-names>
</name>
<name>
<surname>Ramanayake</surname> <given-names>R. M. L. S.</given-names>
</name>
<name>
<surname>Senarath</surname> <given-names>H. M. N. B.</given-names>
</name>
<name>
<surname>Herath</surname> <given-names>H. M. S. L.</given-names>
</name>
<name>
<surname>Godaliyadda</surname> <given-names>G. M. R. I.</given-names>
</name>
<name>
<surname>Ekanayake</surname> <given-names>M. P. B.</given-names>
</name>
<etal/>
</person-group>. (<year>2023</year>). <article-title>Deep learning for automated fish grading</article-title>. <source>J. Agric. Food Res.</source> <volume>14</volume>, <elocation-id>100711</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.jafr.2023.100711</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kim</surname> <given-names>E.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>S. M.</given-names>
</name>
<name>
<surname>Jung</surname> <given-names>D. H.</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>H. Y.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Differentiation between Weissella cibaria and Weissella confusa using machine-learning-combined MALDI-TOF MS</article-title>. <source>Int. J. Mol. Sci.</source> <volume>24</volume>, <elocation-id>11009</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/ijms241311009</pub-id>
</citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Knausg&#xe5;rd</surname> <given-names>K. M.</given-names>
</name>
<name>
<surname>Wiklund</surname> <given-names>A.</given-names>
</name>
<name>
<surname>S&#xf8;rdalen</surname> <given-names>T. K.</given-names>
</name>
<name>
<surname>Halvorsen</surname> <given-names>K. T.</given-names>
</name>
<name>
<surname>Kleiven</surname> <given-names>A. R.</given-names>
</name>
<name>
<surname>Jiao</surname> <given-names>L.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>Temperate fish detection and classification: A deep learning based approach</article-title>. <source>Appl. Intell.</source> <volume>52</volume>, <fpage>6988</fpage>&#x2013;<lpage>7001</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s10489-020-02154-9</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kong</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Kocot</surname> <given-names>K. M.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Qi</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>Q.</given-names>
</name>
<etal/>
</person-group>. (<year>2020</year>). <article-title>Mitogenomics reveals phylogenetic relationships of Arcoida (Mollusca, Bivalvia) and multiple independent expansions and contractions in mitochondrial genome size</article-title>. <source>Mol. Phylogenet. Evol.</source> <volume>150</volume>, <elocation-id>106857</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ympev.2020.106857</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Koonce</surname> <given-names>B.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>SqueezeNet</article-title>,&#x201d; in <source>Convolutional Neural Networks with Swift for Tensorflow</source> (<publisher-name>Apress</publisher-name>, <publisher-loc>Berkeley, CA</publisher-loc>), <fpage>73</fpage>&#x2013;<lpage>85</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/978-1-4842-6168-2_7</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lee</surname> <given-names>G. Y.</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>E.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>S. M.</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>H. Y.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Rapid on-site identification for three Arcidae species (Anadara kagoshimensis, Tegillarca granosa, and Anadara broughtonii) using ultrafast PCR combined with direct DNA extraction</article-title>. <source>Foods</source> <volume>11</volume>, <elocation-id>2449</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/foods11162449</pub-id>
</citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Xu</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Deng</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Xiao</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Han</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Zheng</surname> <given-names>H.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Deep learning for visual recognition and detection of aquatic animals: A review</article-title>. <source>Rev. Aquac.</source> <volume>15</volume>, <fpage>409</fpage>&#x2013;<lpage>433</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/raq.12726</pub-id>
</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mukhiddinov</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Muminov</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Cho</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Improved classification approach for fruits and vegetables freshness based on deep learning</article-title>. <source>Sensors</source> <volume>22</volume>, <elocation-id>8192</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/s22218192</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Rasheed</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>A sustainable deep learning based computationally intelligent seafood monitoring system for fish species screening</article-title>,&#x201d; in <conf-name>Proceedings - International Conference on Artificial Intelligence of Things, ICAIoT 2021</conf-name>. <fpage>1</fpage>&#x2013;<lpage>6</lpage> (<publisher-loc>Nicosia, Turkey</publisher-loc>: <publisher-name>IEEE</publisher-name>). doi:&#xa0;<pub-id pub-id-type="doi">10.1109/ICAIoT53762.2021.00008</pub-id>
</citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ren</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Tian</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Geng</surname> <given-names>X.</given-names>
</name>
<etal/>
</person-group>. (<year>2023</year>). <article-title>Rapid identification of fish species by laser-induced breakdown spectroscopy and Raman spectroscopy coupled with machine learning methods</article-title>. <source>Food Chem.</source> <volume>400</volume>, <elocation-id>134043</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.foodchem.2022.134043</pub-id>
</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Saleh</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Sheaves</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Rahimi Azghadi</surname> <given-names>M.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Computer vision and deep learning for fish classification in underwater habitats: A survey</article-title>. <source>Fish Fish.</source> <volume>23</volume>, <fpage>977</fpage>&#x2013;<lpage>999</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/faf.12666</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sayed</surname> <given-names>G. I.</given-names>
</name>
<name>
<surname>Soliman</surname> <given-names>M. M.</given-names>
</name>
<name>
<surname>Hassanien</surname> <given-names>A. E.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>A novel melanoma prediction model for imbalanced data using optimized SqueezeNet by bald eagle search optimization</article-title>. <source>Comput. Biol. Med.</source> <volume>136</volume>, <elocation-id>104712</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compbiomed.2021.104712</pub-id>
</citation>
</ref>
<ref id="B22">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Simonyan</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Zisserman</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2015</year>). &#x201c;<article-title>Very deep convolutional networks for large-scale image recognition</article-title>,&#x201d; in <conf-name>3rd Int. Conf. Learn. Represent. ICLR 2015 - Conf. Track Proc</conf-name>. <fpage>14</fpage>. (<publisher-loc>San Diego, California, USA</publisher-loc>).
</citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Singh</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Taspinar</surname> <given-names>Y. S.</given-names>
</name>
<name>
<surname>Kursun</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Cinar</surname> <given-names>I.</given-names>
</name>
<name>
<surname>Koklu</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Ozkan</surname> <given-names>I. A.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>Classification and analysis of pistachio species with pre-trained deep learning models</article-title>. <source>Electronics</source> <volume>11</volume>, <elocation-id>981</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/electronics11070981</pub-id>
</citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Szegedy</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Ioffe</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Vanhoucke</surname> <given-names>V.</given-names>
</name>
<name>
<surname>Alemi</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Inception-v4, Inception-ResNet and the impact of residual connections on learning</article-title>. <source>Proc. AAAI Conf. Artif. Intell.</source> <volume>31</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.1609/aaai.v31i1.11231</pub-id>
</citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Villon</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Mouillot</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Chaumont</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Darling</surname> <given-names>E. S.</given-names>
</name>
<name>
<surname>Subsol</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Claverie</surname> <given-names>T.</given-names>
</name>
<etal/>
</person-group>. (<year>2018</year>). <article-title>A Deep learning method for accurate and fast identification of coral reef fishes in underwater images</article-title>. <source>Ecol. Inform.</source> <volume>48</volume>, <fpage>238</fpage>&#x2013;<lpage>244</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ecoinf.2018.09.007</pub-id>
</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Vo</surname> <given-names>S. A.</given-names>
</name>
<name>
<surname>Scanlan</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Turner</surname> <given-names>P.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>An application of convolutional neural network to lobster grading in the southern rock lobster supply chain</article-title>. <source>Food Control</source> <volume>113</volume>, <elocation-id>107184</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.foodcont.2020.107184</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Gao</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Dong</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Zhou</surname> <given-names>C.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Deep learning for smart fish farming: applications, opportunities and challenges</article-title>. <source>Rev. Aquac.</source> <volume>13</volume>, <fpage>66</fpage>&#x2013;<lpage>90</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/raq.12464</pub-id>
</citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zha</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Tang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Shi</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Bao</surname> <given-names>Y.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>Impacts of four commonly used nanoparticles on the metabolism of a marine bivalve species, Tegillarca granosa</article-title>. <source>Chemosphere</source> <volume>296</volume>, <elocation-id>134079</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.chemosphere.2022.134079</pub-id>
</citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Yue</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Song</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Jia</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>Z.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>A High-similarity shellfish recognition method based on convolutional neural network</article-title>. <source>Inf. Process. Agric.</source> <volume>10</volume>, <fpage>149</fpage>&#x2013;<lpage>163</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.inpa.2022.05.009</pub-id>
</citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Shi</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Han</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Guo</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Fu</surname> <given-names>W.</given-names>
</name>
<etal/>
</person-group>. (<year>2017</year>). <article-title>Ocean acidification adversely influences metabolism, extracellular pH and calcification of an economically important marine bivalve, Tegillarca granosa</article-title>. <source>Mar. Environ. Res.</source> <volume>125</volume>, <fpage>82</fpage>&#x2013;<lpage>89</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.marenvres.2017.01.007</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>