<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Plant Sci.</journal-id>
<journal-title>Frontiers in Plant Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Plant Sci.</abbrev-journal-title>
<issn pub-type="epub">1664-462X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpls.2023.1248598</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Plant Science</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Non-destructive detection of single-seed viability in maize using hyperspectral imaging technology and multi-scale 3D convolutional neural network</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Fan</surname><given-names>Yaoyao</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2357868"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>An</surname><given-names>Ting</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2323282"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Wang</surname><given-names>Qingyan</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1690496"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Yang</surname><given-names>Guang</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Huang</surname><given-names>Wenqian</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Wang</surname><given-names>Zheli</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1665650"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Zhao</surname><given-names>Chunjiang</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>*</sup></xref>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Tian</surname><given-names>Xi</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>*</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2136538"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>College of Information and Electrical Engineering, Shenyang Agricultural University</institution>, <addr-line>Shenyang</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>Intelligent Equipment Research Center, Beijing Academy of Agriculture and Forestry Sciences</institution>, <addr-line>Beijing</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by">
<p>Edited by: Jianwei Qin, Agricultural Research Service (USDA), United States</p>
</fn>
<fn fn-type="edited-by">
<p>Reviewed by: Pappu Kumar Yadav, University of Florida, United States; Ebenezer Olaniyi, Mississippi State University, United States; Princess Tiffany D. Mendoza, Kansas State University, United States</p>
</fn>
<fn fn-type="corresp" id="fn001">
<p>*Correspondence: Chunjiang Zhao, <email xlink:href="mailto:Zhaocj@nercita.org.cn">Zhaocj@nercita.org.cn</email>; Xi Tian, <email xlink:href="mailto:tianx2019@sina.com">tianx2019@sina.com</email>
</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>29</day>
<month>08</month>
<year>2023</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>14</volume>
<elocation-id>1248598</elocation-id>
<history>
<date date-type="received">
<day>27</day>
<month>06</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>11</day>
<month>08</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2023 Fan, An, Wang, Yang, Huang, Wang, Zhao and Tian</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Fan, An, Wang, Yang, Huang, Wang, Zhao and Tian</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>The viability of Zea mays seed plays a critical role in determining the yield of corn. Therefore, developing a fast and non-destructive method is essential for rapid and large-scale seed viability detection and is of great significance for agriculture, breeding, and germplasm preservation. In this study, hyperspectral imaging (HSI) technology was used to obtain images and spectral information of maize seeds with different aging stages. To reduce data input and improve model detection speed while obtaining more stable prediction results, successive projections algorithm (SPA) was used to extract key wavelengths that characterize seed viability, then key wavelength images of maize seed were divided into small blocks with 5 pixels &#xd7; 5 pixels and fed into a multi-scale 3D convolutional neural network (3DCNN) for further optimizing the discrimination possibility of single-seed viability. The final discriminant result of single-seed viability was determined by comprehensively evaluating the result of all small blocks belonging to the same seed with the voting algorithm. The results showed that the multi-scale 3DCNN model achieved an accuracy of 90.67% for the discrimination of single-seed viability on the test set. Furthermore, in an effort to reduce labor and avoid the misclassification caused by human subjective factors, a YOLOv7 model and a Mask R-CNN model were constructed respectively for germination judgment and bud length detection in this study, the result showed that mean average precision (mAP) of YOLOv7 model could reach 99.7%, and the determination coefficient of Mask R-CNN model was 0.98. Overall, this study provided a feasible solution for detecting maize seed viability using HSI technology and multi-scale 3DCNN, which was crucial for large-scale screening of viable seeds. This study provided theoretical support for improving planting quality and crop yield.</p>
</abstract>
<kwd-group>
<kwd>viability detection</kwd>
<kwd>maize seeds</kwd>
<kwd>hyperspectral imaging</kwd>
<kwd>YOLOv7 model</kwd>
<kwd>3D convolution neural network</kwd>
</kwd-group>
<counts>
<fig-count count="9"/>
<table-count count="5"/>
<equation-count count="15"/>
<ref-count count="61"/>
<page-count count="15"/>
<word-count count="7637"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-in-acceptance</meta-name>
<meta-value>Technical Advances in Plant Science</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>Single-seed sowing is a crucial strategy to boost corn production, save seeds, and reduce labor, but it demands high-quality seeds (<xref ref-type="bibr" rid="B29">Li et&#xa0;al., 2017</xref>). On October 11th, 2020, a new standard was released by China, which raises the germination rate index for single-seed sowing from 85% to 93%. The viability is a critical indicator for evaluating the quality and practicality of seed. Assessment of seed viability could ensure each seed has the potential for germination and healthy growth and promotes the popularization of single-seed sowing. This not only facilitates mechanized sowing and reduces the laboriousness of manual interplanting and seedling transplantation, but also significantly reduces the amount of seed used and conserves a considerable amount of seed production area (<xref ref-type="bibr" rid="B30">Liang et&#xa0;al., 2020</xref>). Therefore, the determination of seed viability is of utmost importance in reducing the cost and time loss resulting from planting failures and conserving human resources.</p>
<p>Seed viability is a quality characteristic at the individual level rather than a quantitative trait at the population level. Loss of viability among individuals in the same population is not synchronous, making it challenging to detect the viability of single-seed. According to the International Seed Testing Association (ISTA) rules (<xref ref-type="bibr" rid="B6">Association, I.S.T, 1999</xref>), common methods for seed viability detection include germination and staining (<xref ref-type="bibr" rid="B12">Cheng et&#xa0;al., 2023</xref>). The conventional germination method is the most accurate, but it is time-consuming and requires a lot of material resources. On the other hand, staining is only suitable for a small number of samples. Therefore, it is necessary to develop a rapid-nondestructive technique for single-seed viability detection in large quantities.</p>
<p>In the field of seed quality detection, hyperspectral imaging technology has been widely utilized. However, research on seed viability detection is relatively limited. Jannat <xref ref-type="bibr" rid="B55">Yasmin et&#xa0;al. (2022)</xref> presented an online detection system of watermelon seed viability based on longwave near-infrared (LWNIR) HSI, demonstrating its potential application in predicting seed viability. <xref ref-type="bibr" rid="B48">Wang et&#xa0;al. (2021)</xref> developed the discrimination models of seed viability using the feature wavelengths and full wavelengths of the visible and shortwave near-infrared (Vis-SWNIR), the result revealed that both models attained an accuracy rate surpassing 95%, suggesting that the seeds with different aging stages exhibited unique spectral features, and the characteristic wavelengths can effectively provide the key information of Zea mays seed quality. <xref ref-type="bibr" rid="B35">Pang et&#xa0;al. (2021)</xref> conducted a germination experiment on maize seeds with different aging stages, a 2D convolutional neural network (2DCNN) model was developed by combining deep learning algorithms with hyperspectral technology. The accuracy of this model reached 99.96%, which was significantly higher than machine learning and one-dimensional convolutional neural network (CNN). It was worth pointing out that the model demonstrated a relatively fast convergence speed, which highlighted the feasibility and effectiveness of combining deep learning with hyperspectral technology to determine the viability of single-seed. <xref ref-type="bibr" rid="B4">Ambrose et&#xa0;al. (2016)</xref> investigated the feasibility of using HSI technology to differentiate the viability of maize seeds. One group of maize samples was subjected to microwave heat treatment, while the other group served as the control. PLS-DA was employed to classify the heat-treated (aged) and untreated (normal) maize seeds. 
The results showed that the classification model achieved the highest classification accuracy in the LWNIR region, with calibration set accuracy of 97.6% and prediction set accuracy of 95.6%. These studies achieve high accuracy by predicting the aging level or treatment condition of seeds instead of the actual results of germination experiments. And they mainly rely on overall image information for seed viability classification. However, they overlook the significance of local information within seeds and fail to consider subtle variations and characteristics in different seed regions.</p>
<p>Generally, the evaluation of germination rate of seeds mainly depends on manual labor, which is time-consuming and cumbersome. <xref ref-type="bibr" rid="B58">Zhao et&#xa0;al. (2022)</xref> proposed a detection method for the germination rate of rice seeds using deep learning models, which took an average of 0.011 seconds for each image while achieving a mAP of 0.9539, meeting the demands of real-time detection, indicating that the YOLO-r model had great potential for rapidly and precisely determining the germination status of seeds. <xref ref-type="bibr" rid="B7">Bai et&#xa0;al. (2023)</xref> developed an improved discriminative approach for the detection of seed germination using YOLOv5. This technique enables the swift evaluation of parameters such as wheat seed germination rate, germination potential, germination index, and average germination days.</p>
<p>The emergence ability of seedlings is crucial for seed growth and crop yield improvement (<xref ref-type="bibr" rid="B17">Cui et&#xa0;al., 2020</xref>). In recent studies, significant progress has been made in correlating seed germination ability and seedling growth through various measurement methods. However, traditional manual measurement techniques for assessing parameters like bud length have been found to be inefficient and prone to errors due to the complex and twisted nature of buds. To address this issue, <xref ref-type="bibr" rid="B2">Adegbuyi and Burris (1988)</xref> found there was a significant correlation between seed germination ability and seedling growth by measuring comprehensive growth parameters. However, manual measurement method of bud length is inefficient and error-prone due to their curved and twisted nature. <xref ref-type="bibr" rid="B23">Gaikwad et&#xa0;al. (2019)</xref> developed a semi-automated tool for measuring leaf length, width, and area. Abdelaziz <xref ref-type="bibr" rid="B44">Triki et&#xa0;al. (2021)</xref> used the Mask R-CNN algorithm to effectively segment and measure leaf characteristics and obtained an error rate of around 5%. An enhanced algorithm based on the Mask R-CNN was introduced by <xref ref-type="bibr" rid="B36">Shen et&#xa0;al. (2023)</xref> to recognize defective wheat kernels. The experimental outcomes showed that this refined algorithm facilitated quicker and more precise detection of unsound kernels, effectively tackling issues linked to kernel adhesion. <xref ref-type="bibr" rid="B34">Masood et&#xa0;al. (2021)</xref> proposed an automated method that utilizes the Mask R-CNN model to achieve precise localization and segmentation of brain tumors. <xref ref-type="bibr" rid="B16">Cui et&#xa0;al. (2022)</xref> constructed a recognition model using hyperspectral data and feature extraction algorithms to predict maize root length, showing a significant correlation between root length and viability. 
Therefore, it is of great significance to measure and predict the seed viability using computer technology.</p>
<p>The above study highlighted the significance of seed viability determination and emphasized the need of developing rapid and non-destructive technology for single-seed viability detection. HSI has been established as a useful tool for seed quality detection, and the integration of deep learning and hyperspectral technology can establish an effective seed viability detection model. However, previous studies commonly used relatively simple models, and lacking the prediction model of maize seeds viability developed by 3DCNN and hyperspectral images. This study proposed an improved method for identifying the viability of maize seeds based on germination experiments. The aim of the study is to explore the potential of using hyperspectral images and 3DCNN to identify the viability of maize seeds. Specifically, the objectives are to: (1) select characteristic wavelengths that represent seed viability, (2) combine HSI with 3DCNN to establish the optimal classification model for maize seed viability, (3) evaluate the feasibility of using YOLOv7 model instead of the human eye to determine the seed germination status, (4) evaluate the ability of Mask R-CNN in bud segmentation and bud length prediction.</p>
</sec>
<sec id="s2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="s2_1">
<label>2.1</label>
<title>Maize sample preparation</title>
<sec id="s2_1_1">
<label>2.1.1</label>
<title>Aging experiment</title>
<p>Due to the high quality and the resistance to multiple stressors, &#x201c;Jingke 968&#x201d; maize is extensively cultivated in eastern and northern China. Therefore, it was selected as the experiment sample in this study. To ensure the accuracy of the experiment, seeds with uniform size and shape were manually selected, then all seeds were disinfected by soaking them in a 0.5% sodium hypochlorite solution for 5 minutes, followed by rinsing with distilled water five times, and air-dried under natural conditions.</p>
<p>To simulate the natural aging process of seeds, the experiment samples were artificially aged. All seeds were exposed to high temperature and high humidity conditions (45 &#xb0;C and a relative humidity of 95%) and stirred twice a day to ensure uniform exposure (<xref ref-type="bibr" rid="B57">Zhang et&#xa0;al., 2020</xref>). 150 maize seeds were taken out randomly at aging 2, 4, 6, and 8 days, respectively. Additionally, 150 untreated seeds were selected as the control group (CK). Therefore, a total of 750 maize seeds within five aging stages were obtained and used for subsequent experimentation.</p>
</sec>
<sec id="s2_1_2">
<label>2.1.2</label>
<title>Hyperspectral imaging system</title>
<p>Two HSI systems, the Vis-SWNIR and LWNIR, have been built in the Intelligent Detection Laboratory of the China Agricultural Equipment Technology Research Center (<xref ref-type="bibr" rid="B20">Fan et&#xa0;al., 2018</xref>). The Vis-SWNIR system is capable of acquiring hyperspectral images within the wavelength range of 327-1098 nm, encompassing 1000 spectral variables, while the LWNIR system can capture images within the range of 930-2548 nm, containing 256 spectral variables. The Vis-SWNIR system includes an imaging spectrometer, an electron-multiplying charge-coupled device camera with a resolution of 502&#xd7;500, a camera lens, and a spectraCube data acquisition software. Similarly, the LWNIR system includes an imaging spectrometer, a charge-coupled device camera with a resolution of 320&#xd7;256, a camera lens, and a spectral acquisition software (<xref ref-type="bibr" rid="B43">Tian et al., 2021</xref>). And the acquisition software of both systems was developed using LabVIEW (National Instruments Inc., Austin, TX, USA) to facilitate the acquisition of spectral images, as well as to manage the camera and motor operations. Both systems share two 300-watt halogen lamps to provide stable illumination. In addition, an electrically operated moving platform and a computer are available for sample placement (Capable of accommodating up to 96 samples simultaneously) and hyperspectral image acquisition (<xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1A</bold></xref>) (<xref ref-type="bibr" rid="B32">Liu et&#xa0;al., 2022</xref>).</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Diagram of the 3DCNN for hyperspectral image classification <bold>(A)</bold> Hyperspectral image acquisition device, <bold>(B)</bold> Regional voting, <bold>(C)</bold> Conventional 3DCNN model, <bold>(D)</bold> Multi-scale 3DCNN model.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-14-1248598-g001.tif"/>
</fig>
<p>To ensure the accuracy and reliability of the hyperspectral images (<italic>E<sub>raw</sub>
</italic>), calibration operation is essential to eliminate the effects of uneven illumination of the light source and camera dark current changes (<xref ref-type="bibr" rid="B5">An et&#xa0;al., 2022</xref>). The calibration operation involved using a white reflection board (with a reflectance of 99%) (<italic>E<sub>w</sub>
</italic>) to acquire a standard white reference image in the same sampling environment as the sample, while turning off the light source and covering the lens to obtain a black reference image (with a reflectance of 0%) (<italic>E<sub>d</sub>
</italic>). The calibrated image can be calculated using the following formula:</p>
<disp-formula>
<label>(1)</label>
<mml:math display="block" id="M1">
<mml:mrow>
<mml:msub>
<mml:mi>E</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mi>E</mml:mi>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>w</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>E</mml:mi>
<mml:mi>d</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:mi>E</mml:mi>
<mml:mi>w</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>E</mml:mi>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>After calibration, in the Vis-SWNIR region, a subset of 347 spectral variables within the 420-1000 nm range was selected for further analysis, considering the abundance of spectral data and the presence of duplicate information in adjacent spectra. On the other hand, in the near-infrared region, due to the limited number of available bands, all spectral variables (256) were directly included in the analysis. To separate maize seeds from the background, a mask was applied to segment the hyperspectral image. The gray-scale images at 801 nm and 1098 nm were selected as the mask images for the Vis-SWNIR and LWNIR bands, respectively. The average spectral curves were obtained by calculating the mean reflectance under the mask. Lastly, in order to eliminate the influence of the instrument, the Savitzky-Golay (SG) and Standard Normal Variate (SNV) methods were utilized to preprocess the spectra.</p>
</sec>
<sec id="s2_1_3">
<label>2.1.3</label>
<title>Standard germination test</title>
<p>A transparent box measuring 25cm&#xd7;25cm was used as a germination chamber, and 75 seeds were placed in each box. A total of 10 boxes were used in the experiment. Prior to the germination test, the germination boxes were sterilized with 75% ethanol (<xref ref-type="bibr" rid="B39">Suksungworn et&#xa0;al., 2021</xref>), and three layers of gauze were placed in each germination box to provide continuous moisture for the seeds. A black gauze was placed on the top layers as the background for photography (<xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2A</bold></xref>). An equal amount of distilled water was added to each box, and the temperature was set to 25&#xb0;C with 12-hour intervals of light and dark (<xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2B</bold></xref>). Throughout the 7-day germination experiment (<xref ref-type="bibr" rid="B33">Long et&#xa0;al., 2022</xref>), the germination progress of maize seeds was monitored daily at specific time intervals. According to the ISTA standard, the germination rate was determined (<xref ref-type="bibr" rid="B49">Wang et&#xa0;al., 2022c</xref>).</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Diagram of the standard germination experiment <bold>(A)</bold> Corn seed samples, <bold>(B)</bold> Germination of seeds in a climate chamber, <bold>(C)</bold> Sprouted seeds, <bold>(D)</bold> RGB image acquisition device.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-14-1248598-g002.tif"/>
</fig>
</sec>
<sec id="s2_1_4">
<label>2.1.4</label>
<title>RGB image acquisition</title>
<p>RGB images of maize seeds were captured using BASLER industrial cameras (acA1920-25um/uc, BASLER AG, Germany, 2.4 MP, 100 fps) during the germination test (<xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2D</bold></xref>) (<xref ref-type="bibr" rid="B36">Shen et&#xa0;al., 2023</xref>). An adjustable camera platform was built to ensure consistency of the images and prevent camera shake. The position of the germination box relative to the lens was kept fixed during each image capture. Indoor lighting was turned on and curtains were drawn for each capture. After placing the seeds into the boxes (Day 0), images of each box were immediately captured. Subsequently, images were captured every 15 hours for 7 consecutive days (<xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2C</bold></xref>). The dataset used in this study consisted of a total of 3000 maize seeds (All the captured RGB images collectively contain 3000 seeds). Among them, 2250 seeds were designated as training samples, while the remaining seeds were allocated to the test set.</p>
</sec>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Data processing</title>
<sec id="s2_2_1">
<label>2.2.1</label>
<title>Successive projections algorithm</title>
<p>Hyperspectral data typically consists of numerous bands, and certain bands may exhibit high correlation or contain redundant information (<xref ref-type="bibr" rid="B26">Han et&#xa0;al., 2022</xref>). When training a 3DCNN with full-band data, the number of network training parameters increases significantly, resulting in a more complex model. This phenomenon is commonly referred to as the curse of dimensionality (<xref ref-type="bibr" rid="B28">K&#xf6;ppen, 2000</xref>). However, band selection (<xref ref-type="bibr" rid="B40">Sun and Du, 2019</xref>) allows retaining spectral bands that are closely related to seed vigor assessment while removing irrelevant bands, thereby enhancing the feature extraction and discriminative capabilities of the model.</p>
<p>Additionally, the use of dimensionality reduction data sets can effectively reduce the complexity of the model, mitigating the risk of overfitting and enhancing the model&#x2019;s generalization ability and stability (<xref ref-type="bibr" rid="B3">Aloupogianni et&#xa0;al., 2023</xref>). Moreover, fewer computing resources are required during model training and inference, leading to a significant improvement in the computational efficiency of the model (<xref ref-type="bibr" rid="B53">XingJia et&#xa0;al., 2022</xref>).</p>
<p>Successive projections algorithm is a classical band selection method that can map high-dimensional spectral data to a low-dimensional space through multiple projections (<xref ref-type="bibr" rid="B18">de Almeida et&#xa0;al., 2018</xref>). SPA is a forward iterative search method used for selecting spectral information with minimal redundancy to address collinearity issues. The steps of SPA are shown in <xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref>. The SPA is widely used in hyperspectral image processing attributed to its advantages of fast computation speed and easy implementation (<xref ref-type="bibr" rid="B10">Chen et&#xa0;al., 2023b</xref>). Therefore, the SPA was used in this study to perform feature selection on the processed average spectra of Vis-SWNIR and LWNIR, in order to perform dimensionality reduction on the hyperspectral data.</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Successive projections algorithm.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="left" colspan="2">Input: Dataset with features and target variable<break/>Output: Feature subset for analysis</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><bold>Step 1:</bold> For each feature in the dataset:<break/>&#x2003;a. Compute projection coefficients with respect to the target variable.<break/>&#x2003;b. Store the computed coefficients.</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Step 2:</bold> Initialize an empty feature subset.<break/><bold>Step 3:</bold> Repeat until desired subset size is reached or stopping criterion met:<break/>&#x2003;a. Find the feature with the maximum projection coefficient.<break/>&#x2003;b. Add the selected feature to the feature subset.<break/>&#x2003;c. Project out the influence of selected features on remaining features.<break/>&#x2003;d. Recalculate projection coefficients of remaining features.</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s2_2_2">
<label>2.2.2</label>
<title>Machine learning</title>
<p>Support Vector Machine (SVM) (<xref ref-type="bibr" rid="B15">Cortes and Vapnik, 1995</xref>) is a powerful algorithm for classification and regression that finds an optimal hyperplane to separate data points of different classes. It handles high-dimensional datasets, avoids overfitting, and can handle non-linear problems using kernel functions. K-Nearest Neighbor (KNN) (<xref ref-type="bibr" rid="B56">Zhang, 2022</xref>) is a basic algorithm that selects the K nearest samples based on their feature values and uses their labels as predictions. Subspace Discriminant Analysis (SDA) (<xref ref-type="bibr" rid="B60">Zhao and Phillips, 1999</xref>) is a pattern classification method that aims to find a low-dimensional subspace to maximize the separation between different classes. In this study, the aforementioned machine learning methods were used to classify the viability of maize seeds at different aging stages for optimal classification accuracy.</p>
</sec>
<sec id="s2_2_3">
<label>2.2.3</label>
<title>Deep convolutional neural network</title>
<p>The CNN combines the concepts of convolutional filtering and neural networks by utilizing local receptive fields and weight sharing to reduce the number of network parameters and speed up model training (<xref ref-type="bibr" rid="B25">Ghaderizadeh et&#xa0;al., 2021</xref>). Compared to the widespread use of two-dimensional convolution, three-dimensional convolution is less commonly used in practice. However, HSI contain rich spectral information, and using two-dimensional convolution may make the interband correlations of HSIs underutilized (<xref ref-type="bibr" rid="B24">Ge et&#xa0;al., 2020</xref>). To address this issue, this study introduced a 3DCNN, which can thoroughly extract feature relationships across different feature channels (<xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1C</bold></xref>), thereby enabling it to concurrently extract integrated spectral and spatial features from hyperspectral imagery (<xref ref-type="bibr" rid="B41">Sun et&#xa0;al., 2022</xref>).</p>
<p>Before inputting hyperspectral images into the network, standardization is performed to ensure that the data is within the same scale and range, enabling the network to learn weights faster and converge more easily during training. Moreover, data standardization can help avoid the problems of gradient disappearance or explosion, and improve the stability and generalization ability of the network. To obtain multiple convolutional features of HSI, multi-scale convolution is employed in the same convolutional layer, which can acquire both global and local information. Four different convolution kernels of 3&#xd7;3&#xd7;3, 3&#xd7;3&#xd7;5, 3&#xd7;5&#xd7;5, and 5&#xd7;5&#xd7;5 were selected to extract feature information and fused on the channel. This method can enhance the classification accuracy of the model. As illustrated in <xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1D</bold></xref>, each convolution kernel in the first convolution module has 16 filters, each kernel in the second convolution module has 32 filters, and each kernel in the third convolution module has 64 filters. The activation function in the three-dimensional convolution module uses Rectified Linear Unit (RELU) and is compressed by the pooling layer to reduce the amount of data and parameters, as well as alleviate the overfitting phenomenon. To ensure that the features extracted by different convolution kernels in the same module can be effectively connected, different parameters need to be set according to different situations, such as stride and padding. Finally, the output is produced through 1 fully connected layer and 1 output layer, and the output layer employs the SoftMax activation function.</p>
<p>To extract features from hyperspectral images of maize seeds at a more microscopic level and increase the amount of data, a window size of 5&#xd7;5 was selected for segmentation (<xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1B</bold></xref>). To eliminate the influence of background on classification, small blocks containing 0-pixel points were discarded. As the size of maize seeds varies, the number of blocks obtained from different segments of maize seeds is also inconsistent. To address this issue, this study employed a majority principle labeling aggregation method, as <xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref>.</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Majority principle labeling aggregation method.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="left" colspan="2">Input: Segmented maize seed blocks<break/>Output: Predicted potential for germination of maize seeds</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Step 1: Initialize:<break/>&#x2003;a. Assign Label 1 to represent potential for germination.<break/>&#x2003;b. Assign Label 2 to indicate maize grain block affiliation.</td>
</tr>
<tr>
<td valign="top" align="left">Step 2: For each segmented maize seed block:<break/>&#x2003;a. Feed the block into the model for prediction of its potential for<break/>germination.<break/>&#x2003;b. Store the prediction result.</td>
</tr>
<tr>
<td valign="top" align="left">Step 3: For each maize seed:<break/>&#x2003;a. Retrieve predictions of multiple small blocks belonging to the same maize<break/>seed.<break/>&#x2003;b. Count the number of correct predictions.<break/>&#x2003;c. If more than half of the predictions are correct:<break/>- The predicted result of the maize seed is deemed correct.</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>In this study, the germination experiment showed that 404 viable samples and 346 nonviable samples were collected from 750 seeds. Given that the hyperspectral images were collected in a sequential manner based on the aging gradients of the seeds, it was crucial to maintain a balanced distribution of germinated and non-germinated samples in the test set. Therefore, a representative test set was carefully selected, consisting of 75 seeds, including the first seed, the 10th seed, the 20th seed, and so on. The remaining 675 seeds were allocated for the training phase. Through this meticulous approach, it was ensured that the test set encompassed samples from diverse categories, enabling an accurate evaluation of the classification model&#x2019;s performance.</p>
</sec>
<sec id="s2_2_4">
<label>2.2.4</label>
<title>Establishment of Mask R-CNN model for bud length detection</title>
<p>In order to measure the length of maize seed bud, the Mask R-CNN (<xref ref-type="bibr" rid="B27">He et&#xa0;al., 2017</xref>) (With resnet50_fpn as backbone) model was utilized to segment the bud from single-seed image firstly, then a skeleton extraction algorithm was applied to extract the skeleton of the bud (<xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3A</bold></xref>). Next, the bud length detection algorithm was used to remove the branches in the skeleton for obtaining the central skeleton image. Finally, the actual bud length was calculated by converting pixels to actual length (<xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3B</bold></xref>).</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>Diagram of the maize bud length detection process <bold>(A)</bold> Process of bud segmentation, <bold>(B)</bold> Process of bud length detection.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-14-1248598-g003.tif"/>
</fig>
<p>Mask R-CNN is a deep learning model that combines object detection and instance segmentation. It extends Faster R-CNN by generating binary masks for each region of interest (ROI), achieving pixel-level segmentation. The network consists of three main components: a backbone network, a Region Proposal Network (RPN) responsible for generating candidate object regions, and two parallel branches dedicated to object detection and mask prediction. Mask R-CNN excels in instance segmentation, object detection, and keypoint detection, making significant contributions to computer vision advancements (<xref ref-type="bibr" rid="B9">Casado-Garc&#xed;a et&#xa0;al., 2019</xref>). The model employs a multi-task loss function, comprising classification loss (<italic>L<sub>cls</sub>
</italic>), bounding box loss (<italic>L<sub>bbox</sub>
</italic>), and predicted mask loss (<italic>L<sub>mask</sub>
</italic>), as represented by equations (2) to (5) (<xref ref-type="bibr" rid="B14">Cong et&#xa0;al., 2023</xref>).</p>
<disp-formula>
<label>(2)</label>
<mml:math display="block" id="M2">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mtext>L&#xa0;=&#xa0;L</mml:mtext>
</mml:mrow>
<mml:mrow>
<mml:mtext>cls</mml:mtext>
</mml:mrow>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:msub>
<mml:mtext>L</mml:mtext>
<mml:mrow>
<mml:mtext>bbox</mml:mtext>
</mml:mrow>
</mml:msub>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mo>+</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:msub>
<mml:mtext>L</mml:mtext>
<mml:mrow>
<mml:mtext>mask</mml:mtext>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<label>(3)</label>
<mml:math display="block" id="M3">
<mml:mrow>
<mml:msub>
<mml:mtext>L</mml:mtext>
<mml:mrow>
<mml:mtext>cls</mml:mtext>
</mml:mrow>
</mml:msub>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:munder>
<mml:mo>&#x2211;</mml:mo>
<mml:mtext>i</mml:mtext>
</mml:munder>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>log</mml:mi>
<mml:mfenced close="]" open="[">
<mml:mrow>
<mml:msub>
<mml:mtext>p</mml:mtext>
<mml:mtext>i</mml:mtext>
</mml:msub>
<mml:msubsup>
<mml:mtext>p</mml:mtext>
<mml:mtext>i</mml:mtext>
<mml:mo>*</mml:mo>
</mml:msubsup>
<mml:mo>+</mml:mo>
<mml:mfenced>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:msubsup>
<mml:mtext>p</mml:mtext>
<mml:mtext>i</mml:mtext>
<mml:mo>*</mml:mo>
</mml:msubsup>
</mml:mrow>
</mml:mfenced>
<mml:mfenced>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mtext>p</mml:mtext>
<mml:mtext>i</mml:mtext>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<label>(4)</label>
<mml:math display="block" id="M4">
<mml:mrow>
<mml:msub>
<mml:mtext>L</mml:mtext>
<mml:mrow>
<mml:mtext>bbox</mml:mtext>
</mml:mrow>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mrow>
<mml:msub>
<mml:mtext>N</mml:mtext>
<mml:mrow>
<mml:mtext>reg</mml:mtext>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
<mml:munder>
<mml:mo>&#x2211;</mml:mo>
<mml:mtext>i</mml:mtext>
</mml:munder>
<mml:msubsup>
<mml:mtext>p</mml:mtext>
<mml:mtext>i</mml:mtext>
<mml:mo>*</mml:mo>
</mml:msubsup>
<mml:mtext>R&#x2009;</mml:mtext>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mtext>t</mml:mtext>
<mml:mtext>i</mml:mtext>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:msubsup>
<mml:mtext>t</mml:mtext>
<mml:mtext>i</mml:mtext>
<mml:mo>*</mml:mo>
</mml:msubsup>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<label>(5)</label>
<mml:math display="block" id="M5">
<mml:mrow>
<mml:msub>
<mml:mtext>L</mml:mtext>
<mml:mrow>
<mml:mtext>mask</mml:mtext>
</mml:mrow>
</mml:msub>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mrow>
<mml:msup>
<mml:mtext>m</mml:mtext>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfrac>
<mml:munder>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2264;</mml:mo>
<mml:mtext>&#xa0;i</mml:mtext>
<mml:mo>,</mml:mo>
<mml:mtext>&#xa0;j</mml:mtext>
<mml:mo>&#x2264;</mml:mo>
<mml:mtext>&#xa0;m</mml:mtext>
</mml:mrow>
</mml:munder>
<mml:mfenced close="]" open="[">
<mml:mrow>
<mml:msubsup>
<mml:mtext>y</mml:mtext>
<mml:mrow>
<mml:mtext>ij</mml:mtext>
</mml:mrow>
<mml:mo>*</mml:mo>
</mml:msubsup>
<mml:mi>log</mml:mi>
<mml:msub>
<mml:mtext>y</mml:mtext>
<mml:mrow>
<mml:mtext>ij</mml:mtext>
</mml:mrow>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:mfenced>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:msubsup>
<mml:mtext>y</mml:mtext>
<mml:mrow>
<mml:mtext>ij</mml:mtext>
</mml:mrow>
<mml:mo>*</mml:mo>
</mml:msubsup>
</mml:mrow>
</mml:mfenced>
<mml:mi>log</mml:mi>
<mml:mfenced>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mtext>y</mml:mtext>
<mml:mrow>
<mml:mtext>ij</mml:mtext>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</disp-formula>
<p><italic>L<sub>cls</sub>
</italic> measures the deviation between predicted and actual values for overall accuracy assessment. <italic>L<sub>bbox</sub>
</italic> quantifies the disparity between predicted and actual position parameters, assessing the model&#x2019;s accuracy in bud localization. <italic>L<sub>mask</sub>
</italic> evaluates the model&#x2019;s confidence in pixel-level classification using binary cross-entropy. Combining these components into a multi-task loss function allows for comprehensive evaluation across multiple tasks, resulting in enhanced overall performance.</p>
<p>The skeleton extraction algorithm is a technique used to extract the central line or skeleton of an object in a binary image (<xref ref-type="bibr" rid="B22">Fu et&#xa0;al., 2023</xref>). By progressively shrinking connected regions within the object contour, the algorithm produces a concise contour that provides valuable information for image processing tasks like recognition and matching. Various algorithms, such as Zhang-Suen, Morphological Thinning, and Medial Axis Transform, can be employed for this purpose. The Medial Axis Transform (MAT) algorithm, specifically, extracts the object&#x2019;s central line by iteratively dilating boundary pixels and identifying the nearest internal pixels as skeleton pixels. This process continues until the skeleton pixels stabilize, resulting in a stable and versatile representation suitable for subsequent image processing tasks. The MAT algorithm handles different object shapes and can process grayscale information within binary images. Seed germination images exhibit a wide range of shape features, such as bud length, curvature, and angle. However, traditional methods for measuring bud length rely on manual measurements, which are time-consuming and prone to significant subjective biases. The MAT (Medial Axis Transform) skeleton extraction algorithm was chosen to obtain the central line of buds. However, the resulting skeleton may contain branches that need to be eliminated to derive the center skeleton. The process of centerline skeleton extraction is illustrated in the following <xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3B</bold></xref>.</p>
<p>In this study, a transparent box with a side length of 250 mm was used as a reference to convert pixels to actual lengths in millimeters. The calculation formula is:</p>
<disp-formula>
<label>(6)</label>
<mml:math display="block" id="M6">
<mml:mrow>
<mml:mtext>Ratio&#x2009;</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mrow>
<mml:mi>b</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo stretchy="false">/</mml:mo>
<mml:mn>1164</mml:mn>
</mml:mrow>
</mml:math>
</disp-formula>
<p>Here, <italic>L<sub>box</sub>
</italic> represents the side length of the transparent box, and 1164 is the number of pixels corresponding to the transparent box in the image. According to the calculation formula, it can be derived that one pixel corresponds to 0.215 mm.</p>
</sec>
<sec id="s2_2_5">
<label>2.2.5</label>
<title>Establishment of YOLOv7 model for seed germination detection</title>
<p>The seed quality detection methods such as germination and staining techniques are time-consuming and rely heavily on human intervention, which may lead to inaccurate results due to human error. In order to develop an automated and standardized method for detecting seed germination that is efficient, accurate, and reliable, the YOLOv7 (<xref ref-type="bibr" rid="B45">Wang et&#xa0;al., 2022b</xref>) object detection algorithm was selected in this study, which is one of the most widely used algorithms for object detection since its release in 2015 (<xref ref-type="bibr" rid="B19">Dewi et&#xa0;al., 2023</xref>). YOLOv7 is a real-time object detection algorithm (<xref ref-type="bibr" rid="B38">Soeb et&#xa0;al., 2023</xref>), which has evolved from YOLOv5 and has faster inference speed, improved detection accuracy, and reduced computational complexity. The algorithm consists of three main parts: the input layer, backbone layer, and output layer (<xref ref-type="bibr" rid="B42">Tang et&#xa0;al., 2023</xref>), and uses either a loss function with or without an auxiliary training head (<xref ref-type="bibr" rid="B61">Zhou et&#xa0;al., 2023</xref>).</p>
<p>The loss function is used to update the gradient loss during the training process (<xref ref-type="bibr" rid="B8">Cai et&#xa0;al., 2023</xref>). The YOLOv7 algorithm is evaluated using various metrics such as precision, mAP, recall, and F1 score (<xref ref-type="bibr" rid="B59">Zhao et&#xa0;al., 2023</xref>), and curves such as the F1-Confidence curve, precision-confidence curve, recall-confidence curve, and precision-recall curve are used to optimize the algorithm&#x2019;s performance and achieve the best balance between precision and recall.</p>
<p>This study utilized a self-built dataset of maize seeds, comprising images of seeds from various angles and sizes, each with corresponding labels in YOLO format. The data collection and preprocessing process was conducted using the same method as Mask R-CNN. The dataset used in this study consisted of a total of 7000 maize seeds. Among these, 4200 seeds were designated as training samples, 1400 seeds were allocated for the test sets, and the remaining seeds were assigned to the validation sets. To enhance the accuracy and robustness of the model, the YOLOv7.pt (<ext-link ext-link-type="uri" xlink:href="https://github.com/WongKinYiu/yolov7">https://github.com/WongKinYiu/yolov7</ext-link>) pretrained weights provided by the official website were employed for training. These weights were trained on a large-scale dataset, which can significantly reduce the training time while improving the training effect. The Adam optimizer, a widely used optimizer that can optimize at different learning rates, was used to update the model parameters during training. The parameters of the Adam optimizer were adjusted based on the size of the learning rate in the training process to achieve better training results. A batch size of 2 and a training iteration of 300 were used in this study.</p>
</sec>
</sec>
</sec>
<sec id="s3" sec-type="results|discussion">
<label>3</label>
<title>Results and discussion</title>
<sec id="s3_1">
<label>3.1</label>
<title>Seed germination result</title>
<p>The experimental results showed that the degree of seed aging was significantly correlated with the germination rate. As shown in the <xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4</bold></xref>, on the seventh day of observation, all seeds that were not aged can germinate, and only a few seeds that aged for 2 days failed to do so. Most seeds that aged for 4 days still retained their viability, with only a few seeds that aged for 6 days able to germinate. Seeds that aged for 8 days experience almost complete mortality. Thus, it can be inferred that seed aging leads to a decline in the germination rate, and the more prolonged the aging process, the more apparent the decline in the germination rate.</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Germination levels of seeds at different aging times.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-14-1248598-g004.tif"/>
</fig>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Average spectrum</title>
<p>By analyzing the spectral curve features (<xref ref-type="fig" rid="f5"><bold>Figures&#xa0;5A, B</bold></xref>), it is easy to observe that the spectral reflectance of both wavelength regions increased with the decreasing of maize seed viability, indicating that the light absorption capacity of maize tissue increases with the aging degree. The spectral curves are monotonic in the Vis-SWNIR region, with the average spectral curve gradually increasing in the 400-800 nm region and then slowly decreasing. However, in the LWNIR region, the spectral curve is more complex, capturing two distinct reflection peaks located around 1100 nm and 1300 nm, respectively. The former could potentially be associated with the presence of C-H bonds in lipids, while the latter could be described as a combination of the first overtone of N-H stretching along with the fundamental N-H in-plane bending and C-N stretching with N-H in-plane bending vibrations (<xref ref-type="bibr" rid="B47">Wang et&#xa0;al., 2022d</xref>).The spectral curve characteristics can be used to discriminate maize seeds with different germination potentials. As shown in <xref ref-type="fig" rid="f6"><bold>Figure&#xa0;6</bold></xref>, the spectral data of maize seeds with different viability have similar trends in the Vis-SWNIR and LWNIR regions. However, in the Vis-SWNIR region, these curves are basically mixed together, making it difficult to distinguish clearly. In contrast, there are significant differences in the LWNIR region, which may be related to the breakdown of chemicals during the aging process of organisms. Nevertheless, some mixed situations still exist, indicating that it is difficult to distinguish the seeds with or without viability according to the average spectra of hyperspectral image.</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Average spectra and the distribution of optimal bands <bold>(A)</bold> Near-infrared average spectra, <bold>(B)</bold> Visible and near-infrared average spectra. <bold>(C)</bold> Selection of characteristic bands in near-infrared spectra, <bold>(D)</bold> Selection of characteristic bands in visible and near-infrared spectra.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-14-1248598-g005.tif"/>
</fig>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>Classification accuracy curves of maize seed viability based on conventional 3DCNN models using Vis-SWNIR hyperspectral image <bold>(A)</bold> Classification accuracy curves in test set (400 iterations), <bold>(B)</bold> Classification accuracy curves in test set (70 iterations), <bold>(C)</bold> Classification accuracy curves in train set (400 iterations), <bold>(D)</bold> Classification accuracy curves in train set (70 iterations).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-14-1248598-g006.tif"/>
</fig>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Key wavelength selection of maize seed viability</title>
<p>During the aging process of maize seeds, a series of changes occurs in the internal chemical substances (<xref ref-type="bibr" rid="B52">Xin et&#xa0;al., 2011</xref>), with the extent of these changes depending on the degree of seed vitality. These chemical substances include stored energy and nutrients, such as starch, proteins, and lipids (<xref ref-type="bibr" rid="B54">Xu et&#xa0;al., 2022</xref>). Proteins may undergo degradation, leading to the release of amino acids and structural damage to proteins. At the same time, the lipid content in the seed gradually oxidizes, resulting in lipid decomposition and the generation of free radicals, thereby affecting the seed&#x2019;s metabolism and viability. Additionally, starch gradually degrades into soluble sugars. This difference is the main reason for spectral changes during the aging process. After SG and SNV preprocessing, 18 and 11 characteristic bands were extracted from the Vis-SWNIR region and LWNIR region (<xref ref-type="fig" rid="f5"><bold>Figures&#xa0;5C, D</bold></xref>). These characteristic bands were located at the peaks and valleys of the spectrum, reflecting the changes in water content and protein levels of the seeds.</p>
</sec>
<sec id="s3_4">
<label>3.4</label>
<title>Maize seed viability detection based on full-wavelength spectra and machine learning</title>
<p>By analyzing the classification accuracy obtained from SVM and Ensemble analysis, there was no significant difference between Vis-SWNIR and LWNIR regions in predicting seed viability (<xref ref-type="table" rid="T3"><bold>Table&#xa0;3</bold></xref>). However, KNN exhibited slightly higher accuracy with LWNIR, indicating its greater universality and better performance in detecting seed viability. However, due to the minimal differences between seeds with adjacent aging gradients (<xref ref-type="bibr" rid="B21">Feng et&#xa0;al., 2018</xref>), particularly those seeds that aged for 4 days and 6 days, these distinctions may not be immediately discernible, presenting a challenge in accurately determining the germination potential of seeds with similar levels of aging. The germination experiment also showed that the seeds with relatively mild aging did not have inherent germination trends and were easily misclassified by the prediction model. This discrepancy may arise from the fact that maize seeds may not exhibit overt phenotypic changes across different stages of aging (<xref ref-type="bibr" rid="B46">Wang et&#xa0;al., 2022e</xref>). However, in actuality, mRNA molecules associated with protein synthesis undergo oxidation through physiological mechanisms. More specifically, research unveiled significantly elevated expression levels of mature enzyme genes and ribosomal protein genes in embryonic roots and shoots as compared to other parts(<xref ref-type="bibr" rid="B50">Wang et&#xa0;al., 2022a</xref>). This obstruction hampers protein synthesis, consequently impeding the normal physiological functions of the seeds.</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>The classification result of maize seed viability based on full-wavelength spectra and machine learning.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" rowspan="2" align="left">Models</th>
<th valign="top" colspan="2" align="left">Vis-SWNIR</th>
<th valign="top" colspan="2" align="left">LWNIR</th>
</tr>
<tr>
<th valign="top" align="center">Train set</th>
<th valign="top" align="center">Prediction set</th>
<th valign="top" align="center">Train set</th>
<th valign="top" align="center">Prediction set</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">SVM</td>
<td valign="top" align="left">89.3%</td>
<td valign="top" align="left">83.9%</td>
<td valign="top" align="left">84.0%</td>
<td valign="top" align="left">83.3%</td>
</tr>
<tr>
<td valign="top" align="left">KNN</td>
<td valign="top" align="left">72.0%</td>
<td valign="top" align="left">69%</td>
<td valign="top" align="left">85.3%</td>
<td valign="top" align="left">77.8%</td>
</tr>
<tr>
<td valign="top" align="left">Ensemble</td>
<td valign="top" align="left">92%</td>
<td valign="top" align="left">82.4%</td>
<td valign="top" align="left">85.3%</td>
<td valign="top" align="left">82.5%</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3_5">
<label>3.5</label>
<title>Maize seed viability detection based on key wavelength and 3DCNN model</title>
<p>After 70 training epochs on the Vis-SWNIR hyperspectral images, the accuracy of the training set has stabilized at a high level of 100% (<xref ref-type="fig" rid="f6"><bold>Figure&#xa0;6B</bold></xref>), and the accuracy of the test set has also reached its peak. In order to further validate the stability and robustness of the model, the number of training epochs was increased to 400. After 400 iterations, the accuracy of the training set remained at around 100%, while the accuracy of the test set remained at around 90% (<xref ref-type="fig" rid="f6"><bold>Figures&#xa0;6A, C</bold></xref>).</p>
<p>By using 3DCNN to process the data, not only the spectral information was considered (<xref ref-type="bibr" rid="B51">Wu et&#xa0;al., 2021</xref>), but also the image information was integrated, making the evaluation of maize seed quality more comprehensive and accurate (<xref ref-type="bibr" rid="B13">Collins et&#xa0;al., 2021</xref>). Compared with machine learning methods that using all spectral bands as input data, the 3DCNN method only used few representative bands. Traditional machine learning methods tend to lose a lot of information, while the 3DCNN method used in this study can learn more complex features and achieved higher accuracy with fewer bands, with an average accuracy increase of 7 percentage points (<xref ref-type="table" rid="T4"><bold>Table&#xa0;4</bold></xref>). It was worth noting that 3DCNN performs better on the test set and converges faster, which indicated that 3DCNN was an effective method for seed viability detection and had advantages over machine learning classification method in dealing with such problems.</p>
<table-wrap id="T4" position="float">
<label>Table&#xa0;4</label>
<caption>
<p>The classification performance of the maize seed viability based on 3DCNN.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" rowspan="3" colspan="2" align="left">Block</th>
<th valign="middle" colspan="4" align="left">Models</th>
</tr>
<tr>
<th valign="middle" colspan="2" align="left">Multi-3DCNN</th>
<th valign="middle" colspan="2" align="left">Conventional-3DCNN</th>
</tr>
<tr>
<th valign="middle" align="left">Vis-SWNIR</th>
<th valign="middle" align="left">SWNIR</th>
<th valign="middle" align="left">Vis-SWNIR</th>
<th valign="middle" align="left">SWNIR</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left"/>
<td valign="middle" align="left">5 pixels&#xd7;5 pixels</td>
<td valign="middle" align="left">90.67%</td>
<td valign="middle" align="left">90.67%</td>
<td valign="middle" align="left">92.00%</td>
<td valign="middle" align="left">88%</td>
</tr>
<tr>
<td valign="middle" align="left">Split</td>
<td valign="middle" align="left">10 pixels&#xd7;10 pixels</td>
<td valign="middle" align="left">92.00%</td>
<td valign="middle" align="left">87.33%</td>
<td valign="middle" align="left">92.00%</td>
<td valign="middle" align="left">85.33%</td>
</tr>
<tr>
<td valign="middle" align="left"/>
<td valign="middle" align="left">20 pixels&#xd7;20 pixels</td>
<td valign="middle" align="left">85.33%</td>
<td valign="middle" align="left">79.00%</td>
<td valign="middle" align="left">86.67%</td>
<td valign="middle" align="left">78.67%</td>
</tr>
<tr>
<td valign="middle" colspan="2" align="left">No-split</td>
<td valign="middle" align="left">80.80%</td>
<td valign="middle" align="left">78.50%</td>
<td valign="middle" align="left">79.60%</td>
<td valign="middle" align="left">77.50%</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Conventional 3DCNN and multi-scale 3DCNN exhibit different characteristics. Traditional 3DCNN can achieve high accuracy, but they often exhibit slower convergence compared with multi-scale 3DCNN (<xref ref-type="fig" rid="f6"><bold>Figure&#xa0;6D</bold></xref>). Multi-scale 3DCNN incorporated convolutional layers with different-sized kernels and pooling layers, allowing the network to process features of varying scales simultaneously (<xref ref-type="bibr" rid="B31">Lin et&#xa0;al., 2020</xref>). This enhanced the network&#x2019;s robustness and improved its tolerance to noise, distortion, and artifacts in the data, and ultimately led to a faster convergence. In addition, the stability of conventional 3DCNN may not be satisfactory and may exhibit some fluctuations and instability. In contrast, multi-scale 3DCNN perform better, possibly due to their utilization of multi-scale convolutional kernels, enabling them to extract more abundant feature information (<xref ref-type="bibr" rid="B37">Shi et&#xa0;al., 2021</xref>) (<xref ref-type="fig" rid="f6"><bold>Figure&#xa0;6A</bold></xref>). Furthermore, the block-based method effectively increased the amount of data and helped to alleviate overfitting. In the final discrimination, this study adopted a majority principle labeling aggregation method to improve the discrimination accuracy (<xref ref-type="table" rid="T4"><bold>Table&#xa0;4</bold></xref>). To explore the optimal block size, several experiments were conducted, and the input images were segmented into different block sizes, including 5 pixels &#xd7;5 pixels, 10 pixels &#xd7;10 pixels, and 20 pixels &#xd7; 20 pixels. As shown in <xref ref-type="table" rid="T4"><bold>Table&#xa0;4</bold></xref>, the model achieved a relatively high overall accuracy when 5 pixels &#xd7;5 pixels was used. This suggested that the small blocks with 5 pixels &#xd7;5 pixels size can effectively capture more local features of the seed and provide more discriminative information. 
Conversely, larger blocks may result in information blurring and confusion, thereby impacting the classification accuracy. Consequently, the block-based method with 5 pixels &#xd7;5 pixels was finally selected to enhance the detection accuracy of seed viability.</p>
</sec>
<sec id="s3_6">
<label>3.6</label>
<title>Maize seed germination detection based on YOLOv7 model</title>
<p>
<xref ref-type="fig" rid="f7"><bold>Figure&#xa0;7</bold></xref> shows the detection results of germinated maize seeds using the YOLOv7 model, demonstrating its remarkable precision and recall rates of 99.7% and 99.0%, respectively. Additionally, the model achieves a mAP of 99% when applying an Intersection over Union (IoU) threshold of 0.5. Furthermore, the mAP, calculated across a range of IoU thresholds from 0.5 to 0.95, reaches a value of 71%.</p>
<fig id="f7" position="float">
<label>Figure&#xa0;7</label>
<caption>
<p>Detection performance of YOLOv7 model for maize seed germination <bold>(A)</bold> The precision of YOLOv7, <bold>(B)</bold> The recall of YOLOv7, <bold>(C)</bold> The mAP@0.5 of YOLOv7, <bold>(D)</bold> The mAP@0.5:0.95 of YOLOv7.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-14-1248598-g007.tif"/>
</fig>
<p>In <xref ref-type="table" rid="T5"><bold>Table&#xa0;5</bold></xref>, the YOLOv7 model exhibits an impressive F1 score (The F1 score balances precision and recall, providing a comprehensive evaluation of model accuracy) of 0.99 on all target categories with a confidence threshold set at 0.663, highlighting its exceptional detection performance. Consequently, the YOLOv7 model can achieve both high precision, accurately identifying true positive predictions, and high recall, effectively capturing all relevant targets during detection. With a confidence threshold set to 0.896, the YOLOv7 model achieves a perfect precision accuracy of 100% for the target categories. This noteworthy precision metric showcases the model&#x2019;s ability to correctly identify true positive predictions among all the positive predictions made, indicating its reliability and precision in detecting target objects. The model impressively achieves a recall rate (The recall rate quantifies the model&#x2019;s ability to correctly identify positive targets) of 1.00 with a confidence threshold set to 0.000, indicating that it accurately detects all targets of all categories without any missed detections. This ideal performance underscores the model&#x2019;s high accuracy and proficiency in target detection tasks. Additionally, the model exhibits an mAP (The mAP is commonly used to evaluate object detection algorithms&#x2019; accuracy and robustness) of 0.991 for all target categories when applying an Intersection over Union (IoU) threshold of 0.5. This further demonstrates the model&#x2019;s superior detection capabilities across various categories, affirming its exemplary performance.</p>
<table-wrap id="T5" position="float">
<label>Table&#xa0;5</label>
<caption>
<p>The detection result of YOLOv7 model for maize seed germination.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" colspan="6" align="center">YOLOv7 Training Indicators</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" rowspan="4" align="left"><bold>All classes</bold>
</td>
<td valign="top" align="left"><bold>F1-confidence</bold>
</td>
<td valign="top" align="left"><bold>F1</bold>
</td>
<td valign="top" align="left">0.99</td>
<td valign="top" rowspan="3" align="left"><bold>Confidence</bold>
</td>
<td valign="top" align="left">0.663</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Precision-confidence</bold>
</td>
<td valign="top" align="left"><bold>Precision</bold>
</td>
<td valign="top" align="left">1.00</td>
<td valign="top" align="left">0.896</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Recall-confidence</bold>
</td>
<td valign="top" align="left"><bold>Recall</bold>
</td>
<td valign="top" align="left">1.00</td>
<td valign="top" align="left">0.000</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Precision-recall</bold>
</td>
<td valign="top" colspan="4" align="left">0.991 mAP@0.5</td>
</tr>
</tbody>
</table>
</table-wrap>
<disp-formula>
<label>(7)</label>
<mml:math display="block" id="M7">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<label>(8)</label>
<mml:math display="block" id="M8">
<mml:mrow>
<mml:mtext>AP&#x2009;</mml:mtext>
<mml:mo>=</mml:mo>
<mml:munderover>
<mml:mo>&#x222b;</mml:mo>
<mml:mn>0</mml:mn>
<mml:mn>1</mml:mn>
</mml:munderover>
<mml:mi>P</mml:mi>
<mml:mfenced>
<mml:mi>R</mml:mi>
</mml:mfenced>
<mml:mi>d</mml:mi>
<mml:mi>R</mml:mi>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<label>(9)</label>
<mml:math display="block" id="M9">
<mml:mrow>
<mml:mtext>F</mml:mtext>
<mml:mn>1</mml:mn>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mfrac>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mo>*</mml:mo>
<mml:mi>P</mml:mi>
<mml:mo>*</mml:mo>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>R</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<label>(10)</label>
<mml:math display="block" id="M10">
<mml:mrow>
<mml:mtext>P&#x2009;</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<label>(11)</label>
<mml:math display="block" id="M11">
<mml:mrow>
<mml:mtext>mAP&#x2009;</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>n</mml:mi>
</mml:mfrac>
<mml:mstyle displaystyle="true">
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:msubsup>
<mml:mrow>
<mml:mi>A</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:mstyle>
</mml:mrow>
</mml:math>
</disp-formula>
<p>In these formulas, True Positives (TP) represent the number of samples where the predicted label is positive and the actual label is also positive. T represents the total number of samples, and False Negatives (FN) indicate the number of samples where the predicted label is negative, but the actual label is positive. Similarly, False Positives (FP) represent the number of samples where the predicted label is positive, but the actual label is negative. Moreover, the area under the precision-recall (P-R) curve, denoted as AP, provides a measure of the model&#x2019;s performance.</p>
<p>
<xref ref-type="fig" rid="f8"><bold>Figure&#xa0;8A</bold></xref> is the confusion matrix of germinated maize seed based on YOLOv7 model, which provides a visual representation of the classification performance, showing the counts of true positive, true negative, false positive, and false negative predictions. The detection accuracy was 95% for germinated seeds and 99% for ungerminated seeds, respectively. Background FP refers to the number of cases where the background is erroneously predicted as a target; fortunately, no background area was incorrectly classified as a target in this study. <xref ref-type="fig" rid="f8"><bold>Figure&#xa0;8B</bold></xref> shows the actual detection results of YOLOv7 for discriminating seed germination.</p>
<fig id="f8" position="float">
<label>Figure&#xa0;8</label>
<caption>
<p>Confusion matrix and detection results of germination maize seed based on YOLOv7 model <bold>(A)</bold> Confusion matrix, <bold>(B)</bold> Image of detection results.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-14-1248598-g008.tif"/>
</fig>
<p>All indicators mean that the model can essentially replace manual observation for determining seed germination status. Therefore, although this method required some time and manpower for data annotation and training, the overall cost was much lower than manual operation, and can provide a reference for rapid detection of seed germination in crops. On the other hand, the algorithm suffered from the problem of duplicate detection in practical applications (<xref ref-type="bibr" rid="B11">Chen et&#xa0;al., 2023a</xref>), resulting in some seeds being simultaneously labeled as germinated and non-germinated. This phenomenon may lead to a misclassification and reduce the practicality and reliability of the algorithm. Hence, future work will focus on improving the algorithm to solve the duplicate detection problem.</p>
</sec>
<sec id="s3_7">
<label>3.7</label>
<title>Maize seed bud length detection based on Mask R-CNN</title>
<p>The Mask R-CNN model achieved an impressive mAP score of 0.9571, indicating its effectiveness and accuracy in detecting and localizing objects. The mAP is a widely used evaluation metric for object detection models, and a high mAP score indicates that the model performs well in both precision and recall, making it a reliable choice for seed germination analysis. Additionally, the loss value during training decreased significantly, stabilizing around 0.21 from an initial value of 2.61, which is a clear indication of the model&#x2019;s ability to learn and adapt effectively.</p>
<p>
<xref ref-type="fig" rid="f9"><bold>Figure&#xa0;9A</bold></xref> showcases a successful instance of skeleton extraction for maize seed germination, resulting in a clear main skeleton after removing branches, which allows for accurate calculation of the bud length. The detection results of bud length for germinated maize seeds, depicted in <xref ref-type="fig" rid="f9"><bold>Figure&#xa0;9B</bold></xref>, demonstrate Mask R-CNN&#x2019;s impressive capability to accurately segment the seedlings, even when instances overlap or are occluded. This highlights the superiority of the Mask R-CNN model in instance segmentation tasks, making it a valuable tool for precise analysis of seed germination and growth.</p>
<fig id="f9" position="float">
<label>Figure&#xa0;9</label>
<caption>
<p>The bud length detection of germinated maize seeds <bold>(A)</bold> The process of skeleton extraction, <bold>(B)</bold> Probability map of predicting maize sprouts, <bold>(C)</bold> Prediction of maize seedling length, <bold>(D)</bold> Regression analysis of actual and predicted corn sprout length values.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-14-1248598-g009.tif"/>
</fig>
<p>
<xref ref-type="fig" rid="f9"><bold>Figures&#xa0;9C, D</bold></xref> shows the detection result of bud length with R-squared value of 0.98 and an RMSE of 1.64, demonstrating that the integration of Mask R-CNN model and skeleton extraction method could detect the bud length during seed germination accurately and rapidly. The R-squared value, also known as the coefficient of determination, is a statistical measure that indicates the proportion of the variance in the dependent variable (Bud length in this case) that can be explained by the independent variable (The predicted bud length). Meanwhile, RMSE quantifies the average magnitude of the differences between the predicted bud lengths and the actual observed bud lengths. It is worth mentioning that the bud length of germinated seeds is closely related to their viability (<xref ref-type="bibr" rid="B1">Adebisi et&#xa0;al., 2014</xref>). Therefore, the bud length of seeds can be obtained using this algorithm, and the relationship between bud length and viability can be further explored. This not only has important significance for agricultural production but also provides valuable insights for research in other biological fields.</p>
<disp-formula>
<label>(12)</label>
<mml:math display="block" id="M12">
<mml:mrow>
<mml:mtext>SSR&#x2009;</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mstyle displaystyle="true">
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:msubsup>
</mml:mstyle>
<mml:msup>
<mml:mrow>
<mml:mfenced>
<mml:mrow>
<mml:msub>
<mml:mi>Y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mi>Y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="true">^</mml:mo>
</mml:mover>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mo>&#xa0;</mml:mo>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<label>(13)</label>
<mml:math display="block" id="M13">
<mml:mrow>
<mml:mtext>SST&#x2009;</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mstyle displaystyle="true">
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:msubsup>
</mml:mstyle>
<mml:msup>
<mml:mrow>
<mml:mfenced>
<mml:mrow>
<mml:msub>
<mml:mi>Y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mover accent="true">
<mml:mi>Y</mml:mi>
<mml:mo>&#xaf;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#xa0;</mml:mo>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<label>(14)</label>
<mml:math display="block" id="M14">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mo>=</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>n</mml:mi>
</mml:mfrac>
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:munderover>
<mml:msup>
<mml:mrow>
<mml:mfenced>
<mml:mrow>
<mml:msub>
<mml:mi>Y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mi>Y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="true">^</mml:mo>
</mml:mover>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:msqrt>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<label>(15)</label>
<mml:math display="block" id="M15">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>T</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>In these formulas, SSR (Sum of Squares of Residuals) refers to the regression sum of squares, which represents the sum of squared differences between the predicted values and the true values. On the other hand, SST (Total Sum of Squares) stands for the total sum of squares, representing the sum of squared differences between the true values and their mean. <italic>Y<sub>i</sub>
</italic> refers to the actual value of the i-th observation, while <inline-formula>
<mml:math display="inline" id="im1">
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mi>Y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="true">^</mml:mo>
</mml:mover>
</mml:mrow>
</mml:math>
</inline-formula> represents the predicted value of the i-th observation from the regression model. And n denotes the sample size.</p>
</sec>
</sec>
<sec id="s4" sec-type="conclusions">
<label>4</label>
<title>Conclusions</title>
<p>The rapid and successful detection of maize seed viability was achieved by leveraging HSI technology in combination with the multi-scale 3DCNN method. In seed viability detection, the 3DCNN method, which utilizes a limited number of representative spectral bands, was found to learn more complex features and achieve higher accuracy compared to using full-wavelength spectra and machine learning methods. By introducing the multi-scale 3DCNN model, the comprehensive consideration of both spectral and image information enabled a more comprehensive and accurate assessment of maize seed quality. Experimental results demonstrated that the adoption of small block sizes (5 pixels &#xd7; 5 pixels) significantly improved the accuracy of seed viability detection. Furthermore, the YOLOv7 model and Mask R-CNN model were introduced for germination judgment and bud length detection of maize seeds. Both models exhibited outstanding performance in germination judgment and bud length detection, demonstrating excellent detection capabilities. Based on these exceptional detection results, a novel solution for the rapid detection of maize seed germination and bud length was provided. In brief, this study proposed a reliable and effective method for the evaluation of maize seed viability, providing valuable references for agricultural production and germplasm resource preservation.</p>
</sec>
<sec id="s5" sec-type="data-availability">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec id="s6" sec-type="author-contributions">
<title>Author contributions</title>
<p>YF: Conceptualization, Data collection, Data analysis, Writing &#x2013; original draft, Writing &#x2013; review &amp; editing. TA, GY, QW, WH, ZW: providing language help. CZ: Resources, Supervision. TX: Resources, Review-editing, Supervision. All authors contributed to the article and approved the submitted version.</p>
</sec>
</body>
<back>
<sec id="s7" sec-type="funding-information">
<title>Funding</title>
<p>This study was supported by China Postdoctoral Science Foundation (2022M720492), Postdoctoral Scientific Research Fund of Beijing Academy of Agricultural and Forestry Sciences (2022-ZZ-018), Beijing Postdoctoral Science Foundation (2023-117).</p>
</sec>
<sec id="s8" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s9" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Adebisi</surname> <given-names>M. A.</given-names>
</name>
<name>
<surname>Kehinde</surname> <given-names>T. O.</given-names>
</name>
<name>
<surname>Porbeni</surname> <given-names>J. B. O.</given-names>
</name>
<name>
<surname>Oduwaye</surname> <given-names>O. A.</given-names>
</name>
<name>
<surname>Biliaminu</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Akintunde</surname> <given-names>S. A.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Seed and seedling vigour in tropical maize inbred lines</article-title>. <source>Plant Breed. Seed Sci.</source> <volume>67</volume> (<issue>1</issue>), <fpage>87</fpage>&#x2013;<lpage>102</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.2478/v10129-011-0072-4</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Adegbuyi</surname> <given-names>E.</given-names>
</name>
<name>
<surname>Burris</surname> <given-names>J. S.</given-names>
</name>
</person-group> (<year>1988</year>). <article-title>Field criteria used in determining the vigor of seed corn (Zea mays L.) as influenced by drying injury</article-title>. <source>J. Agron. Crop Sci.</source> <volume>161</volume> (<issue>3</issue>), <fpage>171</fpage>&#x2013;<lpage>177</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/j.1439-037X.1988.tb00651.x</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aloupogianni</surname> <given-names>E.</given-names>
</name>
<name>
<surname>Ishikawa</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Ichimura</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Hamada</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Murakami</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Sasaki</surname> <given-names>A.</given-names>
</name>
<etal/>
</person-group>. (<year>2023</year>). <article-title>Effects of dimension reduction of hyperspectral images in skin gross pathology</article-title>. <source>Skin Res. Technol.</source> <volume>29</volume> (<issue>2</issue>), <elocation-id>e13270</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/srt.13270</pub-id>
</citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ambrose</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Kandpal</surname> <given-names>L. M.</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>M. S.</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>W.-H.</given-names>
</name>
<name>
<surname>Cho</surname> <given-names>B.-K.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>High speed measurement of corn seed viability using hyperspectral imaging</article-title>. <source>Infrared Phys. Technol.</source> <volume>75</volume>, <fpage>173</fpage>&#x2013;<lpage>179</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.infrared.2015.12.008</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>An</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Huang</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Tian</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Fan</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Duan</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Dong</surname> <given-names>C.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>Hyperspectral imaging technology coupled with human sensory information to evaluate the fermentation degree of black tea</article-title>. <source>Sensors Actuators B: Chem.</source> <volume>366</volume>, <fpage>131994</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.snb.2022.131994</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="book">
<person-group person-group-type="author">
<collab>Association, I.S.T</collab>
</person-group> (<year>1999</year>). <source>International rules for seed testing. Rules 1999</source>. (<publisher-loc>Zurich, Switzerland</publisher-loc>: <publisher-name>International Seed Testing Association</publisher-name>).</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bai</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Luo</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Huang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>H.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Research on wheat seed germination detection method utilizing YOLOv5</article-title>. <source>Acta Agriculturae Zhejiangensis</source> <volume>35</volume> (<issue>02</issue>), <fpage>445</fpage>&#x2013;<lpage>454</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3969/j.issn.1004-1524.2023.02.22</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cai</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Liang</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Xu</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Duan</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>Z.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Banana pseudostem visual detection method based on improved YOLOV7 detection algorithm</article-title>. <source>Agronomy</source> <volume>13</volume> (<issue>4</issue>), <fpage>999</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy13040999</pub-id>
</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Casado-Garc&#xed;a</surname> <given-names>&#xc1;.</given-names>
</name>
<name>
<surname>Dom&#xed;nguez</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Garc&#xed;a-Dom&#xed;nguez</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Heras</surname> <given-names>J.</given-names>
</name>
<name>
<surname>In&#xe9;s</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Mata</surname> <given-names>E.</given-names>
</name>
<etal/>
</person-group>. (<year>2019</year>). <article-title>CLoDSA: a tool for augmentation in classification, localization, detection, semantic segmentation and instance segmentation tasks</article-title>. <source>BMC Bioinf.</source> <volume>20</volume> (<issue>1</issue>), <fpage>1</fpage>&#x2013;<lpage>14</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12859-019-2931-1</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>F.</given-names>
</name>
<name>
<surname>Chang</surname> <given-names>Q.</given-names>
</name>
</person-group> (<year>2023</year>b). <article-title>Combination of continuous wavelet transform and successive projection algorithm for the estimation of winter wheat plant nitrogen concentration</article-title>. <source>Remote Sens.</source> <volume>15</volume> (<issue>4</issue>), <fpage>997</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/rs15040997</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Ma</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Ji</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Feng</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>X.</given-names>
</name>
<etal/>
</person-group>. (<year>2023</year>a). <article-title>Apple inflorescence recognition of phenology stage in complex background based on improved YOLOv7</article-title>. <source>Comput. Electron. Agric.</source> <volume>211</volume>, <fpage>108048</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2023.108048</pub-id>
</citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cheng</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Hu</surname> <given-names>R.</given-names>
</name>
<name>
<surname>She</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Pan</surname> <given-names>Z.</given-names>
</name>
<etal/>
</person-group>. (<year>2023</year>). <article-title>Hyperspectral and imagery integrated analysis for vegetable seed vigor detection</article-title>. <source>Infrared Phys. Technol.</source> <volume>131</volume>, <fpage>104605</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.infrared.2023.104605</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Collins</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Maktabi</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Barberio</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Bencteux</surname> <given-names>V.</given-names>
</name>
<name>
<surname>Jansen-Winkeln</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Chalopin</surname> <given-names>C.</given-names>
</name>
<etal/>
</person-group>. (<year>2021</year>). <article-title>Automatic recognition of colon and esophagogastric cancer with machine learning and hyperspectral imaging</article-title>. <source>Diagnostics</source> <volume>11</volume> (<issue>10</issue>), <fpage>1810</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/diagnostics11101810</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cong</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Zhou</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Lv</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Feng</surname> <given-names>H.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Research on instance segmentation algorithm of greenhouse sweet pepper detection based on improved mask RCNN</article-title>. <source>Agronomy</source> <volume>13</volume> (<issue>1</issue>), <fpage>196</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy13010196</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cortes</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Vapnik</surname> <given-names>V.</given-names>
</name>
</person-group> (<year>1995</year>). <article-title>Support-vector networks. Support-vector networks</article-title>. <source>Machine Learning</source> <volume>20</volume> (<issue>3</issue>), <fpage>273</fpage>&#x2013;<lpage>297</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/BF00994018</pub-id>
</citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cui</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Bing</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Miao</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Prediction of maize seed vigor based on first-order difference characteristics of hyperspectral data</article-title>. <source>Agronomy</source> <volume>12</volume> (<issue>8</issue>), <fpage>1899</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy12081899</pub-id>
</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cui</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Cheng</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Miao</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Prediction of sweet corn seed germination based on hyperspectral image technology and multivariate data regression</article-title>. <source>Sensors</source> <volume>20</volume> (<issue>17</issue>), <fpage>4744</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/s20174744</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>de Almeida</surname> <given-names>V. E.</given-names>
</name>
<name>
<surname>de Ara&#xfa;jo Gomes</surname> <given-names>A.</given-names>
</name>
<name>
<surname>de Sousa Fernandes</surname> <given-names>D. D.</given-names>
</name>
<name>
<surname>Goicoechea</surname> <given-names>H. C.</given-names>
</name>
<name>
<surname>Galv&#xe3;o</surname> <given-names>R. K. H.</given-names>
</name>
<name>
<surname>Ara&#xfa;jo</surname> <given-names>M. C. U.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Vis-NIR spectrometric determination of Brix and sucrose in sugar production samples using kernel partial least squares with interval selection based on the successive projections algorithm</article-title>. <source>Talanta</source> <volume>181</volume>, <fpage>38</fpage>&#x2013;<lpage>43</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.talanta.2017.12.064</pub-id>
</citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dewi</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>A. P. S.</given-names>
</name>
<name>
<surname>Christanto</surname> <given-names>H. J.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Deep learning for highly accurate hand recognition based on yolov7 model</article-title>. <source>Big Data Cogn. Computing</source> <volume>7</volume> (<issue>1</issue>), <fpage>53</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/bdcc7010053</pub-id>
</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fan</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Huang</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>L.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Data fusion of two hyperspectral imaging systems with complementary spectral sensing ranges for blueberry bruising detection</article-title>. <source>Sensors</source> <volume>18</volume> (<issue>12</issue>), <fpage>4463</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/s18124463</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Feng</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Zhu</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Bao</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Feng</surname> <given-names>X.</given-names>
</name>
<name>
<surname>He</surname> <given-names>Y.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Identification of maize kernel vigor under different accelerated aging times using hyperspectral imaging</article-title>. <source>Molecules</source> <volume>23</volume> (<issue>12</issue>), <fpage>3078</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/molecules23123078</pub-id>
</citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fu</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Xia</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Fu</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Fu</surname> <given-names>W.</given-names>
</name>
<etal/>
</person-group>. (<year>2023</year>). <article-title>Skeleton extraction and pruning point identification of jujube tree for dormant pruning using space colonization algorithm</article-title>. <source>Front. Plant Sci.</source> <volume>13</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2022.1103794</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gaikwad</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Triki</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Bouaziz</surname> <given-names>B.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Measuring morphological functional leaf traits from digitized herbarium specimens using traitEx software</article-title>. <source>Biodiversity Inf. Sci. Standards</source> <volume>3</volume>, <elocation-id>e37091</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3897/biss.3.37091</pub-id>
</citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ge</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Cao</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Fu</surname> <given-names>P.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Hyperspectral image classification method based on 2D&#x2013;3D CNN and multibranch feature fusion</article-title>. <source>IEEE J. Selected Topics Appl. Earth Observations Remote Sens.</source> <volume>13</volume>, <fpage>5776</fpage>&#x2013;<lpage>5788</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/jstars.2020.3024841</pub-id>
</citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ghaderizadeh</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Abbasi-Moghadam</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Sharifi</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>N.</given-names>
</name>
<name>
<surname>Tariq</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Hyperspectral image classification using a hybrid 3D-2D convolutional neural networks</article-title>. <source>IEEE J. Selected Topics Appl. Earth Observations Remote Sens.</source> <volume>14</volume>, <fpage>7570</fpage>&#x2013;<lpage>7588</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/jstars.2021.3099118</pub-id>
</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Han</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Jiang</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>Y.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>A spatial&#x2013;spectral combination method for hyperspectral band selection</article-title>. <source>Remote Sens.</source> <volume>14</volume> (<issue>13</issue>), <fpage>3217</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/rs14133217</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>He</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Gkioxari</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Doll&#xe1;r</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Girshick</surname> <given-names>R.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Mask r-cnn</article-title> in <conf-name>Proceedings of the IEEE international conference on computer vision</conf-name>. <fpage>2961</fpage>&#x2013;<lpage>2969</lpage>.</citation>
</ref>
<ref id="B28">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>K&#xf6;ppen</surname> <given-names>M.</given-names>
</name>
</person-group> (<year>2000</year>). <article-title>The curse of dimensionality</article-title>, in <conf-name>5th online world conference on soft computing in industrial applications</conf-name>. (WSC5). <volume>1</volume>, <fpage>4</fpage>&#x2013;<lpage>8</lpage>.</citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Shen</surname> <given-names>Q.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Spectral&#x2013;spatial classification of hyperspectral imagery with 3D convolutional neural network</article-title>. <source>Remote Sens.</source> <volume>9</volume> (<issue>1</issue>), <fpage>67</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/rs9010067</pub-id>
</citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liang</surname> <given-names>X.-y.</given-names>
</name>
<name>
<surname>Guo</surname> <given-names>F.</given-names>
</name>
<name>
<surname>Feng</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>J.-L.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Meng</surname> <given-names>J.-J.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Single-seed sowing increased pod yield at a reduced seeding rate by improving root physiological state of Arachis hypogaea</article-title>. <source>J. Integr. Agric.</source> <volume>19</volume> (<issue>4</issue>), <fpage>1019</fpage>&#x2013;<lpage>1032</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/S2095-3119(19)62712-7</pub-id>
</citation>
</ref>
<ref id="B31">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Lin</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Bao</surname> <given-names>F.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>Gait recognition with multiple-temporal-scale 3D convolutional neural network</article-title>,&#x201d; in <conf-name>Proceedings of the 28th ACM International Conference on Multimedia</conf-name>. (<publisher-loc>Seattle, WA, USA</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name>).</citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Chu</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Weng</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Zhu</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Han</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>Z.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>Fusion of electronic nose and hyperspectral imaging for mutton freshness detection using input-modified convolution neural network</article-title>. <source>Food Chem.</source> <volume>385</volume>, <fpage>132651</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.foodchem.2022.132651</pub-id>
</citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Long</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Tang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Tian</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Huang</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>B.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Label-free detection of maize kernels aging based on Raman hyperspectral imaging technique</article-title>. <source>Comput. Electron. Agric.</source> <volume>200</volume>, <fpage>107229</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2022.107229</pub-id>
</citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Masood</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Nazir</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Nawaz</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Javed</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Iqbal</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Mehmood</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Brain tumor localization and segmentation using mask RCNN</article-title>. <source>Front. Comput. Sci.</source> <volume>15</volume> (<issue>6</issue>). doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11704-020-0105-y</pub-id>
</citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pang</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Men</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Yan</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Xiao</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Hyperspectral imaging coupled with multivariate methods for seed vitality estimation and forecast for Quercus variabilis</article-title>. <source>Spectrochimica Acta Part A: Mol. Biomolecular Spectrosc.</source> <volume>245</volume>, <fpage>118888</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.saa.2020.118888</pub-id>
</citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shen</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Zhen</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>Z.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Segmentation of unsound wheat kernels based on improved mask RCNN</article-title>. <source>Sensors</source> <volume>23</volume> (<issue>7</issue>), <fpage>3379</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/s23073379</pub-id>
</citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shi</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Du</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Gao</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Yan</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Remote sensing image fusion using multi-scale convolutional neural network</article-title>. <source>J. Indian Soc. Remote Sens.</source> <volume>49</volume> (<issue>7</issue>), <fpage>1677</fpage>&#x2013;<lpage>1687</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s12524-021-01353-2</pub-id>
</citation>
</ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Soeb</surname> <given-names>M. J. A.</given-names>
</name>
<name>
<surname>Jubayer</surname> <given-names>M. F.</given-names>
</name>
<name>
<surname>Tarin</surname> <given-names>T. A.</given-names>
</name>
<name>
<surname>Al Mamun</surname> <given-names>M. R.</given-names>
</name>
<name>
<surname>Ruhad</surname> <given-names>F. M.</given-names>
</name>
<name>
<surname>Parven</surname> <given-names>A.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>Tea leaf disease detection and identification based on YOLOv7 (YOLO-T)</article-title>. <source>Sci. Rep.</source> <volume>13</volume> (<issue>1</issue>), <fpage>6078</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-023-33270-4</pub-id>
</citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Suksungworn</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Sanevas</surname> <given-names>N.</given-names>
</name>
<name>
<surname>Wongkantrakorn</surname> <given-names>N.</given-names>
</name>
<name>
<surname>Fangern</surname> <given-names>N.</given-names>
</name>
<name>
<surname>Vajrodaya</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Duangsrisai</surname> <given-names>S.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Phytotoxic effect of Haldina cordifolia on germination, seedling growth and root cell viability of weeds and crop plants</article-title>. <source>NJAS: Wageningen J. Life Sci.</source> <volume>78</volume> (<issue>1</issue>), <fpage>175</fpage>&#x2013;<lpage>181</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.njas.2016.05.008</pub-id>
</citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sun</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Du</surname> <given-names>Q.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Hyperspectral band selection: A review</article-title>. <source>IEEE Geosci. Remote Sens. Magazine</source> <volume>7</volume> (<issue>2</issue>), <fpage>118</fpage>&#x2013;<lpage>139</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/mgrs.2019.2911100</pub-id>
</citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sun</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>T.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Hyperspectral image classification method based on M-3DCNN-Attention</article-title>. <source>J. Appl. Remote Sens.</source> <volume>16</volume> (<issue>02</issue>), <fpage>026507</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1117/1.Jrs.16.026507</pub-id>
</citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tang</surname> <given-names>F.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>F.</given-names>
</name>
<name>
<surname>Tian</surname> <given-names>X.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Long-distance person detection based on YOLOv7</article-title>. <source>Electronics</source> <volume>12</volume> (<issue>6</issue>), <fpage>1502</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/electronics12061502</pub-id>
</citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tian</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Fan</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Huang</surname> <given-names>W.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Detection of early decay on citrus using LW-NIR hyperspectral reflectance imaging coupled with two-band ratio and improved watershed segmentation algorithm</article-title>. <source>Food Chem.</source> <volume>360</volume>, <fpage>130077</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.foodchem.2021.130077</pub-id>
</citation>
</ref>
<ref id="B44">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Triki</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Bouaziz</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Gaikwad</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Mahdi</surname> <given-names>W.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Deep leaf: Mask R-CNN based leaf detection and segmentation from digitized herbarium specimen images</article-title>. <source>Pattern Recognition Lett.</source> <volume>150</volume>, <fpage>76</fpage>&#x2013;<lpage>83</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.patrec.2021.07.003</pub-id>
</citation>
</ref>
<ref id="B45">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>C. -Y.</given-names>
</name>
<name>
<surname>Bochkovskiy</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Liao</surname> <given-names>H. -Y. M.</given-names>
</name>
</person-group> (<year>2022</year>b). &#x201c;<article-title>YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors</article-title>&#x201d;, in: <source>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</source>, <fpage>7464</fpage>&#x2013;<lpage>7475</lpage>. doi: <pub-id pub-id-type="doi">10.48550/arXiv.2207.02696</pub-id>
</citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Huang</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Tian</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Long</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Fan</surname> <given-names>S.</given-names>
</name>
</person-group> (<year>2022</year>e). <article-title>Rapid and non-destructive classification of new and aged maize seeds using hyperspectral image and chemometric methods</article-title>. <source>Front. Plant Sci.</source> <volume>13</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2022.849495</pub-id>
</citation>
</ref>
<ref id="B47">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Huang</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Yu</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Tian</surname> <given-names>X.</given-names>
</name>
</person-group> (<year>2022</year>d). <article-title>Identification of maize with different moldy levels based on catalase activity and data fusion of hyperspectral images</article-title>. <source>Foods</source> <volume>11</volume> (<issue>12</issue>), <fpage>1727</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/foods11121727</pub-id>
</citation>
</ref>
<ref id="B48">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Zheng</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Du</surname> <given-names>Y.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>A crop image segmentation and extraction algorithm based on mask RCNN</article-title>. <source>Entropy</source> <volume>23</volume> (<issue>9</issue>), <fpage>1160</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/e23091160</pub-id>
</citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Yan</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>F.</given-names>
</name>
<name>
<surname>Qi</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Jin</surname> <given-names>X.-B.</given-names>
</name>
</person-group> (<year>2022</year>c). <article-title>SVM classification method of waxy corn seeds with different vitality levels based on hyperspectral imaging</article-title>. <source>J. Sensors</source> <fpage>1</fpage>&#x2013;<lpage>13</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1155/2022/4379317</pub-id>
</citation>
</ref>
<ref id="B50">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Ji</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Zheng</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>H.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>a). <article-title>Evaluation of biochemical and physiological changes in sweet corn seeds under natural aging and artificial accelerated aging</article-title>. <source>Agronomy</source> <volume>12</volume> (<issue>5</issue>), <fpage>1028</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy12051028</pub-id>
</citation>
</ref>
<ref id="B51">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Kong</surname> <given-names>F.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Q.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Hyperspectral image classification based on two-branch spectral&#x2013;spatial-feature attention network</article-title>. <source>Remote Sens.</source> <volume>13</volume> (<issue>21</issue>), <fpage>4262</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/rs13214262</pub-id>
</citation>
</ref>
<ref id="B52">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xin</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Lin</surname> <given-names>X.-H.</given-names>
</name>
<name>
<surname>Zhou</surname> <given-names>Y.-C.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>X.-L.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Lu</surname> <given-names>X.-X.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Proteome analysis of maize seeds: the effect of artificial ageing</article-title>. <source>Physiologia Plantarum</source> <volume>143</volume> (<issue>2</issue>), <fpage>126</fpage>&#x2013;<lpage>138</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/j.1399-3054.2011.01497.x</pub-id>
</citation>
</ref>
<ref id="B53">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>XingJia</surname> <given-names>T.</given-names>
</name>
<name>
<surname>PengChang</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>ZongBen</surname> <given-names>X.</given-names>
</name>
<name>
<surname>BingLiang</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Lakshmanna</surname> <given-names>K.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Calligraphy and painting identification 3D-CNN model based on hyperspectral image MNF dimensionality reduction</article-title>. <source>Comput. Intell. Neurosci.</source> <volume>2022</volume>, <fpage>1</fpage>&#x2013;<lpage>19</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1155/2022/1418814</pub-id>
</citation>
</ref>
<ref id="B54">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xu</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Ma</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Niu</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Lv</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Wei</surname> <given-names>S.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>Effects of artificial aging on physiological quality and cell ultrastructure of maize (Zea mays L.)</article-title>. <source>Cereal Res. Commun</source>. <volume>51</volume>, <fpage>615</fpage>&#x2013;<lpage>626</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s42976-022-00328-4</pub-id>
</citation>
</ref>
<ref id="B55">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yasmin</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Ahmed</surname> <given-names>M. R.</given-names>
</name>
<name>
<surname>Wakholi</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Lohumi</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Mukasa</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>G.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>Near-infrared hyperspectral imaging for online measurement of the viability detection of naturally aged watermelon seeds</article-title>. <source>Front. Plant Sci.</source> <volume>13</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2022.986754</pub-id>
</citation>
</ref>
<ref id="B56">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname> <given-names>S.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Challenges in KNN classification</article-title>. <source>IEEE Trans. Knowledge Data Eng.</source> <volume>34</volume> (<issue>10</issue>), <fpage>4663</fpage>&#x2013;<lpage>4675</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/tkde.2021.3049250</pub-id>
</citation>
</ref>
<ref id="B57">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Fan</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Xiang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>Q.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Non-destructive analysis of germination percentage, germination energy and simple vigour index on wheat seeds during storage by Vis/NIR and SWIR hyperspectral imaging</article-title>. <source>Spectrochimica Acta Part A: Mol. Biomolecular Spectrosc.</source> <volume>239</volume>, <fpage>118488</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.saa.2020.118488</pub-id>
</citation>
</ref>
<ref id="B58">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Ma</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Yong</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Zhu</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Luo</surname> <given-names>Z.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>Deep-learning-based automatic evaluation of rice seed germination rate</article-title>. <source>J. Sci. Food Agric.</source> <volume>103</volume> (<issue>4</issue>), <fpage>1912</fpage>&#x2013;<lpage>1924</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/jsfa.12318</pub-id>
</citation>
</ref>
<ref id="B59">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Ma</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Yong</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Zhu</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>X.</given-names>
</name>
<etal/>
</person-group>. (<year>2023</year>). <article-title>Rice seed size measurement using a rotational perception deep learning model</article-title>. <source>Comput. Electron. Agric.</source> <volume>205</volume>, <fpage>107583</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2022.107583</pub-id>
</citation>
</ref>
<ref id="B60">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Chellappa</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Phillips</surname> <given-names>P. J.</given-names>
</name>
</person-group> (<year>1999</year>). <article-title>Subspace linear discriminant analysis for face recognition</article-title>. University of Maryland at College Park, USA: Citeseer.</citation>
</ref>
<ref id="B61">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhou</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>RDE-YOLOv7: an improved model based on YOLOv7 for better performance in detecting dragon fruits</article-title>. <source>Agronomy</source> <volume>13</volume> (<issue>4</issue>), <fpage>1042</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy13041042</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>
