<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Archiving and Interchange DTD v2.3 20070202//EN" "archivearticle.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="methods-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Plant Sci.</journal-id>
<journal-title>Frontiers in Plant Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Plant Sci.</abbrev-journal-title>
<issn pub-type="epub">1664-462X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpls.2024.1276799</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Plant Science</subject>
<subj-group>
<subject>Methods</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Design and experiments with a SLAM system for low-density canopy environments in greenhouses based on an improved Cartographer framework</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Tan</surname><given-names>Haoran</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn002"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2371188"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Zhao</surname><given-names>Xueguan</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="author-notes" rid="fn002"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1774327"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhai</surname><given-names>Changyuan</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1515863"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Fu</surname><given-names>Hao</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Chen</surname><given-names>Liping</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>*</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" equal-contrib="yes" corresp="yes">
<name>
<surname>Yang</surname><given-names>Minli</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>*</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>College of Engineering, China Agricultural University</institution>, <addr-line>Beijing</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>Intelligent Equipment Research Center, Beijing Academy of Agriculture and Forestry Sciences</institution>, <addr-line>Beijing</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>National Engineering Research Center for Information Technology in Agriculture</institution>, <addr-line>Beijing</addr-line>, <country>China</country></aff>
<aff id="aff4"><sup>4</sup><institution>Beijing PAIDE Science and Technology Development Co., Ltd</institution>, <addr-line>Beijing</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by">
<p>Edited by: Xiaolan Lv, Jiangsu Academy of Agricultural Sciences (JAAS), China</p>
</fn>
<fn fn-type="edited-by">
<p>Reviewed by: Bingbo Cui, Jiangsu University, China</p>
<p>Ping Xu, Hangzhou Dianzi University, China</p>
</fn>
<fn fn-type="corresp" id="fn001">
<p>*Correspondence: Minli Yang, <email xlink:href="mailto:qyang@cau.edu.cn">qyang@cau.edu.cn</email>; Liping Chen, <email xlink:href="mailto:chenlp@nercita.org.cn">chenlp@nercita.org.cn</email>
</p>
</fn>
<fn fn-type="equal" id="fn002">
<p>&#x2020;These authors have contributed equally to this work</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>29</day>
<month>01</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>15</volume>
<elocation-id>1276799</elocation-id>
<history>
<date date-type="received">
<day>13</day>
<month>08</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>03</day>
<month>01</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2024 Tan, Zhao, Zhai, Fu, Chen and Yang</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Tan, Zhao, Zhai, Fu, Chen and Yang</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>To address the problem that the low-density canopy of greenhouse crops affects the robustness and accuracy of simultaneous localization and mapping (SLAM) algorithms, a greenhouse map construction method for agricultural robots based on multiline LiDAR was investigated. Based on the Cartographer framework, this paper proposes a map construction and localization method based on spatial downsampling. Taking suspended tomato plants planted in greenhouses as the research object, an adaptive filtering point cloud projection (AF-PCP) SLAM algorithm was designed. Using a wheel odometer, adaptively vertically projected 16-line LiDAR point cloud data were linearly interpolated to construct a map and perform high-precision pose estimation in a greenhouse with a low-density canopy environment. Experiments were carried out in canopy environments with leaf area densities (LADs) of 2.945&#x2013;5.301 m<sup>2</sup>/m<sup>3</sup>. The results showed that the AF-PCP SLAM algorithm increased the average mapping area of the crop rows by 155.7% compared with that of the Cartographer algorithm. The mean error and coefficient of variation of the crop row length were 0.019&#xa0;m and 0.217%, respectively, which were 77.9% and 87.5% lower than those of the Cartographer algorithm. The average maximum void length was 0.124&#xa0;m, which was 72.8% lower than that of the Cartographer algorithm. The localization experiments were carried out at speeds of 0.2&#xa0;m/s, 0.4&#xa0;m/s, and 0.6&#xa0;m/s. The average relative localization errors at these speeds were 0.026&#xa0;m, 0.029&#xa0;m, and 0.046&#xa0;m, respectively, and the standard deviation was less than 0.06&#xa0;m. Compared with that of the dead reckoning algorithm, the average localization error was reduced by 79.9% with the proposed algorithm. The results show that our proposed framework can map and localize robots with precision even in low-density canopy environments in greenhouses, demonstrating the satisfactory capability of the proposed approach and highlighting its promising applications in the autonomous navigation of agricultural robots.</p>
</abstract>
<kwd-group>
<kwd>mobile robot</kwd>
<kwd>lidar</kwd>
<kwd>simultaneous localization and mapping (SLAM)</kwd>
<kwd>greenhouse</kwd>
<kwd>perception</kwd>
</kwd-group>
<counts>
<fig-count count="14"/>
<table-count count="2"/>
<equation-count count="11"/>
<ref-count count="41"/>
<page-count count="16"/>
<word-count count="8050"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-in-acceptance</meta-name>
<meta-value>Sustainable and Intelligent Phytoprotection</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>The development of agricultural facilities ensures the stable and safe supply of important agricultural products while reducing the occupation of arable land. These facilities are important for promoting the modernization of agriculture in rural areas (<xref ref-type="bibr" rid="B3">Bai et&#xa0;al., 2023</xref>). The development of intelligent equipment suitable for greenhouses is needed to ensure stable and efficient production in agricultural facilities (<xref ref-type="bibr" rid="B17">Jin et&#xa0;al., 2021</xref>; <xref ref-type="bibr" rid="B38">Zhai et&#xa0;al., 2022</xref>). Due to the narrow working environment of greenhouses, the applicability of conventional field operation equipment in greenhouses is limited. The traditional manual greenhouse management method has high labor intensity, and the application of pesticides in a closed environment may cause serious harm to the human body (<xref ref-type="bibr" rid="B9">Fu et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B19">Li et&#xa0;al., 2022</xref>). Therefore, it is necessary to study intelligent navigation robots that are suitable for application in greenhouse environments (<xref ref-type="bibr" rid="B14">Huang and Sugiyama, 2022</xref>; <xref ref-type="bibr" rid="B26">Qiao et&#xa0;al., 2022</xref>).</p>
<p>Environmental perception and real-time localization are the basis of greenhouse robot navigation studies and are prerequisites for autonomous navigation (<xref ref-type="bibr" rid="B28">Shamshiri et&#xa0;al., 2018</xref>; <xref ref-type="bibr" rid="B5">Chen et&#xa0;al., 2020</xref>). However, the closed nature of the greenhouse environment severely blocks satellite signals. Therefore, it is highly important to study perception and localization methods that do not depend on satellite navigation to develop navigation technology for greenhouse environments (<xref ref-type="bibr" rid="B22">Mendes et&#xa0;al., 2019</xref>; <xref ref-type="bibr" rid="B7">Choi et&#xa0;al., 2022</xref>). High-precision mapping and localization of agricultural robots are critical for the automation of greenhouse operations (<xref ref-type="bibr" rid="B8">Dong et&#xa0;al., 2020</xref>; <xref ref-type="bibr" rid="B34">Westling et&#xa0;al., 2021</xref>; <xref ref-type="bibr" rid="B41">Zhou et&#xa0;al., 2021</xref>). Using the prior map, the robot can obtain prior information about the environment and realize global path planning to achieve safe and accurate autonomous navigation. To this end, researchers have carried out many studies in the fields of sensor environment perception and simultaneous localization and mapping (SLAM) (<xref ref-type="bibr" rid="B21">Matsuzaki et&#xa0;al., 2018</xref>; <xref ref-type="bibr" rid="B29">Shi et&#xa0;al., 2020</xref>; <xref ref-type="bibr" rid="B24">Ouyang et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B15">Jiang and Ahamed, 2023</xref>; <xref ref-type="bibr" rid="B30">Su et&#xa0;al., 2023</xref>).</p>
<p>In recent years, with the development of computer technology and edge computing equipment, the sensors used for greenhouse operating environment perception and localization have included ultrasonic technology (<xref ref-type="bibr" rid="B25">Palleja and Landers, 2017</xref>; <xref ref-type="bibr" rid="B6">Chen et&#xa0;al., 2018</xref>; <xref ref-type="bibr" rid="B18">Lao et&#xa0;al., 2021</xref>), ultra-wideband (UWB) technology (<xref ref-type="bibr" rid="B12">Hou et&#xa0;al., 2020</xref>; <xref ref-type="bibr" rid="B37">Yao et&#xa0;al., 2021</xref>), LiDAR technology (<xref ref-type="bibr" rid="B4">Chen et&#xa0;al., 2019</xref>; <xref ref-type="bibr" rid="B40">Zhang et&#xa0;al., 2020</xref>; <xref ref-type="bibr" rid="B27">Saha et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B31">Sun et&#xa0;al., 2022</xref>), and machine vision technology (<xref ref-type="bibr" rid="B23">Nissimov et&#xa0;al., 2015</xref>; <xref ref-type="bibr" rid="B32">Wang et&#xa0;al., 2022a</xref>; <xref ref-type="bibr" rid="B33">Wang et&#xa0;al., 2022b</xref>). <xref ref-type="bibr" rid="B13">Huang et&#xa0;al. (2021)</xref> designed a robot localization system based on spread spectrum sounds for a greenhouse containing a strawberry ridge and achieved centimeter-level localization accuracy in a small greenhouse. However, the coverage of sound localization technology is limited, which significantly increases the cost in large-scale scenarios. Aiming to address the problem of insufficient features in greenhouse environments, <xref ref-type="bibr" rid="B39">Zhang et&#xa0;al. (2022)</xref> proposed a visual localization method based on benchmark markers and factor graphs. This method considers the constraint relationship between robot motion characteristics and variables, and the standard deviation of the localization error is less than 0.05&#xa0;m. This method solves the problem of unstructured and insufficient features in greenhouses by adding benchmark markers to the SLAM front-end module. Although this method can achieve stable localization of the robot, it depends on the number of tags detected. In the case in which the plant is occluded or the number of tags is insufficient, inaccurate localization or even failure may occur. <xref ref-type="bibr" rid="B36">Yan et&#xa0;al. (2022)</xref> designed a loosely coupled real-time localization and mapping system based on an extended Kalman filter and visual-inertial odometry (VIO) using multisensor fusion and the visual-IMU-wheel odometry method to achieve accurate pose estimation and dense three-dimensional (3D) point cloud mapping in greenhouses. However, while the localization method based on visual SLAM performs well under good light conditions, the high light intensity in the environment interferes with the extraction of visual features, which may cause issues with map construction in the greenhouse environment and errors in robot pose calculation results.</p>
<p>To address the effects of greenhouse lighting on robot perception, LiDAR, which offers high stability and robustness, is a competitive perception and localization technology for all-weather greenhouse operations. The AgriEco Robot, designed by <xref ref-type="bibr" rid="B1">Abanay et&#xa0;al. (2022)</xref>, performs autonomous navigation based on a two-dimensional (2D) LiDAR sensor. This robot can accurately navigate between rows of strawberry greenhouse crops, detect the end of a row, and switch to the next row. To solve the problem of inaccurate localization caused by the lack of structure in grape greenhouse scenes, <xref ref-type="bibr" rid="B2">Aguiar et&#xa0;al. (2022)</xref> designed the VineSLAM algorithm, which extracts 3D LiDAR point clouds and uses half-plane features to construct an environmental map; the robot can achieve accurate localization in symmetrical long vineyard corridors. However, with seasonal changes, a decrease in grape canopy density degrades the map construction and localization results. <xref ref-type="bibr" rid="B20">Long et&#xa0;al. (2022)</xref> proposed a method to reduce the cumulative odometry errors that degrade mapping and localization accuracy in long-distance greenhouse operations. Their method is based on a UWB/IMU/ODOM/LiDAR-integrated localization system and combines LiDAR with the two-dimensional map established through the adaptive Monte Carlo localization (AMCL) algorithm for global localization of the robot. The method is suitable for relatively open greenhouse environments where shorter crops are planted; in greenhouse environments with taller crops, due to the physical characteristics of UWB technology, the occlusion of plants affects the stability of the SLAM system. Moreover, due to the large number of unstructured crops and irregular planting gaps in greenhouses, it is difficult for LiDAR-based methods to obtain clear map boundaries. <xref ref-type="bibr" rid="B12">Hou et&#xa0;al. (2020)</xref> developed a greenhouse robot navigation system based on Cartographer with dual LiDAR. This system improved the efficiency of map building and enabled robotic mapping and autonomous navigation in a strawberry greenhouse environment. However, as the environmental map construction of strawberry plants is based on strawberry ridges, the results cannot be generalized to typical unridged greenhouse environments. To date, no studies have been dedicated to solving the laser SLAM problem in sparse-feature greenhouse environments. In other fields, <xref ref-type="bibr" rid="B35">Xie et&#xa0;al. (2021)</xref> proposed a visual-inertial fusion SLAM method for feature-sparse lunar environments that integrates visual measurements and inertial sensor information via pose optimization to achieve high-precision joint positioning and improve the accuracy of relative pose estimation between keyframes.</p>
<p>Although the above studies proposed improved methods to address problems such as greenhouse terrains, cumulative odometer errors, and irregular greenhouse structures, the impact of the sparse canopy characteristics of greenhouse crops on laser SLAM-based methods has not been considered. For crop environments such as low-density canopies in greenhouses, traditional laser SLAM methods have difficulty constructing accurate and complete environmental maps due to the loss of contour information caused by map degradation; thus, these methods cannot meet the localization accuracy needs for agricultural robot operation. In addition, SLAM methods based on 2D LiDAR can obtain environmental information only at the installation height level, while SLAM methods based on 3D LiDAR have higher computational costs and higher requirements for achieving good industrial computing performance (<xref ref-type="bibr" rid="B16">Jiang et&#xa0;al., 2022</xref>). The purpose of this study was to explore a low-cost spatial downsampling-based map construction and localization method based on the Cartographer framework to optimize the map construction effect and localization accuracy in suspended crop environments in greenhouses to construct maps and realize high-precision pose estimates in low-density canopy environments in greenhouses.</p>
</sec>
<sec id="s2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="s2_1">
<label>2.1</label>
<title>Test platform</title>
<p>The map construction and localization hardware system built in this paper is primarily composed of a perception module, control module, power module, and drive module, as shown in <xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1</bold></xref>. The Autolabor Pro1 platform produced by Qingke Intelligent Company (Shanghai, China) was selected as the robot chassis. The maximum movement speed of the robot is 1&#xa0;m/s, and the robot can carry a maximum load of 50&#xa0;kg. The four-wheel differential control of the robot allows stationary turning, so the robot can turn in narrow spaces. The chassis measures 726 &#xd7; 617 &#xd7; 273&#xa0;mm, and the system was equipped with an RS-LiDAR-16 three-dimensional LiDAR system from Suteng Juchuang Company (Shenzhen, China). The LiDAR system can collect 300,000 data points per second, the acquisition frequency ranges from 5 to 20&#xa0;Hz, the horizontal viewing angle range is 360&#xb0;, and the horizontal resolution ranges from 0.1&#xb0; to 0.4&#xb0;. The vertical viewing angle is 30&#xb0;, and the vertical resolution is 2&#xb0;. The measurement range is 0.2&#x2013;100&#xa0;m. The industrial computer on the robot runs the Ubuntu 18.04 operating system, and the overall software system of the greenhouse mobile robot is designed based on ROS Melodic. The specific equipment models are listed in <xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref>.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Map construction and localization hardware system of the greenhouse mobile robot.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-15-1276799-g001.tif"/>
</fig>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Greenhouse map construction and localization system equipment model.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="center">No.</th>
<th valign="top" align="center">Equipment</th>
<th valign="top" align="center">Specification</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center">1</td>
<td valign="top" align="center">Encoder</td>
<td valign="top" align="center">HTS-5008 encoder</td>
</tr>
<tr>
<td valign="top" align="center">2</td>
<td valign="top" align="center">LiDAR</td>
<td valign="top" align="center">RS-LiDAR-16LiDAR</td>
</tr>
<tr>
<td valign="top" align="center">3</td>
<td valign="top" align="center">Battery</td>
<td valign="top" align="center">24 V 40AH lithium iron phosphate battery</td>
</tr>
<tr>
<td valign="top" align="center">4</td>
<td valign="top" align="center">Motor drive</td>
<td valign="top" align="center">BLDH-750 brushless motor drive</td>
</tr>
<tr>
<td valign="top" align="center">5</td>
<td valign="top" align="center">Controller</td>
<td valign="top" align="center">STM32F103zet6 computer on a chip</td>
</tr>
<tr>
<td valign="top" align="center">6</td>
<td valign="top" align="center">Industrial computer</td>
<td valign="top" align="center">AMD Ryzen3 3200G</td>
</tr>
<tr>
<td valign="top" align="center">7</td>
<td valign="top" align="center">Chassis</td>
<td valign="top" align="center">Autolabor Pro1</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The STM32 microcontroller controls the linear and angular velocities of the mobile robot, provides real-time feedback on the motion state of the robot, and interacts with the industrial computer through RS232 serial communication. The industrial computer collects, fuses, and processes the robot sensor information; constructs the map; plans the path; and performs autonomous localization and navigation according to the control instructions. The ROS system installed on the industrial computer has a distributed architecture, which allows each functional module in the framework to be designed and compiled separately and run as loosely coupled nodes.</p>
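<p>As an illustration of this interaction, the following minimal Python sketch shows how a ROS node on the industrial computer could read the chassis state over the RS232 link and republish it as odometry. The serial port, baud rate, and ASCII frame format ("vx,wz") are hypothetical assumptions made for illustration; the actual protocol of the robot is not reproduced here.</p>
<preformat preformat-type="code">#!/usr/bin/env python
# Sketch of the RS232 bridge between the STM32 controller and the
# industrial computer (frame format and port settings are assumed).
import rospy
import serial
from nav_msgs.msg import Odometry

def main():
    rospy.init_node('stm32_bridge')
    port = serial.Serial('/dev/ttyUSB0', 115200, timeout=0.1)  # assumed
    pub = rospy.Publisher('odom', Odometry, queue_size=10)
    while not rospy.is_shutdown():
        line = port.readline().decode(errors='ignore').strip()
        if not line:
            continue
        try:
            vx, wz = [float(s) for s in line.split(',')]  # "vx,wz" frame
        except ValueError:
            continue  # drop malformed frames
        msg = Odometry()
        msg.header.stamp = rospy.Time.now()
        msg.header.frame_id = 'odom'
        msg.child_frame_id = 'base_link'
        msg.twist.twist.linear.x = vx    # linear speed feedback
        msg.twist.twist.angular.z = wz   # angular velocity feedback
        pub.publish(msg)

if __name__ == '__main__':
    main()
</preformat>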
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Research method</title>
<sec id="s2_2_1">
<label>2.2.1</label>
<title>Improved Cartographer algorithm design</title>
<p>The Cartographer algorithm (<xref ref-type="bibr" rid="B11">Hess et&#xa0;al., 2016</xref>) is an open-source laser SLAM framework proposed by Google that is widely used in the fields of robotics and autonomous systems. The Cartographer algorithm mainly establishes a series of submaps based on LiDAR data, inserts point cloud data into the submap through scan matching, and forms a complete map through loop closure detection and optimization, eliminating the cumulative error between submaps. The software framework of the adaptive filtering point cloud projection (AF-PCP) SLAM system based on the Cartographer algorithm is shown in <xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2</bold></xref>.</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>AF-PCP SLAM algorithm framework. AF-PCP, adaptive filtering point cloud projection.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-15-1276799-g002.tif"/>
</fig>
<p>The LiDAR driver publishes 3D point cloud data on the /rslidar_points topic, and <italic>z</italic>-axis threshold filtering is performed on the point cloud by setting the LiDAR perception region of interest (ROI). The key parameters of the algorithm are as follows: <italic>min_height</italic> represents the minimum <italic>z</italic>-axis height involved in point cloud compression, and the unit is m; <italic>max_height</italic> represents the maximum height involved in point cloud compression, and the unit is m; <italic>range_min</italic> and <italic>range_max</italic> represent the minimum and maximum measurement ranges of the output point cloud, respectively, and the units are both m; and <italic>scan_time</italic> represents the scanning time, and the unit is s. The data format of the input point cloud is sensor_msgs/PointCloud2, and the message format of the processed point cloud is sensor_msgs/LaserScan. The pose of the robot is estimated by a two-wheel differential kinematics model, and the odometer data are published in the message format of nav_msgs/Odometry.</p>
<p>The greenhouse tomato plant is taken as an example; the height of the tomato plants is 1.5&#x2013;2&#xa0;m. First, the perception ROI of the greenhouse robot is delineated as a three-dimensional region in the LiDAR coordinate system. The motion speed of the greenhouse robot, the braking distance, and the reserved space between the body and the crops are considered in determining this region. The specific parameters are set as follows: <italic>min_height</italic> is set to 0.5&#xa0;m, <italic>max_height</italic> is set to 1.5&#xa0;m, <italic>scan_time</italic> is set to 0.1&#xa0;s, <italic>range_min</italic> is set to 0.2&#xa0;m, and <italic>range_max</italic> is set to 15&#xa0;m. Based on the above parameter settings, the adaptive projection process for the greenhouse crop point clouds is designed, as shown in <xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3</bold></xref>.</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>Flowchart of the greenhouse crop point cloud adaptive projection algorithm.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-15-1276799-g003.tif"/>
</fig>
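<p>To make the ROI filtering step concrete, the following minimal Python sketch (an illustration under the parameter values above, not the node's actual implementation) applies the <italic>z</italic>-axis threshold and range limits to one frame of point cloud data:</p>
<preformat preformat-type="code">import numpy as np

# ROI parameters from the text (units: m).
MIN_HEIGHT, MAX_HEIGHT = 0.5, 1.5
RANGE_MIN, RANGE_MAX = 0.2, 15.0

def roi_filter(points):
    """points: (N, 4) array of x, y, z, intensity for one LiDAR frame.
    Returns only the points inside the perception ROI (the array that
    the text calls points_range)."""
    x, y, z = points[:, 0], points[:, 1], points[:, 2]
    rho = np.hypot(x, y)  # horizontal (vertically projected) range
    keep = np.logical_and.reduce([z >= MIN_HEIGHT, MAX_HEIGHT >= z,
                                  rho >= RANGE_MIN, RANGE_MAX >= rho])
    return points[keep]
</preformat>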
<p>After receiving a complete frame of multiline LiDAR data, the greenhouse map construction system filters out the point cloud data outside the perception ROI. The filtered point cloud data <inline-formula>
<mml:math display="inline" id="im1">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>=</mml:mo>
<mml:mo>{</mml:mo>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mn>3</mml:mn>
</mml:msub>
<mml:mo>&#x2026;</mml:mo>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mi>n</mml:mi>
</mml:msub>
<mml:mo>}</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> are stored in the array <italic>points_range</italic>. For the adaptive vertical projection of the point cloud, the specific processing steps are as follows: First, the <italic>k</italic>th scanning frame of the LiDAR <inline-formula>
<mml:math display="inline" id="im2">
<mml:mrow>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is selected, and each point in the frame is represented as <inline-formula>
<mml:math display="inline" id="im3">
<mml:mrow>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo stretchy="false">(</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>z</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>, and an iterator is used to record the starting position of the point. The <italic>x</italic> coordinate information of the point cloud is obtained; the <italic>y</italic> and <italic>z</italic> information and intensity value <italic>i</italic> corresponding to the point are obtained using the offset pointers <italic>offset_y</italic>, <italic>offset_z</italic>, and <italic>offset_i</italic>; the point <inline-formula>
<mml:math display="inline" id="im4">
<mml:mrow>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> is represented in the LiDAR Cartesian coordinate system. The points are converted to the polar coordinate representation of the <italic>x</italic>&#x2013;<italic>y</italic> plane by vertical projection. The Euclidean distance <inline-formula>
<mml:math display="inline" id="im5">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, angle <inline-formula>
<mml:math display="inline" id="im6">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b8;</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, and index <inline-formula>
<mml:math display="inline" id="im7">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>d</mml:mi>
<mml:mi>e</mml:mi>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> of the point in the polar coordinate system are calculated, and the calculation formulas are shown in <xref ref-type="disp-formula" rid="eq1">Formulas 1</xref>&#x2013;<xref ref-type="disp-formula" rid="eq4">4</xref>.</p>
<disp-formula id="eq1">
<label>(1)</label>
<mml:math display="block" id="M1">
<mml:mrow>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mo>{</mml:mo>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>&#x3b8;</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo>}</mml:mo>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula id="eq2">
<label>(2)</label>
<mml:math display="block" id="M2">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:msubsup>
<mml:mi>x</mml:mi>
<mml:mi>k</mml:mi>
<mml:mn>2</mml:mn>
</mml:msubsup>
<mml:mo>+</mml:mo>
<mml:msubsup>
<mml:mi>y</mml:mi>
<mml:mi>k</mml:mi>
<mml:mn>2</mml:mn>
</mml:msubsup>
</mml:mrow>
</mml:msqrt>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula id="eq3">
<label>(3)</label>
<mml:math display="block" id="M3">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b8;</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mi>arctan</mml:mi>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula id="eq4">
<label>(4)</label>
<mml:math display="block" id="M4">
<mml:mrow>
<mml:msub>
<mml:mtext mathvariant="italic">index</mml:mtext>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mtext>arctan</mml:mtext>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfrac>
<mml:mo>+</mml:mo>
<mml:mi>&#x3c0;</mml:mi>
</mml:mrow>
<mml:mi>r</mml:mi>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where <inline-formula>
<mml:math display="inline" id="im8">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mtext>&#x2009;and&#x2009;</mml:mtext>
<mml:msub>
<mml:mi>&#x3b8;</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> are the distance and angle of the laser point in the polar coordinate system, <inline-formula>
<mml:math display="inline" id="im9">
<mml:mrow>
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the corresponding intensity value, <inline-formula>
<mml:math display="inline" id="im10">
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math display="inline" id="im11">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> are the corresponding coordinate values, and <italic>r</italic> is the angular resolution of the LiDAR system. Each horizontal revolution of the selected LiDAR contains 1,800 beams, so the angular resolution is <inline-formula>
<mml:math display="inline" id="im12">
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mi>&#x3c0;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1800</mml:mn>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</inline-formula> &#x2248; 0.00349&#xa0;rad.
<p>Similarly, the distances at this angle are calculated for the remaining 15 LiDAR beams. The minimum distance, obtained via sorting, is stored in the <italic>scan_ranges</italic> container as the range value for that angle.</p>
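<p>The projection step can be summarized by the following minimal Python sketch of <xref ref-type="disp-formula" rid="eq1">Formulas 1</xref>&#x2013;<xref ref-type="disp-formula" rid="eq4">4</xref> and the per-bin minimum selection (np.arctan2 is used in place of the plain arctangent so that the quadrant information is preserved; the variable names are illustrative):</p>
<preformat preformat-type="code">import numpy as np

R_ANGULAR = 2.0 * np.pi / 1800  # angular resolution r, about 0.00349 rad

def project_to_scan(points_range):
    """Adaptive vertical projection of the ROI-filtered cloud.
    points_range: (N, 4) array of x, y, z, intensity.
    Returns 1,800 range bins in sensor_msgs/LaserScan order."""
    x, y = points_range[:, 0], points_range[:, 1]
    rho = np.sqrt(x**2 + y**2)                  # Formula 2
    theta = np.arctan2(y, x)                    # Formula 3
    index = ((theta + np.pi) / R_ANGULAR).astype(int) % 1800  # Formula 4
    scan_ranges = np.full(1800, np.inf)         # the scan_ranges container
    for k in range(points_range.shape[0]):
        # keep the minimum distance among all beams falling into this bin
        scan_ranges[index[k]] = min(scan_ranges[index[k]], rho[k])
    return scan_ranges
</preformat>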
<p>Since the acquisition of laser point cloud data is not instantaneous, the motion of the robot distorts the point cloud data collected while the robot is moving. To eliminate this motion distortion, in this paper, the wheel odometer data calculated from the encoder are used to increase the pose update frequency, reflecting the pose change of the greenhouse robot during the laser data acquisition process. The odometer pose corresponding to each point in the current frame of point cloud data is calculated, and the coordinates of each point are then transformed into a common coordinate system whose origin is the pose at which the first laser point was acquired.</p>
<p>It is assumed that the robot accelerates uniformly while collecting a frame of point cloud data. To ensure the accuracy of the data, a double-ended queue is used to buffer the data, and at least two entries are kept in the queue so that the wheel odometer timestamps are never earlier than the reception time of the point cloud data. The start time and end time for a frame of point cloud data are <inline-formula>
<mml:math display="inline" id="im13">
<mml:mrow>
<mml:msub>
<mml:mi>t</mml:mi>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math display="inline" id="im14">
<mml:mrow>
<mml:msub>
<mml:mi>t</mml:mi>
<mml:mrow>
<mml:mi>e</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, respectively, and the corresponding starting position and end position of the origin of the LiDAR coordinate system in the odometer coordinate system are expressed as <inline-formula>
<mml:math display="inline" id="im15">
<mml:mrow>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math display="inline" id="im16">
<mml:mrow>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mrow>
<mml:mi>e</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, respectively. A total of <italic>n</italic> pose information data points <inline-formula>
<mml:math display="inline" id="im17">
<mml:mrow>
<mml:mo>{</mml:mo>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>t</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>t</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>t</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>n</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mi>e</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>}</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> are obtained. Linear interpolation is performed on this pose information to obtain the approximate odometer pose corresponding to the timestamp of each point in the point cloud data. By transforming the point cloud data into the odometer coordinate system, LiDAR point cloud data in the robot coordinate system can be obtained with the motion distortion removed. The method for calculating the coordinates of point <inline-formula>
<mml:math display="inline" id="im18">
<mml:mrow>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>x</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>y</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> in the odometer coordinate system is shown in <xref ref-type="disp-formula" rid="eq5">Formula 5</xref>:</p>
<disp-formula id="eq5">
<label>(5)</label>
<mml:math display="block" id="M5">
<mml:mrow>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mrow>
<mml:mtable>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>x</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>y</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
<mml:mo>]</mml:mo>
</mml:mrow>
<mml:mo>=</mml:mo>
<mml:mi>R</mml:mi>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mrow>
<mml:mtable>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
<mml:mo>]</mml:mo>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula id="eq6a">
<label>(6a)</label>
<mml:math display="block" id="M6">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mo>=</mml:mo>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mrow>
<mml:mtable>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>cos</mml:mi>
<mml:mi>&#x3b8;</mml:mi>
</mml:mrow>
</mml:mtd>
<mml:mtd>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>sin</mml:mi>
<mml:mi>&#x3b8;</mml:mi>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>sin</mml:mi>
<mml:mi>&#x3b8;</mml:mi>
</mml:mrow>
</mml:mtd>
<mml:mtd>
<mml:mrow>
<mml:mi>cos</mml:mi>
<mml:mi>&#x3b8;</mml:mi>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
<mml:mo>]</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula id="eq7a">
<label>(7a)</label>
<mml:math display="block" id="M7">
<mml:mrow>
<mml:mi>l</mml:mi>
<mml:mo>=</mml:mo>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mrow>
<mml:mtable>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
<mml:mo>]</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where <inline-formula>
<mml:math display="inline" id="im19">
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>x</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>y</mml:mi>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> are the coordinates of the odometer coordinate system after conversion, <inline-formula>
<mml:math display="inline" id="im20">
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> are the point cloud data points before the transformation, and <italic>R</italic> (<xref ref-type="disp-formula" rid="eq6a">Formula 6a</xref>) is the rotation matrix relating the two coordinate systems. <italic>l</italic> (<xref ref-type="disp-formula" rid="eq7a">Formula 7a</xref>) is the translation vector relating the two coordinate systems, <italic>&#x3b8;</italic> is the rotation angle between the two coordinate systems, and <inline-formula>
<mml:math display="inline" id="im21">
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> is the translation amount from the odometer coordinate system to the LiDAR coordinate system.</p>
<p>The laser point cloud data are then transformed from the odometer coordinate system to the reference coordinate system of the data frame, which is defined at the pose of the first point of the current frame of point cloud data. The method for calculating the coordinates of the transformed point <inline-formula>
<mml:math display="inline" id="im22">
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:msubsup>
<mml:mi>o</mml:mi>
<mml:mi>x</mml:mi>
<mml:mo>'</mml:mo>
</mml:msubsup>
<mml:mo>,</mml:mo>
<mml:msubsup>
<mml:mi>o</mml:mi>
<mml:mi>y</mml:mi>
<mml:mo>'</mml:mo>
</mml:msubsup>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> in the reference coordinate system is shown in <xref ref-type="disp-formula" rid="eq6b">Formula 6b</xref>:</p>
<disp-formula id="eq6b">
<label>(6b)</label>
<mml:math display="block" id="M8">
<mml:mrow>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mrow>
<mml:mtable>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msubsup>
<mml:mi>o</mml:mi>
<mml:mi>x</mml:mi>
<mml:mo>'</mml:mo>
</mml:msubsup>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msubsup>
<mml:mi>o</mml:mi>
<mml:mi>y</mml:mi>
<mml:mo>'</mml:mo>
</mml:msubsup>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
<mml:mo>]</mml:mo>
</mml:mrow>
<mml:mo>=</mml:mo>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mrow>
<mml:mtable>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>x</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>y</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
<mml:mo>]</mml:mo>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mrow>
<mml:mtable>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mn>0</mml:mn>
</mml:msub>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mn>0</mml:mn>
</mml:msub>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
<mml:mo>]</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where <inline-formula>
<mml:math display="inline" id="im23">
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mn>0</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mn>0</mml:mn>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> are the coordinates of the origin of the reference coordinate system in the odometer coordinate system and <italic>R</italic><sup>&#x2212;1</sup> is the inverse of the rotation matrix relating the two coordinate systems.</p>
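<p>The undistortion procedure of <xref ref-type="disp-formula" rid="eq5">Formulas 5</xref>, <xref ref-type="disp-formula" rid="eq6a">6a</xref>, <xref ref-type="disp-formula" rid="eq7a">7a</xref>, and <xref ref-type="disp-formula" rid="eq6b">6b</xref> can be sketched in Python as follows. This illustrative sketch assumes increasing odometer timestamps and a small heading change within one frame (so that linear interpolation of the heading is valid); it is not the exact implementation used in this study.</p>
<preformat preformat-type="code">import numpy as np

def rot(theta):
    """Rotation matrix R of Formula 6a."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s], [s, c]])

def undistort(points, stamps, odom_stamps, odom_poses):
    """Remove motion distortion by linear pose interpolation.
    points: (N, 2) laser points in the LiDAR frame; stamps: (N,) times;
    odom_stamps, odom_poses: wheel-odometer times and (x, y, theta) poses.
    Returns the points in the reference frame of the first point."""
    xs = np.interp(stamps, odom_stamps, odom_poses[:, 0])
    ys = np.interp(stamps, odom_stamps, odom_poses[:, 1])
    ths = np.interp(stamps, odom_stamps, odom_poses[:, 2])
    # Formula 5: per-point transform into the odometer frame, o = R p + l.
    odom_pts = np.stack([rot(th) @ p + np.array([px, py])
                         for p, px, py, th in zip(points, xs, ys, ths)])
    # Formula 6b: back into the reference frame of the first point,
    # o' = R0^-1 o - R0^-1 l0.
    R0_inv = rot(ths[0]).T       # inverse of a rotation is its transpose
    l0 = np.array([xs[0], ys[0]])
    return (R0_inv @ (odom_pts - l0).T).T
</preformat>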
<p>Finally, the header timestamp, intensity values, and other information for each frame of the LiDAR point cloud data are filled in, and the LiDAR data after the adaptive vertical projection process are output in the sensor_msgs/LaserScan format at a publication frequency of 10&#xa0;Hz. The effect of the point cloud processing is shown in <xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4</bold></xref>.</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Point cloud processing results. <bold>(A)</bold> Point cloud before processing. <bold>(B)</bold> Point cloud after processing.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-15-1276799-g004.tif"/>
</fig>
<p>The scan-to-map matching method was used to construct the map, as shown in <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5</bold></xref>. First, submaps are used to organize the whole map. Each submap consists of several LiDAR scanning frames {Scan(n)}, and the complete global map is composed of all the submaps. Assuming that the initial pose of the robot is <italic>&#x3be;</italic><sub>1</sub> = (0, 0, 0), the LiDAR scanning frame is denoted as Scan<sub>1</sub> (1). At this pose, the first submap (1) is initialized by Scan<sub>1</sub> (1). The robot pose <italic>&#x3be;</italic><sub>2</sub> corresponding to Scan<sub>1</sub> (2) is calculated by the scan-to-map matching method, and Scan<sub>1</sub> (2) is added to Submap (1) based on pose <italic>&#x3be;</italic><sub>2</sub>. The scan-to-map matching method is continuously executed, and the newly obtained LiDAR frames are added until a new LiDAR frame is completely included in Submap (1); that is, when the new LiDAR frame contains no information beyond that already included in Submap (1), the creation of Submap (1) is complete. This process is repeated to create all the submaps. Finally, all the local submaps {Submap(m)} are combined to form the complete global map.</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Schematic diagram of the map construction process.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-15-1276799-g005.tif"/>
</fig>
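<p>Schematically, the submap life cycle described above can be written as the following Python sketch; here match, insert, and new_information stand in for Cartographer's scan matcher, scan insertion, and novelty test, which are far more elaborate in the real implementation:</p>
<preformat preformat-type="code"># Schematic sketch of the scan-to-map submap cycle (illustrative only;
# match, insert and new_information abstract Cartographer's internals).
def build_map(scans, match, insert, new_information):
    submaps = []
    submap, pose = None, (0.0, 0.0, 0.0)      # initial pose xi_1 = (0, 0, 0)
    for scan in scans:
        if submap is None:
            submap = insert({}, scan, pose)   # initialize Submap(1) with Scan(1)
            continue
        pose = match(submap, scan, pose)      # scan-to-map pose estimate xi_k
        if new_information(submap, scan, pose):
            submap = insert(submap, scan, pose)
        else:                                 # scan fully contained in submap:
            submaps.append(submap)            # Submap(m) is complete
            submap = insert({}, scan, pose)   # start Submap(m+1)
    if submap is not None:
        submaps.append(submap)
    return submaps   # the global map is the set of all submaps
</preformat>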
</sec>
<sec id="s2_2_2">
<label>2.2.2</label>
<title>Layout of the test site</title>
<p>In this paper, a simulated greenhouse scene was built from 30&#xa0;mm &#xd7; 30&#xa0;mm aluminum profiles, fixed bases, and simulated plants. The simulated crop has three rows; each row is 4&#xa0;m long, the row height is 1.3&#xa0;m, and the row spacing is 1.2&#xa0;m. The test scene is shown in <xref ref-type="fig" rid="f6"><bold>Figure&#xa0;6</bold></xref>. In each crop row, aluminum profiles were used as support rods at regular intervals, a set of simulated crops was arranged every 0.2&#xa0;m, and the number of leaves per crop was counted.</p>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>Simulated greenhouse test scene. <bold>(A)</bold> Photograph of the simulated test scene. <bold>(B)</bold> Schematic diagram of the simulated test scene.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-15-1276799-g006.tif"/>
</fig>
</sec>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>Test scheme</title>
<sec id="s2_3_1">
<label>2.3.1</label>
<title>Mapping tests with different degrees of canopy sparseness</title>
<p>To verify the mapping effect of the algorithm for canopy crops of different sparseness in the greenhouse, based on the robot platform built in Section 2.1 and the simulated greenhouse scene constructed in Section 2.2.2, the crops were randomly pruned five times, with 10% of the leaves removed each time. A total of six simulated environments with different degrees of sparseness were thus generated. The experiment was carried out at the National Agricultural Information Demonstration Test Base in Xiaotangshan town, Beijing, in May 2023. In this experiment, the AF-PCP SLAM algorithm was used for SLAM mapping, and the Cartographer algorithm was used as the control group. The robot was remotely controlled to move between the crop rows and construct a map of the entire environment. The specific mapping operation was performed as follows:</p>
<list list-type="simple">
<list-item>
<p>1 The ROS core node, chassis control node, and LiDAR mapping node are started.</p>
</list-item>
<list-item>
<p>2 The robot movement speed is set to 0.2&#xa0;m/s, and the remote control operation is performed according to the running track for the interrow operation.</p>
</list-item>
<list-item>
<p>3 The visualization tool Rviz is used to monitor the robot mapping results in real time, and the map is saved after map construction is completed.</p>
</list-item>
</list>
<p>The canopy biomass, leaf area (LA), leaf area index (LAI), and leaf area density (LAD) are the main indicators of canopy density. The distribution of leaf density in the canopy is random and unpredictable in three-dimensional space, which complicates the quantitative analysis process (<xref ref-type="bibr" rid="B10">Gu et&#xa0;al., 2021</xref>). In this paper, the LAD was used as the measure of canopy density to divide the six experiments. A YMJ-G leaf area meter (Shandong Fangke Instrument Co., Ltd., Shandong, China) was used to measure the leaf area 10 times; the average leaf area was 21.203 cm<sup>2</sup>, and environments with six LAD values were constructed. The number of crop row pixels, the crop row length, and the maximum gap length for each LAD were used as evaluation indicators. The actual length of the crop row was 3.985&#xa0;m, and the actual value of the gap in the crop row was 0.08&#xa0;m. The pixel counts were obtained by counting the pixels of the middle crop row in the PGM images produced by the two algorithms, as sketched below. The crop row length and maximum gap length were calculated by importing the Pbstream files produced by the two algorithms into the cost map tool of the ROS open-source Movebase package; the scale measurement tool in Rviz was used to measure the crop row length of the middle row, and the maximum gap length was determined.</p>
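<p>As an illustration of the pixel counting step, the following Python sketch reads a binary (P5) PGM map such as one produced by the ROS map_saver tool and counts the dark (occupied) pixels inside a hand-chosen bounding box around the middle crop row; the slice bounds and threshold are hypothetical values for illustration:</p>
<preformat preformat-type="code">import numpy as np

def count_occupied(pgm_path, row_slice, col_slice, occ_threshold=50):
    """Count occupied pixels of one crop row in a binary P5 PGM map."""
    with open(pgm_path, 'rb') as f:
        assert f.readline().strip() == b'P5'
        line = f.readline()
        while line.startswith(b'#'):          # skip comment lines
            line = f.readline()
        width, height = [int(v) for v in line.split()]
        int(f.readline())                     # consume the maxval line
        img = np.frombuffer(f.read(), dtype=np.uint8).reshape(height, width)
    # map_saver renders occupied cells as dark pixels
    region = img[row_slice, col_slice]
    return int(np.sum(occ_threshold > region))

# Hypothetical usage: bounding box of the middle crop row in the image.
# n = count_occupied('map.pgm', slice(120, 180), slice(60, 700))
</preformat>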
</sec>
<sec id="s2_3_2">
<label>2.3.2</label>
<title>Localization system performance test</title>
<p>To verify the localization accuracy of the proposed method at different speeds, the robot was controlled to move along the crop row at a low speed of 0.2&#xa0;m/s, a medium speed of 0.4&#xa0;m/s, and a high speed of 0.6&#xa0;m/s. The ROSBag tool was used to record the real-time sensor data and the output of the localization algorithm. The starting position in each test was aligned with a laser pen to ensure that the initial pose in each test was the same, as shown in <xref ref-type="fig" rid="f7"><bold>Figure&#xa0;7A</bold></xref>.</p>
<fig id="f7" position="float">
<label>Figure&#xa0;7</label>
<caption>
<p>Pose alignment and localization equipment setup in the test environment. <bold>(A)</bold> Pose alignment using a laser pointer. <bold>(B)</bold> Localization equipment setup in the test environment.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-15-1276799-g007.tif"/>
</fig>
<p>In this paper, following the localization evaluation equipment selected by <xref ref-type="bibr" rid="B39">Zhang et&#xa0;al. (2022)</xref>, the Starter Set Super-MP-3D ultrasonic localization device from the Marvelmind Company was selected to provide the ground truth for evaluating the localization accuracy of the robot. The device consists of four fixed labels, a mobile vehicle label, and a localization router, and can theoretically achieve an accuracy of up to &#xb1;2&#xa0;cm. In addition, the dead reckoning localization method was used as a comparison method to comprehensively evaluate the performance of the AF-PCP SLAM algorithm. Four localization labels were placed at four vertices of the test area. To reduce the interference of crop occlusion with the ultrasonic signals and ensure maximum signal coverage, the labels were placed at the vertices of the two outer crop rows. Because fixing the labels at the same height improves positioning accuracy, each label was fixed on a beam 1.4&#xa0;m above the ground, as shown in <xref ref-type="fig" rid="f7"><bold>Figure&#xa0;7B</bold></xref>.</p>
<p>To verify the effectiveness of the AF-PCP SLAM algorithm, the localization accuracy was evaluated using EVO (<ext-link ext-link-type="uri" xlink:href="https://github.com/MichaelGrupp/evo">https://github.com/MichaelGrupp/evo</ext-link>, accessed on June 28, 2022), with the ultrasonic localization data used as the ground truth. The relative pose error (RPE) describes the difference between the estimated pose change and the true pose change over a fixed time difference &#x394;<italic>t</italic>, which is equivalent to directly measuring the error of the pose results. The RPE for frame <italic>i</italic> is shown in <xref ref-type="disp-formula" rid="eq7b">Formula 7b</xref>:</p>
<disp-formula id="eq7b">
<label>(7b)</label>
<mml:math display="block" id="M9">
<mml:mrow>
<mml:msub>
<mml:mi>E</mml:mi>
<mml:mtext>i</mml:mtext>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:msubsup>
<mml:mi>Q</mml:mi>
<mml:mi>i</mml:mi>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msubsup>
<mml:msub>
<mml:mi>Q</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>&#x394;</mml:mi>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
<mml:mo stretchy="false">(</mml:mo>
<mml:msubsup>
<mml:mi>P</mml:mi>
<mml:mi>i</mml:mi>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msubsup>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>&#x394;</mml:mi>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where <inline-formula>
<mml:math display="inline" id="im24">
<mml:mrow>
<mml:msub>
<mml:mi>E</mml:mi>
<mml:mtext>i</mml:mtext>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> represents the RPE of the <italic>i</italic>th frame, <inline-formula>
<mml:math display="inline" id="im25">
<mml:mrow>
<mml:msub>
<mml:mi>Q</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> represents the true ultrasonic pose value, <inline-formula>
<mml:math display="inline" id="im26">
<mml:mrow>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> represents the estimated pose value, and <inline-formula>
<mml:math display="inline" id="im27">
<mml:mrow>
<mml:mi>&#x394;</mml:mi>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> represents a fixed time interval (in frames).</p>
<p>Assuming that there are <italic>n</italic> pose frames, the <italic>n</italic> &#x2212; <inline-formula>
<mml:math display="inline" id="im28">
<mml:mrow>
<mml:mi>&#x394;</mml:mi>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> RPE values can be calculated, and an overall value is obtained using the root mean square error (RMSE), as shown in <xref ref-type="disp-formula" rid="eq8">Formula 8</xref>:</p>
<disp-formula id="eq8">
<label>(8)</label>
<mml:math display="block" id="M10">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>E</mml:mi>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>:</mml:mo>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mi>&#x394;</mml:mi>
<mml:mi>t</mml:mi>
<mml:mo stretchy="false">)</mml:mo>
<mml:mo>=</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>m</mml:mi>
</mml:mfrac>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>m</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:msup>
<mml:mrow>
<mml:mo>&#x2016;</mml:mo>
<mml:mi>t</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>s</mml:mi>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>E</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
<mml:mo>&#x2016;</mml:mo>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mn>2</mml:mn>
</mml:mfrac>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</disp-formula>
<p>Here, <inline-formula>
<mml:math display="inline" id="im29">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>=</mml:mo>
<mml:mi>n</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>&#x394;</mml:mi>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math display="inline" id="im30">
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>s</mml:mi>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>E</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> denotes the translational component of the RPE. To comprehensively evaluate the performance of the algorithm, the RMSE is averaged over all interval lengths, as shown in <xref ref-type="disp-formula" rid="eq9">Formula 9</xref>:</p>
<disp-formula id="eq9">
<label>(9)</label>
<mml:math display="block" id="M11">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>E</mml:mi>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>:</mml:mo>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>n</mml:mi>
</mml:mfrac>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>&#x394;</mml:mi>
<mml:mi>t</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:munderover>
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>E</mml:mi>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>:</mml:mo>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mi>&#x394;</mml:mi>
<mml:mi>t</mml:mi>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mstyle>
</mml:mrow>
</mml:math>
</disp-formula>
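<p>For clarity, the following NumPy sketch implements Formulas 7b, 8, and 9 for 2D poses represented as 3&#xd7;3 homogeneous matrices. It is an illustrative reimplementation under these assumptions, not the EVO source code; the interval loop of Formula 9 runs to <italic>n</italic> &#x2212; 1, where frame pairs exist.</p>
<preformat>
# Minimal NumPy sketch of Formulas 7b, 8, and 9 for 2D poses given as
# 3x3 homogeneous matrices Q[i] (ground truth) and P[i] (estimate).
import numpy as np

def rpe(Q, P, dt):
    """Per-frame RPE E_i (Formula 7b) for a fixed frame interval dt."""
    E = []
    for i in range(len(Q) - dt):
        dQ = np.linalg.inv(Q[i]) @ Q[i + dt]   # true relative motion
        dP = np.linalg.inv(P[i]) @ P[i + dt]   # estimated relative motion
        E.append(np.linalg.inv(dQ) @ dP)       # residual transform
    return E

def rmse_trans(Q, P, dt):
    """Translational RMSE over the m = n - dt errors (Formula 8)."""
    t = np.array([e[:2, 2] for e in rpe(Q, P, dt)])   # trans(E_i)
    return float(np.sqrt(np.mean(np.linalg.norm(t, axis=1) ** 2)))

def mean_rmse(Q, P):
    """RMSE averaged over interval lengths (Formula 9), dt = 1 .. n - 1."""
    n = len(Q)
    return float(np.mean([rmse_trans(Q, P, dt) for dt in range(1, n)]))
</preformat>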
</sec>
</sec>
</sec>
<sec id="s3" sec-type="results">
<label>3</label>
<title>Results</title>
<sec id="s3_1">
<label>3.1</label>
<title>Mapping performance</title>
<p>To verify the wide applicability of the AF-PCP SLAM algorithm to different crop densities, six tests were carried out in the same environment at six leaf area densities. The localization trajectories and surrounding environment information of the different methods were displayed in real time in Rviz, as shown in <xref ref-type="fig" rid="f8"><bold>Figure&#xa0;8</bold></xref>.</p>
<fig id="f8" position="float">
<label>Figure&#xa0;8</label>
<caption>
<p>Visualization of the results of the Rviz map construction and localization trajectories. <bold>(A)</bold> The map construction results of the Cartographer algorithm. <bold>(B)</bold> The map construction results of the AF-PCP SLAM algorithm.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-15-1276799-g008.tif"/>
</fig>
<p>
<xref ref-type="fig" rid="f8"><bold>Figure&#xa0;8</bold></xref> shows that the Cartographer algorithm accurately constructs structured walls and glass into grayscale grid maps. However, these maps are still not perfect. Unstructured suspended crops occupy most of the space in the greenhouse environment. However, based on the mapping results, the Cartographer algorithm results in a large loss of crop row mapping, whereas the AF-PCP SLAM algorithm yields more accurate mapping results. To better show the effect of the AF-PCP SLAM algorithm on crop row mapping under different degrees of sparseness, <xref ref-type="fig" rid="f9"><bold>Figure&#xa0;9</bold></xref> shows the map construction results of the two methods for six degrees of sparseness.</p>
<fig id="f9" position="float">
<label>Figure&#xa0;9</label>
<caption>
<p>Results of the mapping of the two algorithms. The map construction results of the Cartographer algorithm under sparsities of <bold>(A)</bold> 100%, <bold>(C)</bold> 90%, <bold>(E)</bold> 80%, <bold>(G)</bold> 70%, <bold>(I)</bold> 60%, and <bold>(K)</bold> 50%. The map construction results of the AF-PCP SLAM algorithm under sparsities of <bold>(B)</bold> 100%, <bold>(D)</bold> 90%, <bold>(F)</bold> 80%, <bold>(H)</bold> 70%, <bold>(J)</bold> 60%, and <bold>(L)</bold> 50%. AF-PCP, adaptive filtering point cloud projection.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-15-1276799-g009.tif"/>
</fig>
<p>
<xref ref-type="fig" rid="f9"><bold>Figure&#xa0;9</bold></xref> shows the crop mapping results of the Cartographer algorithm and AF-PCP SLAM algorithm for six degrees of sparseness. According to the LAD calculation method presented in Section 2.3.1, the LADs corresponding to the six sparsities are 5.301 m<sup>2</sup>/m<sup>3</sup>, 4.830 m<sup>2</sup>/m<sup>3</sup>, 4.358 m<sup>2</sup>/m<sup>3</sup>, 3.887 m<sup>2</sup>/m<sup>3</sup>, 3.416 m<sup>2</sup>/m<sup>3</sup>, and 2.945 m<sup>2</sup>/m<sup>3</sup>. The quantitative statistical results, including the number of pixels, the crop row lengths, and the maximum gap lengths, for the six LADs were calculated, as shown in <xref ref-type="fig" rid="f10"><bold>Figure&#xa0;10</bold></xref>.</p>
<fig id="f10" position="float">
<label>Figure&#xa0;10</label>
<caption>
<p>Map quantitative indicators under different LADs. The black, red, and blue symbols represent the number of pixels in the crop row, the length of the crop row, and the maximum gap length, respectively. LADs, leaf area densities.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-15-1276799-g010.tif"/>
</fig>
<p>According to the results shown in <xref ref-type="fig" rid="f10"><bold>Figure&#xa0;10</bold></xref>, as the LAD increased, the number of pixels in the crop row generally increased, and the AF-PCP SLAM algorithm outperformed the Cartographer algorithm in terms of the number of pixels. The number of pixels in the crop row reflects the effective area of the grid map, so these results indicate that the AF-PCP SLAM algorithm constructs the crop row map better. Specifically, compared with the Cartographer algorithm, the number of crop row pixels obtained by the AF-PCP SLAM algorithm increased by 63.3%, 144.4%, 160%, 100%, 350%, and 116.7% at the six LADs, with an average increase of 155.7%.</p>
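<p>The pixel counts above were obtained from the exported PGM maps, as described in Section 2.3.1. The following Python sketch shows one way to perform such a count; the file name, region of interest, occupancy threshold, and map resolution are hypothetical placeholders.</p>
<preformat>
# Minimal sketch: count occupied pixels of the middle crop row in a PGM map.
# The file name, ROI bounds, threshold, and resolution are hypothetical.
import numpy as np
from PIL import Image

MAP_RESOLUTION = 0.05                        # hypothetical resolution, m/pixel

img = np.asarray(Image.open("map.pgm"))      # 8-bit grayscale occupancy map
roi = img[220:260, 40:440]                   # hypothetical middle-row region
occupied = int(np.count_nonzero(roi &lt; 100))  # dark pixels = occupied cells
print(f"occupied pixels: {occupied}")
print(f"equivalent area: {occupied * MAP_RESOLUTION ** 2:.3f} m^2")
</preformat>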
<p>In addition, the length of the crop rows constructed by the AF-PCP SLAM algorithm remained close to the real crop row length at all LADs. When the LAD was greater than 3.887 m<sup>2</sup>/m<sup>3</sup>, the length of the crop row constructed by the Cartographer algorithm was also close to the real length; however, when the LAD was less than 3.887 m<sup>2</sup>/m<sup>3</sup>, the error between the crop row length constructed by the Cartographer algorithm and the real length was large. The average error in the crop row length calculated by the Cartographer algorithm was 0.086&#xa0;m, and the coefficient of variation was 1.741%; for the AF-PCP SLAM algorithm, the mean error was 0.019&#xa0;m, and the coefficient of variation was 0.217%. Thus, the AF-PCP SLAM algorithm reduced the mean error and coefficient of variation of the crop row length by 77.9% and 87.5%, respectively.</p>
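<p>The reported reductions follow directly from these statistics (the coefficient of variation being the standard deviation divided by the mean, expressed as a percentage), as the following short check illustrates.</p>
<preformat>
# Check of the reported reductions in mean error and coefficient of variation.
carto_mean, carto_cv = 0.086, 1.741   # Cartographer: mean error (m), CV (%)
afpcp_mean, afpcp_cv = 0.019, 0.217   # AF-PCP SLAM:  mean error (m), CV (%)

print(f"mean error reduction: {1 - afpcp_mean / carto_mean:.1%}")  # ~77.9%
print(f"CV reduction:         {1 - afpcp_cv / carto_cv:.1%}")      # ~87.5%
</preformat>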
<p>Moreover, as the LAD increased, the maximum gap length decreased for both algorithms. When the LAD was 5.301 m<sup>2</sup>/m<sup>3</sup>, the maximum gap lengths obtained by the two methods were consistent with the true value of the set crop row spacing gap. When the LAD was between 3.887 m<sup>2</sup>/m<sup>3</sup> and 4.830 m<sup>2</sup>/m<sup>3</sup>, the maximum gap length obtained by the Cartographer algorithm changed slowly. However, when the LAD was between 2.945 m<sup>2</sup>/m<sup>3</sup> and 3.416 m<sup>2</sup>/m<sup>3</sup>, the maximum gap lengths obtained by the Cartographer algorithm were 0.458&#xa0;m and 1.2&#xa0;m, respectively, whereas those obtained by the AF-PCP SLAM algorithm were 0.125&#xa0;m and 0.223&#xa0;m, which are clearly better. Overall, the maximum gap length obtained by the Cartographer algorithm reached 1.2&#xa0;m, with a mean value of 0.456&#xa0;m, whereas the maximum gap length obtained by the AF-PCP SLAM algorithm was 0.223&#xa0;m, with a mean value of 0.124&#xa0;m, 72.8% lower than that of the Cartographer algorithm.</p>
<p>The crop row length in the mapping results reflects, to a certain extent, the quality of the map construction at the crop row ends; missing row-end map content and an increased maximum gap length raise the error rate of the path planning algorithm. When the maximum gap length exceeds the width of the robot body, the path planning algorithm may choose a shorter route through the gap, resulting in errors in the global path planning of the robot. When the LAD fell below 3.887 m<sup>2</sup>/m<sup>3</sup>, the mapping results of the Cartographer algorithm no longer represented most of the crop information in the environment, while the AF-PCP SLAM algorithm still achieved accurate mapping of the crop rows; thus, the AF-PCP SLAM algorithm has higher mapping robustness.</p>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Localization performance</title>
<p>According to the localization accuracy evaluation criteria presented in Section 2.3.2, the localization errors of the AF-PCP SLAM algorithm and the dead reckoning algorithm were calculated at different speeds. <xref ref-type="fig" rid="f11"><bold>Figure&#xa0;11</bold></xref> shows the trajectories of the robot operating at speeds of 0.2&#xa0;m/s, 0.4&#xa0;m/s, and 0.6&#xa0;m/s and the error curves in the <italic>x</italic>, <italic>y</italic>, and <italic>z</italic> directions. The RPE was used to analyze these results; for each timestamp, the absolute difference between the true pose and the estimated pose was calculated. To highlight the RPE during the robot&#x2019;s movement, <xref ref-type="fig" rid="f12"><bold>Figure&#xa0;12</bold></xref> shows the error of the AF-PCP SLAM algorithm and the dead reckoning algorithm relative to the real trajectory, mapping the error onto the trajectory through color coding. <xref ref-type="fig" rid="f13"><bold>Figure&#xa0;13</bold></xref> shows the curves of the RPE and its mean, median, root mean square error, and standard deviation over time. The quantitative error statistics are shown in <xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref>.</p>
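<p>The RPE statistics reported below can be reproduced with the Python API of the evo package introduced in Section 2.3.2. The following sketch is a minimal example; the trajectory file names are hypothetical placeholders, and it is assumed that both trajectories are exported in the TUM format.</p>
<preformat>
# Minimal sketch of the RPE evaluation with the evo package (pip install evo).
# "ultrasonic_truth.txt" and "afpcp_slam.txt" are hypothetical file names;
# the TUM trajectory format is assumed.
from evo.core import metrics, sync
from evo.tools import file_interface

ref = file_interface.read_tum_trajectory_file("ultrasonic_truth.txt")
est = file_interface.read_tum_trajectory_file("afpcp_slam.txt")
ref, est = sync.associate_trajectories(ref, est)   # match by timestamp

rpe = metrics.RPE(metrics.PoseRelation.translation_part,
                  delta=1, delta_unit=metrics.Unit.frames)
rpe.process_data((ref, est))
print(rpe.get_all_statistics())   # max, mean, median, rmse, std, ...
</preformat>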
<fig id="f11" position="float">
<label>Figure&#xa0;11</label>
<caption>
<p><bold>(A)</bold> A comparison of the trajectories of the AF-PCP SLAM algorithm and Odom with the real trajectory. <bold>(B)</bold> A comparison of the error curves in the <italic>x</italic> and <italic>y</italic> directions over time. AF-PCP, adaptive filtering point cloud projection.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-15-1276799-g011.tif"/>
</fig>
<fig id="f12" position="float">
<label>Figure&#xa0;12</label>
<caption>
<p>AF-PCP SLAM algorithm and Odom localization error. <bold>(A)</bold> The error in the trajectory of the AF-PCP SLAM algorithm. <bold>(B)</bold> The error in Odom. AF-PCP, adaptive filtering point cloud projection.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-15-1276799-g012.tif"/>
</fig>
<fig id="f13" position="float">
<label>Figure&#xa0;13</label>
<caption>
<p>Odom and AF-PCP SLAM algorithm localization errors. <bold>(A)</bold> The curve of the relative pose error (RPE) of the AF-PCP SLAM algorithm with time. <bold>(B)</bold>&#xa0;The curve of the RPE of Odom with time. AF-PCP, adaptive filtering point cloud projection.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-15-1276799-g013.tif"/>
</fig>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>RPE values (m) of the AF-PCP SLAM algorithm, Cartographer algorithm, and Odom at three speeds.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="center">Test ID</th>
<th valign="top" align="center">Methods</th>
<th valign="top" align="center">Max</th>
<th valign="top" align="center">Mean</th>
<th valign="top" align="center">Median</th>
<th valign="top" align="center">RMSE</th>
<th valign="top" align="center">Std</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center">0.2</td>
<td valign="top" align="center">AF-PCP SLAM<break/>Cartographer<break/>Odom</td>
<td valign="top" align="center">0.127<break/>0.371<break/>0.567</td>
<td valign="top" align="center">0.026<break/>0.038<break/>0.129</td>
<td valign="top" align="center">0.018<break/>0.039<break/>0.090</td>
<td valign="top" align="center">0.033<break/>0.063<break/>0.185</td>
<td valign="top" align="center">0.021<break/>0.022<break/>0.133</td>
</tr>
<tr>
<td valign="top" align="center">0.4</td>
<td valign="top" align="center">AF-PCP SLAM<break/>Cartographer<break/>Odom</td>
<td valign="top" align="center">0.353<break/>0.109<break/>1.451</td>
<td valign="top" align="center">0.029<break/>0.047<break/>0.138</td>
<td valign="top" align="center">0.012<break/>0.044<break/>0.050</td>
<td valign="top" align="center">0.051<break/>0.085<break/>0.259</td>
<td valign="top" align="center">0.042<break/>0.028<break/>0.220</td>
</tr>
<tr>
<td valign="top" align="center">0.6</td>
<td valign="top" align="center">AF-PCP SLAM<break/>Cartographer<break/>Odom</td>
<td valign="top" align="center">0.540<break/>0.842<break/>1.524</td>
<td valign="top" align="center">0.046<break/>0.035<break/>0.233</td>
<td valign="top" align="center">0.020<break/>0.031<break/>0.095</td>
<td valign="top" align="center">0.074<break/>0.087<break/>0.397</td>
<td valign="top" align="center">0.058<break/>0.020<break/>0.322</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>RPE, relative pose error; AF-PCP, adaptive filtering point cloud projection; RMSE, root mean square error.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>
<xref ref-type="fig" rid="f11"><bold>Figure&#xa0;11A</bold></xref> shows different trajectories: location_pos_remap represents the localization trajectory of the true ultrasonic value, tracked_pose represents the pose trajectory calculated by the AF-PCP SLAM algorithm, and Odom represents the pose trajectory calculated by the track inference algorithm. <xref ref-type="fig" rid="f11"><bold>Figure&#xa0;11B</bold></xref> shows the time-varying pose values in the <italic>x</italic>, <italic>y</italic>, and <italic>z</italic> directions. Since the robot moves in a two-dimensional plane, the value in the <italic>z</italic> direction is always 0. In each experiment, three extreme values are generated, which represent the moment when the robot turns at the end of a row. The results of the three experiments show that the pose trajectory calculated by the AF-PCP SLAM algorithm is closer to the real value than that calculated by the track deduction algorithm. The red circle in <xref ref-type="fig" rid="f11"><bold>Figure&#xa0;11</bold></xref> marks the position where the robot turns at the end of the last row, which corresponds to the maximum localization error.</p>
<p>According to the test results shown in <xref ref-type="fig" rid="f12"><bold>Figures&#xa0;12</bold></xref>, <xref ref-type="fig" rid="f13"><bold>13</bold></xref>, the average localization error is 0.026&#xa0;m and the maximum localization error is 0.127&#xa0;m when the robot moves at 0.2&#xa0;m/s. When the robot moves at 0.4&#xa0;m/s and 0.6&#xa0;m/s, the initial fluctuation in the ultrasonic signal affects the maximum localization error; the average localization errors are 0.029&#xa0;m and 0.046&#xa0;m, respectively, which indicates that the localization accuracy remains relatively stable. In contrast, the dead reckoning algorithm suffers from serious error accumulation: its average localization error is generally greater than 0.12&#xa0;m and reaches 0.233&#xa0;m at a speed of 0.6&#xa0;m/s. The AF-PCP SLAM algorithm is based on laser scan matching, which is insensitive to error accumulation, so it achieves higher localization accuracy. The experimental results in <xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref> show that the AF-PCP SLAM algorithm proposed in this paper achieves high-precision localization of the robot at movement speeds of 0.2&#xa0;m/s, 0.4&#xa0;m/s, and 0.6&#xa0;m/s. Compared with the dead reckoning algorithm, the average localization error of the AF-PCP SLAM algorithm is reduced by 79.8%, 78.9%, and 80.3% at the three speeds, an average reduction of 79.9%.</p>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Performance in an actual greenhouse environment</title>
<p>To verify the ability of the AF-PCP SLAM algorithm to construct maps in actual greenhouse scenes, a greenhouse experiment was carried out at the &#x201c;Doctor&#x201d; farm base in Pinggu District, Beijing. The cucumber cultivar Yutian 156 was planted in this field, with a row width of 1.2&#xa0;m. In the experiment, the Cartographer algorithm and the AF-PCP SLAM algorithm were used to construct the map of the greenhouse. The robot moved along the crop rows at a speed of 0.2&#xa0;m/s, and a map of the three rows of crops was constructed. The experimental environment and the map construction results of the two algorithms are shown in <xref ref-type="fig" rid="f14"><bold>Figure&#xa0;14</bold></xref>.</p>
<fig id="f14" position="float">
<label>Figure&#xa0;14</label>
<caption>
<p>The actual greenhouse scenario test. <bold>(A)</bold> The greenhouse test scenario. <bold>(B)</bold> The map construction results of the Cartographer algorithm. <bold>(C)</bold> The map construction results of the AF-PCP SLAM algorithm.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-15-1276799-g014.tif"/>
</fig>
<p>
<xref ref-type="fig" rid="f14"><bold>Figure&#xa0;14A</bold></xref> shows the greenhouse test scenario, and <xref ref-type="fig" rid="f14"><bold>Figures&#xa0;14B, C</bold></xref> are the map construction results of the Cartographer algorithm and AF-PCP SLAM algorithm, respectively. <xref ref-type="fig" rid="f14"><bold>Figure&#xa0;14A</bold></xref> shows that due to the larger leaves of the actual greenhouse crops, the difference between the two algorithms in the actual greenhouse scene is more obvious. <xref ref-type="fig" rid="f14"><bold>Figures&#xa0;14B, C</bold></xref> show that, compared with the Cartographer algorithm, the AF-PCP SLAM algorithm has a larger mapping area for crop rows. Accurate mapping of crop row contours can effectively control the moving trajectory and range of motion during robot navigation and effectively avoid damaging crops during robot operation. Moreover, row2 and row3 in <xref ref-type="fig" rid="f14"><bold>Figure&#xa0;14B</bold></xref> exhibit obvious drifts because uneven ground reduces the mapping accuracy of crop rows and leads to map drift compared with that in the laboratory scene. The corresponding row2 and row3 drifts in <xref ref-type="fig" rid="f14"><bold>Figure&#xa0;14C</bold></xref> are small, which also reflects, to some extent, that the AF-PCP SLAM algorithm has higher mapping robustness in actual greenhouse scenes.</p>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<label>4</label>
<title>Discussion</title>
<sec id="s4_1" sec-type="discussion">
<label>4.1</label>
<title>Discussion of the mapping results</title>
<p>Suspended plant patterns are widely used in greenhouses and agricultural environments. In this experiment, the mobile robot traveled along crop rows in the greenhouse, where it repeatedly encounters similar scenes: crop rows usually have highly repetitive structures and are long, narrow, and tall, which poses a challenge for map construction. When a map is constructed in a low-density canopy environment, the crop rows may have missing content, such as voids and gaps, for two reasons. First, when the laser beam passes through a gap between leaves, another row of crops or even the wall may be detected, producing inconsistent observations; this reduces the occupancy probability of the corresponding cells in the occupied grid map and degrades the map. Second, the irregular structure of the leaves may cause the laser returns from leaves at the same position to be inconsistent under different robot poses. As the canopy leaf area density decreases, this situation is aggravated, resulting in the absence of crop rows on the map. To address these problems, the AF-PCP SLAM algorithm designed in this paper fully considers the spatial characteristics of suspended crops in greenhouses, extracts and compresses the map contour from the 16-line LiDAR data, and projects the data onto a 2D plane to build the environment map and localize the robot. This approach mitigates the abovementioned map degradation, thereby improving the accuracy and robustness of the mapping results.</p>
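<p>To make the projection idea concrete, the following Python sketch shows the general pattern of filtering a 3D point cloud to a canopy height band and projecting the surviving points onto the 2D plane. This is only an illustrative sketch of the general idea, not the authors&#x2019; exact AF-PCP implementation, and the height band values are hypothetical.</p>
<preformat>
# Illustrative sketch: height-band filtering followed by 2D projection.
# NOT the authors' exact AF-PCP implementation; z_min/z_max are hypothetical
# (the paper's filtering is adaptive rather than a fixed band).
import numpy as np

def project_to_2d(points, z_min=0.2, z_max=1.8):
    """Keep points inside a canopy height band, then drop the z coordinate."""
    z = points[:, 2]
    in_band = (z >= z_min) &amp; (z &lt;= z_max)   # points within the canopy band
    return points[in_band, :2]              # x, y fed to the 2D grid map
</preformat>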
</sec>
<sec id="s4_2" sec-type="discussion">
<label>4.2</label>
<title>Discussion of the localization results</title>
<p>Due to the equidistant distribution of greenhouse crop rows, greenhouse environments have a high degree of symmetry, forming a &#x201c;corridor scene&#x201d;. The corridor problem is one of the key problems faced by SLAM methods: because consecutive LiDAR scans are similar, they cannot reflect the actual displacement in the forward direction, which may introduce localization inaccuracies along that direction. According to <xref ref-type="fig" rid="f11"><bold>Figure&#xa0;11A</bold></xref>, the poses estimated by the Odom localization method are accurate in the initial row. However, when the robot moves to the end of the crop row and turns into the next row, the localization error of the Odom algorithm in the <italic>y</italic> direction grows with the traveled distance; by the time the robot returns to the starting point, the pose estimation error of the Odom algorithm is approximately 0.2&#xa0;m. According to <xref ref-type="fig" rid="f11"><bold>Figure&#xa0;11B</bold></xref>, the positions of the extreme points correspond to the trajectory in <xref ref-type="fig" rid="f11"><bold>Figure&#xa0;11A</bold></xref>, and the trajectory position of the red circle in <xref ref-type="fig" rid="f11"><bold>Figure&#xa0;11A</bold></xref> corresponds to the error at the red circle labeled in <xref ref-type="fig" rid="f11"><bold>Figure&#xa0;11B</bold></xref>. The three tests show that the error in the <italic>x</italic> direction is largest at this moment. We believe this occurs because, when the agricultural robot turns, the deviation in the <italic>y</italic> direction of the robot&#x2019;s coordinate system increases due to the limitations of the two-wheel differential model; even after odometer calibration, this deviation grows rapidly with distance. The <italic>x</italic>-direction error in <xref ref-type="fig" rid="f11"><bold>Figure&#xa0;11B</bold></xref> is mainly caused by the <italic>y</italic>-direction error component in the robot coordinate system, so the maximum error is generated after the robot turns into the last row. At this moment, the localization error of the proposed AF-PCP SLAM algorithm is much smaller than that of the dead reckoning algorithm, which allows the robot to navigate accurately to the next crop row according to the path planning algorithm and thus reduces the risk of row-end collisions during navigation. The results in <xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref> intuitively reflect the influence of the three moving speeds on the localization accuracy: with increasing speed, the mean, maximum, and standard deviation of the localization error increase. The reason is that the increased mechanical vibration of the robot introduces additional input noise, which degrades the collection and matching of the point cloud data; moreover, since the processing efficiency of the algorithm remains unchanged, a faster change in the robot pose leads to lower localization accuracy.</p>
<p>In <xref ref-type="fig" rid="f12"><bold>Figure&#xa0;12</bold></xref>, there is a certain regularity between the fluctuations in the RPE value and the extreme point position of the error curve in the <italic>x</italic> direction in <xref ref-type="fig" rid="f10"><bold>Figure&#xa0;10B</bold></xref>. For example, the RPE value at the position indicated by the red circle in <xref ref-type="fig" rid="f12"><bold>Figure&#xa0;12A</bold></xref> decreases in all three speed tests. The position of the decrease corresponds to the extreme point in the error curve, that is, the moment when the robot moves to the end of the row. Although the increase in the error is small (only 2&#xa0;cm), there is a certain regularity in the results. This phenomenon may be related to robot system errors and may also be related to ultrasonic localization labeling errors. Through repeated comparisons, we believe that this phenomenon is related to the physical characteristics of the ultrasonic localization labeling method. When the robot moves between rows, the localization accuracy of the ultrasonic label is reduced due to the occlusion of the canopy crop. When the robot moves to the end of the crop row, the occlusion effect of the crop row is reduced. At this time, the vehicle label carried by the robot is closer to the localization label arranged in the environment, thereby improving the localization accuracy. This study did not deploy ultrasound tags in greenhouse scenarios. In future work, we will deploy this module in actual greenhouse scenarios as an input to the system for large-scale scene localization. Based on the error characteristics of ultrasound tags, trajectory inference algorithms, and SLAM positioning algorithms, the fusion of the three can achieve robust localization in complex environments. This is also another research work we are currently conducting.</p>
<p>For agricultural robots, real-time localization along crop rows and through turns must be achieved in large-scale greenhouse environments. The proposed AF-PCP SLAM algorithm can accurately estimate the trajectory, as shown in <xref ref-type="fig" rid="f12"><bold>Figure&#xa0;12A</bold></xref>; the maximum average RPE is 0.046&#xa0;m. The proposed method thus shows high robustness and accuracy in challenging agricultural environments, outperforming the Cartographer and dead reckoning baselines evaluated here.</p>
</sec>
</sec>
<sec id="s5" sec-type="conclusion">
<label>5</label>
<title>Conclusion</title>
<p>To address the problem that low-density canopy environments in greenhouses degrade the robustness and localization accuracy of SLAM methods, this paper proposes a spatial downsampling map construction and localization method based on the Cartographer framework, designs an adaptive filtering spatial point cloud projection algorithm, and develops a greenhouse map construction and high-precision pose estimation system. For greenhouse crop leaf area densities ranging from 2.945 m<sup>2</sup>/m<sup>3</sup> to 5.301 m<sup>2</sup>/m<sup>3</sup>, the proposed method can accurately model the contours of sparse crop rows. Compared with the Cartographer algorithm, the AF-PCP SLAM algorithm increased the mapped crop row area by 155.7%. The mean error and coefficient of variation of the crop row length were 0.019&#xa0;m and 0.217%, respectively, which are 77.9% and 87.5% lower than those of the Cartographer algorithm. The average maximum gap length was 0.124&#xa0;m, 72.8% lower than that of the Cartographer algorithm. Localization tests were carried out at speeds of 0.2&#xa0;m/s, 0.4&#xa0;m/s, and 0.6&#xa0;m/s: the average relative localization errors between the estimated and true trajectories were 0.026&#xa0;m, 0.029&#xa0;m, and 0.046&#xa0;m, respectively, with standard deviations below 0.06&#xa0;m, and the average localization error was 79.9% lower than that of the dead reckoning algorithm. These results show that the method meets the requirements of map construction and localization in greenhouse environments. Thus, the proposed method is an effective approach for the autonomous operation of agricultural robots, providing a localization and perception basis for efficient decision-making and safe operation of intelligent agricultural machinery and equipment in greenhouses.</p>
</sec>
<sec id="s6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec id="s7" sec-type="author-contributions">
<title>Author contributions</title>
<p>HT: Conceptualization, Data curation, Funding acquisition, Investigation, Methodology, Resources, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &amp; editing. XZ: Conceptualization, Data curation, Investigation, Methodology, Software, Validation, Writing &#x2013; review &amp; editing. CZ: Conceptualization, Funding acquisition, Investigation, Methodology, Project administration, Resources, Supervision, Validation, Writing &#x2013; original draft, Writing &#x2013; review &amp; editing. HF: Software, Visualization, Writing &#x2013; review &amp; editing. LC: Formal Analysis, Funding acquisition, Writing &#x2013; review &amp; editing. MY: Formal Analysis, Funding acquisition, Writing &#x2013; review &amp; editing.</p>
</sec>
</body>
<back>
<sec id="s8" sec-type="funding-information">
<title>Funding</title>
<p>The author(s) declare financial support was received for the research, authorship, and/or publication of this article. This work was financially supported by the Jiangsu Province Agricultural Science and Technology Independent Innovation Fund Project (Grant number: CX(21)2006), the Jiangsu Province Key Research and Development Plan Project (BE2021302), the Youth Research Fund of the Beijing Academy of Agriculture and Forestry Sciences (Grant number: QNJJ202013), and the Beijing Academy of Agriculture and Forestry Sciences Science and Technology Innovation Capacity Construction Project (KJCX20210402).</p>
</sec>
<sec id="s9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>Author XZ was employed by company Beijing PAIDE Science and Technology Development Co., Ltd.</p>
<p>The remaining authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s10" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Abanay</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Masmoudi</surname> <given-names>L.</given-names>
</name>
<name>
<surname>El Ansari</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Gonzalez-Jimenez</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Moreno</surname> <given-names>F.-A.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>LIDAR-based autonomous navigation method for an agricultural mobile robot in strawberry greenhouse: agriEco robot</article-title>. <source>AIMS Electron. Electr. Eng.</source> <volume>6</volume>, <fpage>317</fpage>&#x2013;<lpage>328</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3934/electreng.2022019</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aguiar</surname> <given-names>A. S.</given-names>
</name>
<name>
<surname>dos Santos</surname> <given-names>F. N.</given-names>
</name>
<name>
<surname>Sobreira</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Boaventura-Cunha</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Sousa</surname> <given-names>A. J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Localization and mapping on agriculture based on point-feature extraction and semiplanes segmentation from 3D LiDAR data</article-title>. <source>Front. Robot. AI</source> <volume>9</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/frobt.2022.832165</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bai</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Xu</surname> <given-names>N.</given-names>
</name>
<name>
<surname>Zhou</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Shi</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Diao</surname> <given-names>Z.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Vision-based navigation and guidance for agricultural autonomous vehicles and robots: a review</article-title>. <source>Comput. Electron. Agric.</source> <volume>205</volume>, <elocation-id>107584</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2022.107584</pub-id>
</citation>
</ref>
<ref id="B4">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Chen</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Milioto</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Palazzolo</surname> <given-names>E.</given-names>
</name>
<name>
<surname>Giguere</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Behley</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Stachniss</surname> <given-names>C.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Suma++: efficient lidar-based semantic slam</article-title>,&#x201d; in <conf-name>2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)</conf-name>, <conf-loc>Macau, China</conf-loc>. <fpage>4530</fpage>&#x2013;<lpage>4537</lpage> (IEEE).</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname> <given-names>S. W.</given-names>
</name>
<name>
<surname>Nardari</surname> <given-names>G. V.</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>E. S.</given-names>
</name>
<name>
<surname>Qu</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Romero</surname> <given-names>R. A. F.</given-names>
</name>
<etal/>
</person-group>. (<year>2020</year>). <article-title>Sloam: semantic lidar odometry and mapping for forest inventory</article-title>. <source>IEEE Robot. Autom. Lett.</source> <volume>5</volume> (<issue>2</issue>), <fpage>612</fpage>&#x2013;<lpage>619</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/LRA.2019.2963823</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>S. A.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Luo</surname> <given-names>L.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Multi-feature fusion tree trunk detection and orchard mobile robot localization using camera/ultrasonic sensors</article-title>. <source>Comput. Electron. Agric.</source> <volume>147</volume>, <fpage>91</fpage>&#x2013;<lpage>108</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2018.02.009</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Choi</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Park</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>J.-J.</given-names>
</name>
<name>
<surname>Shin</surname> <given-names>Y.-S.</given-names>
</name>
<name>
<surname>Seo</surname> <given-names>H.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Work efficiency analysis of multiple heterogeneous robots for harvesting crops in smart greenhouses</article-title>. <source>Agronomy</source> <volume>12</volume> (<issue>11</issue>), <elocation-id>2844</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy12112844</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dong</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Roy</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Isler</surname> <given-names>V.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Semantic mapping for orchard environments by merging two-sides reconstructions of tree rows</article-title>. <source>J. Field Robot.</source> <volume>37</volume> (<issue>1</issue>), <fpage>97</fpage>&#x2013;<lpage>121</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/rob.21876</pub-id>
</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fu</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Wu</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Zheng</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Zheng</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Zhai</surname> <given-names>C.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Design and experimental verification of the YOLOV5 model implanted with a transformer module for target-oriented spraying in cabbage farming</article-title>. <source>Agronomy</source> <volume>12</volume> (<issue>10</issue>), <elocation-id>2551</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy12102551</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gu</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Zhai</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Hu</surname> <given-names>L. N.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>F.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Detection model of tree canopy leaf area based on LiDAR technology</article-title>. <source>Trans. Chin. Soc Agric. Mach.</source> <volume>52</volume> (<issue>11</issue>), <fpage>278</fpage>&#x2013;<lpage>286</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.6041/j.issn.1000-1298.2021.11.030</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Hess</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Kohler</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Rapp</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Andor</surname> <given-names>D.</given-names>
</name>
</person-group> (<year>2016</year>). &#x201c;<article-title>Real-time loop closure in 2D LIDAR SLAM</article-title>,&#x201d; in <conf-name>2016 IEEE International Conference on Robotics and Automation (ICRA)</conf-name>, <conf-loc>Stockholm, Sweden</conf-loc>. <fpage>1271</fpage>&#x2013;<lpage>1278</lpage> (IEEE).</citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hou</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Pu</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Ding</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>G.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Design and implementation of mobile greenhouse environmental monitoring system based on UWB and internet of things</article-title>. <source>Trans. Chin. Soc Agric. Eng. (Trans. CSAE)</source> <volume>36</volume> (<issue>23</issue>), <fpage>229</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.11975/j.issn.1002-6819.2020.23.027</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Huang</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Shiigi</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Tsay</surname> <given-names>L. W. J.</given-names>
</name>
<name>
<surname>Nakanishi</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Suzuki</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Ogawa</surname> <given-names>Y.</given-names>
</name>
<etal/>
</person-group>. (<year>2021</year>). <article-title>A sound-based positioning system with centimeter accuracy for mobile robots in a greenhouse using frequency shift compensation</article-title>. <source>Comput. Electron. Agric.</source> <volume>187</volume>, <elocation-id>106235</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2021.106235</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Huang</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Sugiyama</surname> <given-names>S.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Research progress and enlightenment of Japanese harvesting robot in facility agriculture</article-title>. <source>Smart Agric.</source> <volume>4</volume> (<issue>2</issue>), <fpage>135</fpage>&#x2013;<lpage>149</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.12133/j.smartag.SA202202008</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jiang</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Ahamed</surname> <given-names>T.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Navigation of an autonomous spraying robot for orchard operations using LiDAR for tree trunk detection</article-title>. <source>Sensors</source> <volume>23</volume> (<issue>10</issue>), <elocation-id>4808</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/s23104808</pub-id>
</citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jiang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Yi</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Lv</surname> <given-names>X.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Autonomous navigation system of greenhouse mobile robot based on 3D lidar and 2D lidar SLAM</article-title>. <source>Front. Plant Sci.</source> <volume>13</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2022.815218</pub-id>
</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jin</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Xu</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Yuan</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Development status and trend of agricultural robot technology</article-title>. <source>Int. J. Agric. Biol. Eng.</source> <volume>14</volume> (<issue>4</issue>), <fpage>1</fpage>&#x2013;<lpage>19</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.25165/j.ijabe.20211404.6821</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lao</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Feng</surname> <given-names>Y.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Path planning of greenhouse robot based on fusion of improved A* algorithm and dynamic window approach</article-title>. <source>Nongye Jixie Xuebao/Trans. Chin. Soc Agric. Mach.</source> <volume>52</volume> (<issue>1</issue>), <fpage>14</fpage>&#x2013;<lpage>22</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.6041/j.issn.1000-1298.2021.01.002</pub-id>
</citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Tan</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Zhai</surname> <given-names>C.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Grading detection method of grape downy mildew based on K-means clustering and random forest algorithm</article-title>. <source>J. Agric. Mach.</source> <volume>53</volume> (<issue>05</issue>), <fpage>225</fpage>&#x2013;<lpage>236</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.6041/j.issn.1000-1298.2022.05.023</pub-id>
</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Long</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Xiang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Lei</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Hu</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Dai</surname> <given-names>X.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Integrated indoor positioning system of greenhouse robot based on UWB/IMU/ODOM/LIDAR</article-title>. <source>Sensors</source> <volume>22</volume> (<issue>13</issue>), <elocation-id>4819</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/s22134819</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Matsuzaki</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Masuzawa</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Miura</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Oishi</surname> <given-names>S.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>3D semantic mapping in greenhouses for agricultural mobile robots with robust object recognition using robots' trajectory</article-title>,&#x201d; in <conf-name>2018 IEEE International Conference on Systems, Man, and Cybernetics (SMC)</conf-name>, <conf-loc>Miyazaki, Japan</conf-loc>. <fpage>357</fpage>&#x2013;<lpage>362</lpage> (IEEE).</citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mendes</surname> <given-names>J. M.</given-names>
</name>
<name>
<surname>dos Santos</surname> <given-names>F. N.</given-names>
</name>
<name>
<surname>Ferraz</surname> <given-names>N. A.</given-names>
</name>
<name>
<surname>do Couto</surname> <given-names>P. M.</given-names>
</name>
<name>
<surname>dos Santos</surname> <given-names>R. M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Localization based on natural features detector for steep slope vineyards</article-title>. <source>J. Intell. Robot. Syst.</source> <volume>93</volume> (<issue>3</issue>), <fpage>433</fpage>&#x2013;<lpage>446</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s10846-017-0770-8</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Nissimov</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Goldberger</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Alchanatis</surname> <given-names>V.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Obstacle detection in a greenhouse environment using the kinect sensor</article-title>. <source>Comput. Electron. Agric.</source> <volume>113</volume>, <fpage>104</fpage>&#x2013;<lpage>115</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2015.02.001</pub-id>
</citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ouyang</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Hatsugai</surname> <given-names>E.</given-names>
</name>
<name>
<surname>Shimizu</surname> <given-names>I.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Tomato disease monitoring system using modular extendable mobile robot for greenhouses: automatically reporting locations of diseased tomatoes</article-title>. <source>Agronomy</source> <volume>12</volume> (<issue>12</issue>), <elocation-id>3160</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy12123160</pub-id>
</citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Palleja</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Landers</surname> <given-names>A. J.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Real time canopy density validation using ultrasonic envelope signals and point quadrat analysis</article-title>. <source>Comput. Electron. Agric.</source> <volume>134</volume>, <fpage>43</fpage>&#x2013;<lpage>50</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2017.01.012</pub-id>
</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Qiao</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Valente</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Su</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>He</surname> <given-names>D.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Editorial: AI, sensors and robotics in plant phenotyping and precision agriculture</article-title>. <source>Front. Plant Sci.</source> <volume>13</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2022.1064219</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Saha</surname> <given-names>K. K.</given-names>
</name>
<name>
<surname>Tsoulias</surname> <given-names>N.</given-names>
</name>
<name>
<surname>Weltzien</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Zude-Sasse</surname> <given-names>M.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Estimation of vegetative growth in strawberry plants using mobile LiDAR laser scanner</article-title>. <source>Horticulturae</source> <volume>8</volume> (<issue>2</issue>), <elocation-id>90</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/horticulturae8020090</pub-id>
</citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shamshiri</surname> <given-names>R. R.</given-names>
</name>
<name>
<surname>Weltzien</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Hameed</surname> <given-names>I. A.</given-names>
</name>
<name>
<surname>Yule</surname> <given-names>I. J.</given-names>
</name>
<name>
<surname>Grift</surname> <given-names>T. E.</given-names>
</name>
<name>
<surname>Balasundram</surname> <given-names>S. K.</given-names>
</name>
<etal/>
</person-group>. (<year>2018</year>). <article-title>Research and development in agricultural robotics: a perspective of digital farming</article-title>. <source>Int. J. Agric. Biol. Eng.</source> <volume>11</volume> (<issue>4</issue>), <fpage>1</fpage>&#x2013;<lpage>14</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.25165/j.ijabe.20181104.4278</pub-id>
</citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shi</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Cui</surname> <given-names>Y.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Integrated navigation by a greenhouse robot based on an odometer/lidar</article-title>. <source>Instrum. Mes. M&#xe9;trol.</source> <volume>19</volume> (<issue>2</issue>), <fpage>91</fpage>&#x2013;<lpage>101</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.18280/i2m.190203</pub-id>
</citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Su</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Shi</surname> <given-names>Y.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Greenhouse tomato picking robot chassis</article-title>. <source>Agriculture</source> <volume>13</volume> (<issue>3</issue>), <elocation-id>532</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agriculture13030532</pub-id>
</citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sun</surname> <given-names>N.</given-names>
</name>
<name>
<surname>Qiu</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Fan</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Ji</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Feng</surname> <given-names>Q.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>Intrinsic calibration of multi-beam LiDARs for agricultural robots</article-title>. <source>Remote Sens.</source> <volume>14</volume> (<issue>19</issue>), <elocation-id>4846</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/rs14194846</pub-id>
</citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>M.</given-names>
</name>
</person-group> (<year>2022</year>a). <article-title>Applications of machine vision in agricultural robot navigation: a review</article-title>. <source>Comput. Electron. Agric.</source> <volume>198</volume>, <elocation-id>107085</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2022.107085</pub-id>
</citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Yan</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Meng</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Yao</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Han</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>B.</given-names>
</name>
</person-group> (<year>2022</year>b). <article-title>DSE-YOLO: detail semantics enhancement YOLO for multi-stage strawberry detection</article-title>. <source>Comput. Electron. Agric.</source> <volume>198</volume>, <elocation-id>107057</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2022.107057</pub-id>
</citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Westling</surname> <given-names>F.</given-names>
</name>
<name>
<surname>Underwood</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Bryson</surname> <given-names>M.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>A procedure for automated tree pruning suggestion using LiDAR scans of fruit trees</article-title>. <source>Comput. Electron. Agric.</source> <volume>187</volume>, <elocation-id>106274</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2021.106274</pub-id>
</citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xie</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Fan</surname> <given-names>Y.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Visual-inertial SLAM in featureless environments on lunar surface</article-title>. <source>Acta Aeronautica et Astronautica Sinica</source> <volume>42</volume> (<issue>1</issue>), <fpage>524169</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.7527/S1000-6893</pub-id>
</citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yan</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Zhou</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>X. A.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Real-time localization and mapping utilizing multi-sensor fusion and visual&#x2013;IMU&#x2013;wheel odometry for agricultural robots in unstructured, dynamic and GPS-denied greenhouse environments</article-title>. <source>Agronomy</source> <volume>12</volume> (<issue>8</issue>), <elocation-id>1740</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy12081740</pub-id>
</citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yao</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Hu</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>Z.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Wireless positioning and path tracking for a mobile platform in greenhouse</article-title>. <source>Int. J. Agric. Biol. Eng.</source> <volume>14</volume> (<issue>1</issue>), <fpage>216</fpage>&#x2013;<lpage>223</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.25165/j.ijabe.20211401.5627</pub-id>
</citation>
</ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhai</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Song</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Status and prospect of intelligent measurement and control technology for agricultural equipment</article-title>. <source>Trans. Chin. Soc. Agric. Mach.</source> <volume>53</volume> (<issue>4</issue>), <fpage>1</fpage>&#x2013;<lpage>20</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.6041/j.issn.1000-1298.2022.04.001</pub-id>
</citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Gong</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Huang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Wu</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>C.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Factor graph-based high-precision visual positioning for agricultural robots with fiducial markers</article-title>. <source>Comput. Electron. Agric.</source> <volume>201</volume>, <elocation-id>107295</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2022.107295</pub-id>
</citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Guo</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Gao</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Sugirbay</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>Y.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Research on 2D laser automatic navigation control for standardized orchard</article-title>. <source>Appl. Sci.</source> <volume>10</volume> (<issue>8</issue>), <elocation-id>2763</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/app10082763</pub-id>
</citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhou</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Ge</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Yu</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>C.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Research on volume prediction of single tree canopy based on three-dimensional (3D) LiDAR and clustering segmentation</article-title>. <source>Int. J. Remote Sens.</source> <volume>42</volume> (<issue>2</issue>), <fpage>738</fpage>&#x2013;<lpage>755</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1080/01431161.2020.1811917</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>
