<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="review-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Bioeng. Biotechnol.</journal-id>
<journal-title>Frontiers in Bioengineering and Biotechnology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Bioeng. Biotechnol.</abbrev-journal-title>
<issn pub-type="epub">2296-4185</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1388609</article-id>
<article-id pub-id-type="doi">10.3389/fbioe.2024.1388609</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Bioengineering and Biotechnology</subject>
<subj-group>
<subject>Review</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Anthropomorphic motion planning for multi-degree-of-freedom arms</article-title>
<alt-title alt-title-type="left-running-head">Zheng et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fbioe.2024.1388609">10.3389/fbioe.2024.1388609</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Zheng</surname>
<given-names>Xiongfei</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2599607/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Han</surname>
<given-names>Yunyun</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/920135/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Liang</surname>
<given-names>Jiejunyi</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1767594/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>State Key Laboratory of Intelligent Manufacturing Equipment and Technology</institution>, <institution>Huazhong University of Science and Technology</institution>, <addr-line>Wuhan</addr-line>, <country>China</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Department of Neurobiology</institution>, <institution>School of Basic Medicine</institution>, <institution>Tongji Medical College</institution>, <institution>Huazhong University of Science and Technology</institution>, <addr-line>Wuhan</addr-line>, <country>China</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2323933/overview">Wujing Cao</ext-link>, Chinese Academy of Sciences (CAS), China</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2514292/overview">Bingshan Hu</ext-link>, University of Shanghai for Science and Technology, China</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1262283/overview">Chunjie Chen</ext-link>, Chinese Academy of Sciences (CAS), China</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Jiejunyi Liang, <email>jiejunyiliang@gmail.com</email>; Yunyun Han, <email>yhan@hust.edu.cn</email>
</corresp>
</author-notes>
<pub-date pub-type="epub">
<day>28</day>
<month>05</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>12</volume>
<elocation-id>1388609</elocation-id>
<history>
<date date-type="received">
<day>20</day>
<month>02</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>13</day>
<month>05</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2024 Zheng, Han and Liang.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Zheng, Han and Liang</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>With the development of technology, the humanoid robot is no longer a concept, but a practical partner with the potential to assist people in industry, healthcare and other daily scenarios. The basis for the success of humanoid robots is not only their appearance, but more importantly their anthropomorphic behaviors, which is crucial for the human-robot interaction. Conventionally, robots are designed to follow meticulously calculated and planned trajectories, which typically rely on predefined algorithms and models, resulting in the inadaptability to unknown environments. Especially when faced with the increasing demand for personalized and customized services, predefined motion planning cannot be adapted in time to adapt to personal behavior. To solve this problem, anthropomorphic motion planning has become the focus of recent research with advances in biomechanics, neurophysiology, and exercise physiology which deepened the understanding of the body for generating and controlling movement. However, there is still no consensus on the criteria by which anthropomorphic motion is accurately generated and how to generate anthropomorphic motion. Although there are articles that provide an overview of anthropomorphic motion planning such as sampling-based, optimization-based, mimicry-based, and other methods, these methods differ only in the nature of the planning algorithms and have not yet been systematically discussed in terms of the basis for extracting upper limb motion characteristics. To better address the problem of anthropomorphic motion planning, the key milestones and most recent literature have been collated and summarized, and three crucial topics are proposed to achieve anthropomorphic motion, which are motion redundancy, motion variation, and motion coordination. The three characteristics are interrelated and interdependent, posing the challenge for anthropomorphic motion planning system. 
To provide some insights for the research on anthropomorphic motion planning, and improve the anthropomorphic motion ability, this article proposes a new taxonomy based on physiology, and a more complete system of anthropomorphic motion planning by providing a detailed overview of the existing methods and their contributions.</p>
</abstract>
<kwd-group>
<kwd>anthropomorphic</kwd>
<kwd>motion planning</kwd>
<kwd>arms</kwd>
<kwd>motion redundancy</kwd>
<kwd>motion variation</kwd>
<kwd>motion coordination</kwd>
</kwd-group>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Biomechanics</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>Robots, following meticulously calculated and planned trajectories, have been providing safer and more efficient working environments for humans with superior quality in many scenarios. Especially in industrial manufacturing, robotic arms can even independently perform various tasks such as handling, machining, and assembling in specified conditions, which dramatically improves the productivity.</p>
<p>However, conventional motion planning techniques typically rely on predefined algorithms and models that may not be adaptable to new environments. Especially when faced with the increasing demand for personalized and customized services, predefined motion planning cannot be adjusted in time to adapt to personal behavior, which will seriously affect the efficiency of task completion. In this case, robots need to establish a stronger connection with humans by increasing interaction and expanding the human-robot sharing space in order to develop algorithms and models that could adapt to the individual preferences, habits, and needs.</p>
<p>Recent researches demonstrated that humans are more inclined to accept actions similar to themselves during human-robot interactions (<xref ref-type="bibr" rid="B4">Arkin and Moshkina, 2014</xref>; <xref ref-type="bibr" rid="B18">Dragan and Srinivasa, 2014</xref>). To meet this requirement, researches across the world have been initiated to improve human-robot interaction by enhancing the anthropomorphism of robot motion (<xref ref-type="bibr" rid="B40">Kiesler et al., 2008</xref>; <xref ref-type="bibr" rid="B45">K&#xfc;hnlenz et al., 2013</xref>). There are three main scenarios, service robots, new industrial robots, and wearable robots (exoskeletons), where anthropomorphism of robot arm motion is highly demanding and the robots need to interact and collaborate with humans in a shared human-robot interaction space. For anthropomorphic service robots, adopting anthropomorphic motion can significantly enhance the robot&#x2019;s similarity to humans, foster a greater sense of familiarity, and thus increase the robot&#x2019;s acceptance among users. For new industrial robots, anthropomorphic motion can enhance not only the synchronicity between the workers and the robots during collaborative process, but also the efficiency and overall safety of the human-robot interaction. During the interaction, the workers can accurately and promptly comprehend the robots&#x2019; behavior, which enables them to make rational assumptions about the robots&#x2019; motion patterns. When there is a risk of collision between a robot and a worker or the environment, the worker can take prompt action to avoid collisions and increase the safety. In addition, anthropomorphic motion provides a better way to interact, which greatly reduces the training time of the worker. For wearable robots, anthropomorphic motion has a more direct impact on the therapeutic performance of rehabilitation training. 
For exoskeletons used to enhance human function, if the motion does not match the way that the patients move, the rehabilitation training will not only fail to enable the patient to regain movement ability, but may cause secondary damage to the patient.</p>
<p>How to achieve anthropomorphic motion in robots? An analysis of human movement shows that the process of human movement at the physiological level can be represented by the process chain: neural commands-muscle activation-joint motion-hand movement-task goal (<xref ref-type="bibr" rid="B24">Flash et al., 2013</xref>). Inspired by this chain, current researches on anthropomorphic motion mainly focus on three directions: anthropomorphic structural design, anthropomorphic trajectory generation, and anthropomorphic motion control (<xref ref-type="bibr" rid="B46">Kulic et al., 2016</xref>).</p>
<p>In anthropomorphic structural design, researchers have developed humanoid robots that closely resemble humans in appearance, joint structure, and motion by modeling the human musculoskeletal system. This kind of design is inspired by biology and based on research in human anatomy, kinesiology, and biomechanics (<xref ref-type="bibr" rid="B69">Ogawa et al., 2011</xref>; <xref ref-type="bibr" rid="B73">Paik et al., 2012</xref>; <xref ref-type="bibr" rid="B50">Lenzi et al., 2016</xref>), which has contributed to an increased acceptance and trust among users. However, these robots are still challenged in mimicking the flexibility, elasticity and stability of the human limbs.</p>
<p>In anthropomorphic trajectory generation, researchers tried to explore the human upper limb movement laws from the motion posture and trajectory, combine it with human kinematics and physiology, and determine the optimal motion trajectories and movement sequences through simulation and experimental validation, so as to make the robot&#x2019;s motion more natural, smooth, and match the physiological characteristics of the human. Specifically, by studying the correlations and variations between the rotation angles of certain joints (e.g., elbow elevation angle (<xref ref-type="bibr" rid="B42">Kim et al., 2006</xref>)) and hand postures, researchers have generated anthropomorphic motion for robotic arms (<xref ref-type="bibr" rid="B117">Zanchettin et al., 2013</xref>; <xref ref-type="bibr" rid="B89">Su et al., 2018</xref>). In addition, the researchers found some motion characteristics, such as bell-shaped velocity curve (<xref ref-type="bibr" rid="B20">Ferrer et al., 2023</xref>), sinusoidal acceleration curve (<xref ref-type="bibr" rid="B64">Morasso, 1981</xref>), bell-shaped positional variance (<xref ref-type="bibr" rid="B97">Taniai et al., 2022</xref>), Fitts&#x2019;s Law (<xref ref-type="bibr" rid="B22">Fitts, 1954</xref>), and temporal distribution (<xref ref-type="bibr" rid="B115">Young et al., 2009</xref>), for analyzing physical quantities, such as joint velocities, accelerations, and trajectories during the natural movement of the upper limbs, and used them as criteria for generating anthropomorphic trajectories for robotic arms. However, these studies only focus on the kinematic nature of upper limb movement, and have not tapped into the cornerstones of upper limb movement laws that underlie anthropomorphic motion generation in robotic arms. <xref ref-type="bibr" rid="B34">Guigon et al. 
(2007)</xref> assumed that the motor control is governed by four principles (separation principle, optimal feedback control principle, maximum efficiency principle, constant effort principle) by building a computational model, and attempted to provide a unified explanation of biological motor behavior. <xref ref-type="bibr" rid="B102">von Zitzewitz et al. (2013)</xref> argued that robot perception plays a crucial role in human-robot interaction, and anthropomorphism as a factor of interaction efficiency should not be considered as a single parameter, but as a variable influenced by other parameters. They proposed to divide the network of parameter fields describing anthropomorphism into two categories: appearance and behavior (<xref ref-type="bibr" rid="B62">Minato et al., 2012</xref>) to describe the static and dynamic states of the robot, respectively. However, these motion parameters only describe possible similar aspects of robots and humans from multiple perspectives, but do not provide quantitative anthropomorphic metrics that can be directly used as criteria for generating anthropomorphic motion.</p>
<p>In anthropomorphic motion control, researchers are trying to explore how to achieve precise control and adaptive regulation of robot motion by mimicking human movement styles and behavioral characteristics, so that robot motion will have similar motor capabilities to those of humans, which includes accurate collection and processing of sensor data, as well as real-time adjustment and optimization of control algorithms. Most approaches rely on high-gain control and fast control loops that enable robots to perform specific tasks in structured environments, but are unable to deal with unexpected disturbances or system variations, and do not simulate the flexibility, versatility, and robustness of human movement control.</p>
<p>Among the three research directions, anthropomorphic trajectory generation can provide input for anthropomorphic motion planning based on human motion characteristics, which is crucial for robots to realize anthropomorphic motion. It enables robots with natural and smooth motion, enhances their adaptability and safety, and improves the performance of human-robot interaction so that robots have more anthropomorphic motion and behavior characteristics. Overall, current studies have made some progress in improving the anthropomorphism of robot motion, but there is still no consensus on the criteria by which anthropomorphic motion is accurately generated. The main reason is the criteria derived from existing research may not be able to fully cover the most important aspects concerning the similarity between robots and humans.</p>
<p>In recent years, researchers have gradually deepened the study of anthropomorphic motion planning and applied it to humanoid robots, which has made considerable progress. Service robots have gradually been a part of people&#x2019;s daily lives, cooperating with them in a friendly way (<xref ref-type="bibr" rid="B76">Potkonjak et al., 2001</xref>). New industrial robots can not only work closely with human workers to perform complex manufacturing and assembly tasks, but can also operate independently in harsh environments such as high temperatures and pressures, increasing the efficiency of industrial production and ensuring worker safety (<xref ref-type="bibr" rid="B116">Zacharias et al., 2011</xref>). Wearable robots enhance or reconstruct the natural movement of disabled limbs (<xref ref-type="bibr" rid="B87">Soltani Zarrin et al., 2021</xref>). These products dramatically improve efficiency and deliver better care and services that not only improve quality of life, but also drive technological advancement and innovation. At the same time, biomechanics, neurophysiology, and exercise physiology have advanced our understanding of the body&#x2019;s mechanisms for generating and controlling movement, and upper limb motion patterns were progressively resolved, which provides a physiological basis for motion characteristic extraction. With the help of tools in statistics and computer graphics, researchers can discover the laws embedded in the upper limb movement data (or movement sequences), extract the motion characteristics, describe them intuitively and quantitatively, realistically show the upper limb movement status and motion characteristics, build models to describe human movement behaviors, so as to replicate human movement on a humanoid robot as closely as possible through motion planning. 
In addition, the increasing computational capability of motion models has facilitated the continuous improvement of motion control schemes, which in turn has promoted in-depth exploration of the nature of motion. At the same time, concepts such as the &#x201c;spatiotemporal characteristics&#x201d; inherent in the movement process have been proposed as new anthropomorphic evaluation criteria. With the development of virtual reality, machine learning, intent recognition, semantic grasping, and other related technologies, motion accuracy has been significantly improved, which has driven the emergence of new anthropomorphic motion planning methods to some extent.</p>
<p>However, the current anthropomorphic motion planning algorithms still have some problems in practical applications, that need to be further improved and solved. First, the modeling of the biomechanical characteristics of the human movement is not investigated enough. Anthropomorphic motion planning algorithms are often based on simplified models of human biomechanics, ignoring many details and complexities, which can result in the differences between the movements of robots and human, and the lack of biomechanical naturalness. As a result, biomechanical characteristics such as human bones, muscles, and joints must be more accurately and meticulously modeled to improve the realism and fidelity of robotic motion. Second, there is a lack of understanding of human movement variation. Human upper limb movement has some individual variation and can vary considerably from person to person. Moreover, unlike the lower limbs, the upper limbs do not have a single, periodic functional activity, which makes it difficult to establish a standardized experimental paradigm for the upper limbs. However, current anthropomorphic motion planning algorithms are typically modeled based on average motion data or data from a small number of subjects, ignoring the individual variation, which leads to a lack of personalization and diversity in robot motion. Third, there is a lack of in-depth research on neurophysiology and exercise physiology. Human upper limb movement involves the coordination of multiple neuromuscular systems and complex neural signaling control processes. However, current anthropomorphic motion planning algorithms have an insufficient understanding of these neurophysiological and exercise physiological mechanisms and lack detailed modeling and simulation of neuromuscular models and motor control signals. 
Therefore, further in-depth studies of neurophysiology and exercise physiology are needed to incorporate these physiological characteristics into anthropomorphic motion planning algorithms to improve the biomimicry and realism of motion. Fourth, the problem of motion planning and obstacle avoidance in complex environments has not been fully solved. In practical applications, robots often need to plan their motion and avoid obstacles in complex, dynamic environments. The potential failures coming from the unpredictability of robot-human interactions still troubles the users, which seriously hinders the large-scale application of humanoid robots. Further research is needed on how to generate adaptive and flexible anthropomorphic motion that take into account environmental constraints. This may involve the integration of perception, planning, and control, as well as accurate modeling and real-time updating of environmental information. Fifth, current humanoid robots still have limited autonomy and adaptability. Most anthropomorphic motion planning algorithms (including inverse kinematics methods (<xref ref-type="bibr" rid="B51">Li G. et al., 2019</xref>; <xref ref-type="bibr" rid="B52">Li et al., 2022</xref>), visual teaching (<xref ref-type="bibr" rid="B47">Kuniyoshi et al., 1994</xref>), reference path generation for upper limb rehabilitation exoskeletons (<xref ref-type="bibr" rid="B88">Soltani-Zarrin et al., 2017</xref>), optimal control methods (<xref ref-type="bibr" rid="B92">Ta&#xef;x et al., 2013</xref>; <xref ref-type="bibr" rid="B28">Geoffroy et al., 2014</xref>), etc.) rely on offline planning and must be pre-programmed or rely on external commands to perform the task, and are simply not capable of continuously performing complex tasks outside of a specific work environment. 
Sixth, the complexity of the human body&#x2019;s own movement laws leads to the fact that a single (or several) anthropomorphic criterion is still unable to describe most of the human movements, resulting in a lack of anthropomorphism in robot motion, which is far from natural human movement behavior. Therefore, the existing anthropomorphic motion planning methods are still not sufficient for practical applications.</p>
<p>Despite the existence of articles that provide a cursory overview of classification methods for anthropomorphic motion planning of robotic arms, including sampling-based (random search), optimization-based (constrained optimization), and imitation-based (demonstration learning) approaches, these overviews typically rely on simple distinctions based on the nature of the planning algorithms. However, they lack a systematic examination of the rationale behind the extraction of upper limb motion characteristics using these methods. Furthermore, these characteristics do not comprehensively capture the full range of upper limb motion patterns and fail to elaborate on their specific roles in the development of an anthropomorphic motion planning framework. Some research has revealed the existence of invariant motion characteristics in the natural movement of human upper limbs (<xref ref-type="bibr" rid="B86">Soechting and Lacquaniti, 1981</xref>; <xref ref-type="bibr" rid="B6">Atkeson and Hollerbach, 1985</xref>), which contribute to the uniqueness of human motion behavior and its difficulty to emulate or replicate. Despite the general similarity in morphological structure and motion patterns between current humanoid robotic arms and human upper limbs, the lack of comprehensive guidance from human upper limb movement laws prevents the achievement of highly anthropomorphic motion.</p>
<p>To facilitate the realization of more natural anthropomorphic motion in humanoid robotic arms, the key milestones and most recent literature have been collated and summarized, and three essential conditions have been identified. These are: 1) Motion redundancy. It is crucial for achieving the flexibility and accuracy of human upper limb movement through different motion patterns, serving as the foundation for humans&#x2019; robust motion capabilities and interactive abilities. 2) Motion variation. It accounts for the diversity and individual variation in human upper limb movement, representing a unique capacity for adaptation, self-learning, and continuous evolution. 3) Motion coordination. It ensures the efficiency and stability of human upper limb movement by functional control, providing a safeguard for generating and controlling motion while maintaining inertia. These three characteristics are interrelated and interdependent, as shown in <xref ref-type="fig" rid="F1">Figure 1</xref>, posing a challenge for the anthropomorphic motion planning framework. Therefore, this article systematically analyzes the anthropomorphic motion planning methods in recent years, with a particular focus on the concepts of motion redundancy, motion variation and motion coordination, and discusses the limitations and challenges.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>A frame of anthropomorphic motion planning system composed of three components.</p>
</caption>
<graphic xlink:href="fbioe-12-1388609-g001.tif"/>
</fig>
</sec>
<sec id="s2">
<title>2 Motion redundancy</title>
<p>Humans can adjust the posture of the upper limbs according to the position of the target to perform the task with appropriate movements, such as surgeons operating on small wounds according to different circumstances, &#x201c;cutting&#x201d;, &#x201c;suturing&#x201d;, &#x201c;knotting&#x201d; and others to ensure the success of the operation, all of which rely on the flexibility provided by the redundancy of the upper limbs. The redundancy of the upper limbs provides humans with a wealth of motor skills, adaptations, and means of perceptual communication that enhance their ability to interact and adapt with others and the environment.</p>
<p>In practical scenarios, robotic arms not only have to interact with humans, but also have to take into account obstacle avoidance and joint limitations. A common method for generating anthropomorphic motion trajectories is to preplan the collision-free waypoints of the end-effector in Cartesian space using path planning algorithms such as PRM (<xref ref-type="bibr" rid="B39">Kavraki et al., 1996</xref>), RRT (<xref ref-type="bibr" rid="B44">Kuffner and LaValle, 2000</xref>), and CHOMP (<xref ref-type="bibr" rid="B123">Zucker et al., 2013</xref>), and then establish a mapping relationship between Cartesian space and joint space, and solve for the joint angles of the robotic arm at different moments by inverse kinematics. The trajectories of each joint are generated by interpolation. Commonly used interpolation methods are cubic polynomials, quintic polynomials, and spline curves (<xref ref-type="bibr" rid="B110">Xie et al., 2011</xref>; <xref ref-type="bibr" rid="B125">Hu et al., 2023</xref>; <xref ref-type="bibr" rid="B53">Li et al., 2019</xref>; <xref ref-type="bibr" rid="B104">Wang et al., 2020</xref>). The key to this approach is to develop an appropriate kinematic/dynamical model for the upper limbs to solve the inverse kinematics.</p>
<p>In studies, human upper limbs are often regarded as an articulated structure composed of connecting rods (bones) and joints with a high degree of redundancy, where different joints cooperate in a variety of combinations to perform complex tasks according to different needs (<xref ref-type="bibr" rid="B46">Kulic et al., 2016</xref>; <xref ref-type="bibr" rid="B105">Wei and Zhao, 2019</xref>). It is generally accepted that the human upper limbs have 10 DoFs, while the 10-DoFs robotic arm is too flexible to control (<xref ref-type="bibr" rid="B56">Liu and Xiong, 2013</xref>). For simplicity, a simplified 7-DoFs robotic arm with only three joints: shoulder, elbow, and wrist, was modeled as a sphere-revolute-sphere structure (<xref ref-type="bibr" rid="B109">Xia et al., 2021</xref>), which was roughly the same as the human upper limb in shape and motion style, and has become the mainstream approach. This kind of anthropomorphic design is the basis for realizing human-like behaviors (<xref ref-type="bibr" rid="B19">Fang et al., 2019</xref>). Meanwhile, a study proposed the use of Rapid Upper Limb Assessment (RULA) to evaluate the naturalness of the humanoid robotic arm configuration (<xref ref-type="bibr" rid="B116">Zacharias et al., 2011</xref>). However, the human-like appearance and configuration of a robotic arm alone is not enough to generate anthropomorphic motion. The reason is the generation mechanism of natural human movement is still not fully revealed, that is, how humans deal with redundancy in the upper limbs during complex motion. From a physiological perspective, the redundancy of the human upper limbs is primarily attributable to the central nervous system&#x2019;s ability to control the contraction and relaxation of muscle groups through intricate neural networks and signaling pathways. 
This control mechanism enables the precise control of multiple muscles corresponding to multiple joints, thereby facilitating the precise control of multiple joints in the upper limbs. The problem of redundancy in humanoid robotic arms exists at the level of kinematics and dynamics, which leads to an infinite number of inverse kinematics solutions. How to solve the inverse kinematics problem and find the best solution that fits the configuration of the robotic arm among countless solutions is one of the difficulties in anthropomorphic motion planning.</p>
<p>Overall, there are three typical redundancy problems in the human upper limbs: 1) Trajectory redundancy in Cartesian space. That is, for a given task, the hand has multiple realization paths with non-unique trajectories. 2) Trajectory redundancy in joint space. That is, given a time-varying motion trajectory of the hand, it is still not possible to uniquely determine the kinematic parameters such as direction, angle, and velocity of each joint over time. 3) Redundancy in joint muscle forces and moments. Even after the trajectories of the joints over time are determined, it is still not possible to uniquely determine the forces and moments exerted on the joints by each muscle. A plethora of studies have been conducted to investigate these issues. Based on the laws presented during the natural movement of the human upper limbs for classification, the prevailing methodologies can be broadly categorized into two main categories. The first category involves constraining the optimization of a cost function representing the natural motion characteristics. The second category leverages the unique relationship between the joints of the upper limbs presented in natural motion.</p>
<sec id="s2-1">
<title>2.1 Constrained optimization</title>
<p>A robotic arm may face multiple constraints, such as environmental constraints, task constraints, and coordination constraints, as it performs various operational tasks in an unstructured scenario. These constraints increase the difficulty of solving inverse kinematics. There is an argument that the redundancy problem can be viewed as a constrained optimization problem (<xref ref-type="bibr" rid="B101">Tommasino and Campolo, 2017</xref>). Solving inverse kinematics is essentially solving a nonlinear optimization problem where the optimal solution can be obtained by minimizing the cost function as follows<disp-formula id="e1">
<mml:math id="m1">
<mml:mrow>
<mml:mfenced open="{" close="" separators="&#x7c;">
<mml:mrow>
<mml:mtable columnalign="center">
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>min</mml:mi>
<mml:mtext>&#x2002;</mml:mtext>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mtext>&#x2009;over&#x2009;</mml:mtext>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi mathvariant="normal">s</mml:mi>
<mml:mo>.</mml:mo>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mo>.</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:msub>
<mml:mi>g</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2264;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>where <inline-formula id="inf1">
<mml:math id="m2">
<mml:mrow>
<mml:mi>f</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is a nonlinear function/cost function, <inline-formula id="inf2">
<mml:math id="m3">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the state vector of the robotic arm, that is, the values for all joint space. In motion planning, <inline-formula id="inf3">
<mml:math id="m4">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is not a single state at a given moment, but all states along the entire planning path, that is, <inline-formula id="inf4">
<mml:math id="m5">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mn>0</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mo>&#x22ef;</mml:mo>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mo>&#x22ef;</mml:mo>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>T</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>. <inline-formula id="inf5">
<mml:math id="m6">
<mml:mrow>
<mml:mi>g</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is an inequality constraint, including linear and nonlinear constraints, which aims to strictly control the feasibility of the trajectory of the robotic arm and ensure that the robotic arm does not collide with objects. <inline-formula id="inf6">
<mml:math id="m7">
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the equality constraint, that is, the target assigned to the robotic arm.</p>
<p>The cost function is mainly derived from the laws of natural movement of upper limbs. The study of point-to-point reaching movement of the human upper limbs revealed two of the most important motion characteristics, straight paths and bell-shaped speed profiles (<xref ref-type="bibr" rid="B23">Flash and Hogan, 1985</xref>; <xref ref-type="bibr" rid="B98">Todorov, 2004</xref>). In (<xref ref-type="bibr" rid="B3">Arimoto and Sekimoto, 2006</xref>), these two characteristics were used as criteria to determine the degree of anthropomorphism of the robotic arm&#x2019;s trajectories. However, the criteria may not be sufficient for complex upper limb movement such as manual dexterity tasks like writing with a pen, threading a needle, or carving with a knife (<xref ref-type="bibr" rid="B84">Shin and Kim, 2015</xref>). To solve this problem, by thoroughly studying the smoothness of the upper limb trajectories, <xref ref-type="bibr" rid="B99">Todorov and Jordan (1998)</xref> found that a significant acceleration can cause a shock or jolt in the movement process, and that the human body maintains a very small acceleration during movement to prevent self-injury of the musculoskeletal system, which led to the proposal of an optimization criterion that used the minimum acceleration as a cost function. <xref ref-type="bibr" rid="B54">Li et al. (2020)</xref> used the minimum potential energy as an optimization criterion to achieve precise control of the motion to reduce the magnitude of the potential energy change of the upper limb exoskeleton and ensure the smoothness of the motion trajectory. In addition, many studies also used other laws of motion as cost functions, such as minimum torque (<xref ref-type="bibr" rid="B38">Kang et al., 2003</xref>), minimum time (<xref ref-type="bibr" rid="B96">Tangpattanakul and Artrit, 2009</xref>), and minimum joint torque change (<xref ref-type="bibr" rid="B103">Wada et al., 2001</xref>). 
These cost functions were used to explain the principles of how humans generate natural movement, that is, the existence of invariant motion characteristics in the upper limbs that are independent of factors such as target, motion magnitude and direction, initial position, and external loads. These criteria chosen for the cost function were derived from the regular analysis of human upper limb trajectories, which represent the common characteristics of most upper limb movement, and were used in most studies to evaluate whether the trajectory is anthropomorphic or not.</p>
<p>However, the application scenarios of the above methods are limited to point-to-point movement (where the shoulder is assumed to be stationary during movement) and are not suitable for activities of daily living (scenarios where the center of the shoulder is moving in real time). Therefore, it has been suggested that a single cost function can only partially explain the anthropomorphism of the upper limb movement, which only works under special movement and cannot be applied to most scenarios. As a result, it does not provide enough flexibility to the robotic arm. The combination of several cost functions may be the solution to this problem (<xref ref-type="bibr" rid="B10">Berret et al., 2011</xref>).</p>
<p>By investigating the interaction between nonlinear muscle dynamics and control principles based on previous work, <xref ref-type="bibr" rid="B107">Wochner et al. (2020)</xref> argued that the human body follows a combination of independent and recognized criteria for optimality when controlling the upper limbs to generate optimal trajectories. They used the combined cost function of smoothness (to prevent damage to the musculoskeletal system itself), energy (to reduce energy consumption during movement), and internal force (necessary for human movement) as a new optimization criterion to reveal the contribution of human muscle dynamics in point-to-manifold motion, which in turn generates anthropomorphic trajectories. Based on this research, <xref ref-type="bibr" rid="B1">Albrecht et al. (2011)</xref> assigned weighting factors to different optimization criteria to combine them into a new cost function, and simplified the multi-objective optimization problem by adjusting the weight factors to balance the relationship between multiple objectives, thus finding the optimal anthropomorphic trajectory that matches the configuration of the robotic arm. For conciseness, a brief summary of constrained optimization methods is shown in <xref ref-type="table" rid="T1">Table 1</xref>.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Approaches of constrained optimization.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Study</th>
<th align="center">Task</th>
<th align="center">Anthropomorphic criterion</th>
<th align="center">Approach</th>
<th align="center">Contribution</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">
<xref ref-type="bibr" rid="B99">Todorov and Jordan (1998)</xref>
</td>
<td align="center">Complex arm movements</td>
<td align="center">Maximum smoothness</td>
<td align="center">Constrained minimum-jerk model</td>
<td align="center">Stronger relationship between the path and the speed profile</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B103">Wada et al. (2001)</xref>
</td>
<td align="center">Point-to-point movements</td>
<td align="center">Minimum commanded torque change</td>
<td align="center">A prediction algorithm using the Euler-Poisson equation</td>
<td align="center">Obtain the converged solution in a very short time</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B38">Kang et al. (2003)</xref>
</td>
<td align="center">Reaching movements</td>
<td align="center">Minimum joint torque</td>
<td align="center">Minimum-torque model</td>
<td align="center">Determine arm configurations during normal and natural movements</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B3">Arimoto and Sekimoto (2006)</xref>
</td>
<td align="center">Reaching movements</td>
<td align="center">Straight paths, bell-shaped speed profiles</td>
<td align="center">Virtual spring-damper hypothesis</td>
<td align="center">Resolve the ill-posedness of inverse kinematics</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B96">Tangpattanakul and Artrit (2009)</xref>
</td>
<td align="center">Simulation of consecutive via-points</td>
<td align="center">Minimum time</td>
<td align="center">Harmony search algorithm</td>
<td align="center">Obtain the optimal interval time and reduce complication and time consumption</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B1">Albrecht et al. (2011)</xref>
</td>
<td align="center">Reaching-to-a-bar tasks</td>
<td align="center">Mechanical energy, joint smoothness</td>
<td align="center">Inverse optimal control</td>
<td align="center">Support the cost combination hypothesis</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B84">Shin and Kim (2015)</xref>
</td>
<td align="center">Reaching, grasping, moving an object</td>
<td align="center">Compare the hand and elbow trajectories through the simulations and experiments</td>
<td align="center">Lagrangian multiplier optimization method</td>
<td align="center">Human-likeness depends on the purpose of given tasks</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B54">Li et al. (2020)</xref>
</td>
<td align="center">Path tracking</td>
<td align="center">Minimal potential energy</td>
<td align="center">Zeroing dynamics method</td>
<td align="center">Track the desired motion path accurately</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B107">Wochner et al. (2020)</xref>
</td>
<td align="center">Point-to-manifold reaching movements</td>
<td align="center">Smoothness, energy, internal force</td>
<td align="center">Bayesian optimization</td>
<td align="center">A mixed cost function replicates the behavior much better than single criterion</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>These approaches can accurately identify the optimal solution that satisfies the constraints through mathematical optimization techniques. However, they entail a significant computational burden, particularly when searching for the optimal solution in a high-dimensional space, which may result in high computational costs.</p>
</sec>
<sec id="s2-2">
<title>2.2 Special relationships between joints</title>
<p>In addition to optimization methods, dimensionality reduction is another idea for dealing with redundancy: explore the special relationships between the shoulder, elbow, and wrist in the natural motion of the upper limbs to reduce redundant DoFs so as to obtain optimal inverse kinematic solutions.</p>
<p>The complete movement process of the upper limbs can be regarded as a process quantity. Each moment in the process corresponds to the posture of the upper limb and can be considered as a state quantity. The solution of the inverse kinematics of redundant arms can be decomposed into a finite number of state quantities. In order to find a suitable state quantity to describe the upper limb posture, the concept of arm triangle was introduced in (<xref ref-type="bibr" rid="B9">Berman et al., 2008</xref>). In (<xref ref-type="bibr" rid="B83">Seraji, 1989</xref>), a plane consisting of the shoulder, elbow, and wrist joints is used to describe the posture of the upper limbs, and the upper limbs are free to rotate around the shoulder and elbow joints, respectively. <xref ref-type="bibr" rid="B58">Liu et al. (2016)</xref> proposed a wrist-elbow-in-line method based on the similarity of the kinematic structures of the human upper limb and the humanoid robotic arm. The method introduced the elbow and wrist joint positions as key positions and reduced redundancy by using them as end-effector orientation constraints of the robotic arm. The positions of elbow and wrist joints in Cartesian space were used as configuration parameters of the robotic arm, and the anthropomorphic configuration was obtained by inverse kinematic analysis. However, due to the different lengths and joint limitations of the human upper limb and the robotic arm, the robotic arm is unable to create an anthropomorphic configuration at all times, which makes it difficult to perform fully anthropomorphic motion throughout the workspace. <xref ref-type="bibr" rid="B5">Artemiadis et al. (2010)</xref> realized that the shoulder and elbow joints are more flexible than the wrist joint by observing human writing movements. They also found that the three joints of the shoulder, elbow, and wrist are highly interconnected to form a specific plane, and this plane is deflected during movement. 
The angle of rotation formed by the deflection is unique, and is defined as the elbow elevation angle (<xref ref-type="bibr" rid="B42">Kim et al., 2006</xref>). Then they used the constraint equation formed by the elbow elevation angle to reduce the redundancy to obtain the kinematic inverse solution, and then obtain the best anthropomorphic motion trajectory that meets the human posture. However, this method ignores the effect of wrist posture on upper limb movement. <xref ref-type="bibr" rid="B118">Zanchettin et al. (2011)</xref> improved this method by taking wrist posture into account and using least-squares cluster analysis to derive the relationship with elbow elevation. <xref ref-type="bibr" rid="B41">Kim et al. (2012)</xref> proposed an inverse kinematics-based rotation angle estimation algorithm by linearly combining two different rotation angles resulting from kinematic and dynamic constraints. The algorithm successfully reproduced the natural motion of the human upper limbs with an error of less than 5&#xb0; compared to real human movement and can be applied to wearable exoskeleton robots. <xref ref-type="bibr" rid="B90">Su et al. (2019)</xref> used a new deep convolutional neural network to establish the mapping relationship between rotation angle and hand pose, which improves the accuracy and iteration speed of motion reconstruction with strong robustness. A brief summary of using special relationships between joints to solve motion redundancy is listed in <xref ref-type="table" rid="T2">Table 2</xref>.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Approaches of special relationships between joints.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Study</th>
<th align="center">Task</th>
<th align="center">Anthropomorphic criterion</th>
<th align="center">Approach</th>
<th align="center">Contribution</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">
<xref ref-type="bibr" rid="B42">Kim et al. (2006)</xref>
</td>
<td align="center">Point-to-point hand motion</td>
<td align="center">Elbow elevation angle</td>
<td align="center">Response surface methodology</td>
<td align="center">First propose a mathematical representation for characterizing human arm motion</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B118">Zanchettin et al. (2011)</xref>
</td>
<td align="center">Hand motion along a sphere</td>
<td align="center">Swivel angle</td>
<td align="center">Cluster and weighted least-square approach</td>
<td align="center">Provide a repeatable and identifiable kinematic constraint</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B41">Kim et al. (2012)</xref>
</td>
<td align="center">Natural human arm movement</td>
<td align="center">Swivel angle</td>
<td align="center">Kinematic and dynamic constraint</td>
<td align="center">Reproduces natural human arm movement with less than five degrees of estimation error</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B58">Liu et al. (2016)</xref>
</td>
<td align="center">Self-motion</td>
<td align="center">Self-motion angle</td>
<td align="center">Wrist-elbow-in-line method</td>
<td align="center">Validated in practice and extended for obstacle avoidance</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B90">Su et al. (2019)</xref>
</td>
<td align="center">Swivel motion</td>
<td align="center">Elbow angle</td>
<td align="center">Deep convolutional neural network</td>
<td align="center">Reduce online prediction time, noise robustness</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>In comparison to complex mathematical optimization methods, these methods may exhibit higher computational efficiency and be suitable for application scenarios with high real-time requirements. However, due to the complexity of human motion, a single natural motion relation may not be applicable to all types of redundancy problems, thus requiring customized designs for different problems.</p>
<p>The motion redundancy in the upper limbs is of great importance as a primary solution in anthropomorphic motion planning. A variety of inverse kinematics methods proposed by the researchers provide ideas for solving the redundancy problem, which greatly advance the development of anthropomorphic motion planning. However, there are still some problems with current methods: inverse kinematics solution methods in the joint space lack sufficient physiological basis, are inadequate to ensure the variations in the motion process, and are vulnerable to interference.</p>
</sec>
</sec>
<sec id="s3">
<title>3 Motion variation</title>
<p>Humans can accomplish the same task in different ways, for example, when a blacksmith repeatedly strikes an iron block, the trajectory of the strike is different each time, which suggests that there is no rigidly fixed pattern of repetitive movements. We call this phenomenon motion variation. A large number of studies have confirmed that kinematic variation is considered to be a control strategy for the human motor system and also an intrinsic characteristic of multi-degree-of-freedom limb motion (<xref ref-type="bibr" rid="B48">Latash et al., 2002</xref>).</p>
<p>Motion variation represents the diversity of human movement, which is the difference in control, motion patterns, and experience habits of different individuals. These differences include the pattern of muscle activity, the variation of joint angles, and the way of force application, which are indispensable for humanoid robots to realize anthropomorphic motion planning. How do humanoid robots exploit these differences in motion? Imitating and learning human movement may solve this problem.</p>
<p>It is a great idea to accurately reproduce human movement on a robot. A motion capture system can be utilized to gather data on natural human movement and construct a motion database. By comparing the end-effector trajectory with the database, the robot joint configuration and motion trajectories can be predicted (<xref ref-type="bibr" rid="B112">Yamane, 2020</xref>). The approach not only avoids the joint redundancy, but also controls the grasping force and posture, and predicts the position and time of grasping. However, the large dataset may reduce the prediction efficiency. The human position is collected by the motion capture system in advance and then converted into joint angle information offline, then sent to the robot controller for execution, so that the robot&#x2019;s movements are modeled after human movements, which is only applicable in weak interaction scenarios (<xref ref-type="bibr" rid="B55">Liu et al., 2012</xref>; <xref ref-type="bibr" rid="B124">Zuher and Romero, 2012</xref>). However, the limitation of these approaches is that they rely exclusively on pre-collected data that do not cover all possible human-robot interaction situations. Consequently, the robot&#x2019;s responses may not be sufficiently flexible. Furthermore, since the robots&#x2019; movements are entirely derived from pre-existing human movements, they can only move in a repetitive manner, which greatly limits their use in humanoid robots that require frequent interaction with the outside world.</p>
<p>To address this issue, it is necessary to adopt a more flexible and adaptable approach to the movement of humanoid robots. In addition to replicating human movements, the robot must also learn the motion patterns of the human upper limbs, which enables robots to transfer human motor skills into their own systems in a straightforward manner and employ appropriate methods to replicate anthropomorphic motor trajectories. These trajectories exhibit similar or comparable motor characteristics to humans, making them suitable for practical applications.</p>
<p>The motion patterns of the human upper limbs are unique and rely on a strong learning capacity, which enables humans to adapt to complex and changing environments based on previous experience (<xref ref-type="bibr" rid="B37">Huang and Zhang, 2020</xref>). Even without prior knowledge, humans are still able to interact correctly with objects in the surrounding environment (<xref ref-type="bibr" rid="B67">Nagahama et al., 2021</xref>). The variation and adaptability of humans are crucial in achieving effective motion in various situations. It has been a challenging problem to equip robots with learning and motor skills of humans. Researchers attempt to understand the laws governing upper limb movement at the physiological level and map human behavior patterns to robotic motion strategies.</p>
<p>Neurophysiological studies have shown that the natural movement of the human upper limbs can be decomposed into a large number of small movement units that can be combined in an orderly fashion to produce a variety of complex movement, which researchers call movement primitives (<xref ref-type="bibr" rid="B30">Giszter, 2015</xref>). In fact, research results from several fields have shown that human upper limb movement exhibits &#x201c;primitive&#x201d; properties at the level of brain motor cortex (<xref ref-type="bibr" rid="B7">Averbeck et al., 2002</xref>), kinematics (<xref ref-type="bibr" rid="B80">Rohrer et al., 2002</xref>), and dynamics (<xref ref-type="bibr" rid="B66">Mussa-Ivaldi and Bizzi, 2000</xref>). Therefore, the movement primitives can be regarded as the implicit embodiment of human motion characteristics, which can not only explain the law governing upper limb movement and enhance the understanding of their own motion, but also serve as a carrier to transfer the movement law from the human upper limb to the humanoid robotic arm, so as to make its motion anthropomorphic.</p>
<p>Based on the human upper limb movement dataset, the researchers proposed a demonstration-learning-reconstruction method to extract the movement primitives. Firstly, they used human natural motion data as demonstration trajectories. Then, they constructed a learning model using statistical methods to encode them. Finally, they applied upper limb motion characteristics to the robotic arm motion to reconstruct similar behaviors. The most common learning models are Gaussian mixture model (GMM), Dynamic movement primitives (DMP), and hidden Markov model (HMM). Specifically, GMM has powerful coding and noise reduction capabilities and is often used to solve high-dimensional problems. <xref ref-type="bibr" rid="B17">Deng et al. (2020)</xref> proposed a strategy for learning human motor skills using GMM, which allowed robots to learn how to successfully perform fixed impedance-based tasks and achieve safe human-robot cooperation. DMP has strong adaptability and robustness. <xref ref-type="bibr" rid="B49">Lauretti et al. (2019)</xref> proposed a method to obtain joint space and Cartesian space anthropomorphic trajectories using DMP and extracted DMP parameters as motion characteristics to obtain obstacle avoidance trajectories via locally weighted regression, which was finally experimentally validated on a humanoid robotic arm LWR4&#x2b; (KUKA, Augsburg, Germany). HMM has strong predictive ability and can easily extract motion characteristics. <xref ref-type="bibr" rid="B93">Takano and Nakamura (2017)</xref> proposed a method for extracting motion data information using HMM, which could control the moments of all joints of a humanoid robot to achieve the desired contact force and overall motion. Furthermore, <xref ref-type="bibr" rid="B119">Zhang et al. 
(2020)</xref> proposed a new anthropomorphic motion control framework using GMM and DMP to learn the demonstration trajectories and generate the anthropomorphic motion trajectories, which was tested on a mobile service robot to prove its effectiveness.</p>
<p>The anthropomorphic motion generated by the above work are all simple reaching movement. There are also many studies that have reproduced complex movement. <xref ref-type="bibr" rid="B75">Pignat and Calinon (2017)</xref> used a hidden semi-Markov model (HSMM) to enable the robot to successfully assist humans in dressing. <xref ref-type="bibr" rid="B43">Koenig and Matari&#x107; (2016)</xref> used Bayesian networks to enable the robot to perform basic movement such as grasping and releasing. <xref ref-type="bibr" rid="B65">M&#xfc;lling et al. (2013)</xref> and <xref ref-type="bibr" rid="B13">Calinon et al. (2010)</xref> developed table tennis robotic systems for anthropomorphic motion using mixture of motor primitives (MoMP), HMM and Gaussian mixture regression (GMR), respectively. <xref ref-type="bibr" rid="B114">Yi et al. (2022)</xref> developed an autonomous robotic grasping system using an imitation learning algorithm consisting of K-means clustering and DMP, which could be finely manipulated using a variety of machine learning methods, and proved its reliability through evaluation. There are also studies on improving individual algorithms or combining multiple algorithms to improve iterative efficiency and reproduction accuracy, such as task-parameterized GMM is used to learn the demonstration trajectory to obtain motion characteristics, which enables the robot to perform the dual-arm sweeping task smoothly (<xref ref-type="bibr" rid="B85">Silv&#xe9;rio et al., 2015</xref>). However, the reference movement for demonstration learning relies on the richness of experimental data. When adding new sample data to the training model for training, it is common practice to retrain the original model after increasing the number of network layers or changing the structure, which consumes a lot of time. This problem is simplified by the broad learning system based on incremental learning principle (<xref ref-type="bibr" rid="B37">Huang and Zhang, 2020</xref>). 
In the broad learning system, there is no need to retrain the existing structure and parameters even if new sample data is added. We only need to compute the added parameters and assign new computational weights to easily achieve incremental learning of input samples, characteristic nodes, and enhancement nodes.</p>
<p>Even as the dataset continues to grow, new questions also arise. Researchers expect that robots with human motor skills will also have the ability to understand and predict in the same way that humans do. Humans participate in interactions by predicting the behavior of others (<xref ref-type="bibr" rid="B106">Wenderoth et al., 2012</xref>), while the robot&#x2019;s motion commands are issued by a controller, whose output commands are preset by a human input program, and the accuracy of the preset program commands affects the anthropomorphism of the movement trajectories to some extent. The anthropomorphic motion trajectories generated by demonstration learning are too dependent on the reference trajectory, which means that changes in the content of the demonstration may lead to different extracted movement primitives. Therefore, each trajectory iteration accumulates small prediction errors, which leads to the deformation of the robotic arm motion (<xref ref-type="bibr" rid="B81">Sasagawa et al., 2021</xref>). To overcome this problem and avoid the chance of the parameters of the upper limb model, <xref ref-type="bibr" rid="B113">Yang et al. (2021)</xref> combined the multiple characteristics of the human upper limb movement process, adopted the reward function, and used reinforcement learning to plan the anthropomorphic motion of the humanoid robotic arm, and verified the feasibility and validity of the robotic arm in anthropomorphic motion through experiments.</p>
<p>Furthermore, imitation-based motion planning algorithms commonly utilize motion datasets derived from demonstrative samples to create motion models. However, these models display limited generalization, thereby limiting their usability in unstructured scenarios. As a result, the motion variation of the upper limbs is compromised. Therefore, to enhance robots&#x2019; capacity to mimic human-environment interaction, it is crucial to enhance the generalization of these models. The main factors affecting the generalization ability are the unknown environment and the targets. Learning reference inputs through DMP algorithm and adaptive optimal admittance control method can effectively improve the robot&#x2019;s ability to interact with unknown environment (<xref ref-type="bibr" rid="B111">Xue et al., 2022</xref>). Compared to other algorithms, the traditional DMP algorithm has excellent generalization and anti-interference capabilities (<xref ref-type="bibr" rid="B31">Gong et al., 2020</xref>). However, the limitation of this algorithm is that when the demonstration trajectory is learned, the trajectory characteristics represented by the basis functions are fixed. Even if the starting point and scaling factor are changed, the result is only a change in speed and the scaling of the trajectory, which cannot be applied to different complex tasks and environments. Although some studies have improved the DMP by adding constraints, the results are still unsatisfactory (<xref ref-type="bibr" rid="B26">Gams et al., 2014</xref>; <xref ref-type="bibr" rid="B36">Huang et al., 2019</xref>). <xref ref-type="bibr" rid="B77">Qian et al. (2020)</xref> proposed a hierarchical demonstration learning framework that combined symbolic and trajectory learning to improve a robot&#x2019;s ability to adapt to new tasks and environmental changes. <xref ref-type="bibr" rid="B59">Lu et al. 
(2023)</xref> combined DMP with neural networks and admittance control to incrementally update the nonlinear function by adding new basis functions and weights to mimic the new trajectory, and finally experimentally demonstrated that the generalization of the trajectory was improved. <xref ref-type="bibr" rid="B8">Averta et al. (2020)</xref> used functional principal component analysis (fPCA) to extract functional principal components/basis functions (describing the motion variance of each joint trajectory at the time level) from human upper limb movement data, and argued that a general upper limb motion trajectory can be described as an ordered combination of a set of functional principal components, and that an anthropomorphic motion trajectory could be generated by optimizing the weights of these functional principal components.</p>
<p>In order to quantify the variation of human movement, <xref ref-type="bibr" rid="B29">Gielniak et al. (2013)</xref> adopted variance as a variable in the algorithm when studying anthropomorphic motion planning, and used variance as a measure of the motion variation, and concluded through experiments that highly constrained movement or body parts have less variance (or motion variability), which is basically the same as the intuitive feeling of human movement. <xref ref-type="table" rid="T3">Table 3</xref> gives an overview of 12 approaches to solve motion variation.</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Approaches to solve motion variation.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Study</th>
<th align="center">Task</th>
<th align="center">Anthropomorphic criterion</th>
<th align="center">Approach</th>
<th align="center">Contribution</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">
<xref ref-type="bibr" rid="B13">Calinon et al. (2010)</xref>
</td>
<td align="center">Hitting a ball with a table tennis racket</td>
<td align="center">Movement primitives</td>
<td align="center">HMM and GMR</td>
<td align="center">Present and evaluate an approach to allow robots to acquire new skills</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B65">M&#xfc;lling et al. (2013)</xref>
</td>
<td align="center">Striking movements in table tennis</td>
<td align="center">Movement primitives</td>
<td align="center">MoMP</td>
<td align="center">Presented a framework that allows a robot to play table tennis with a human</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B29">Gielniak et al. (2013)</xref>
</td>
<td align="center">Mimicking performance</td>
<td align="center">Spatiotemporal correspondence</td>
<td align="center">Human-like and variance optimization</td>
<td align="center">Present a quantitative metric for human-like motion</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B43">Koenig and Matari&#x107; (2016)</xref>
</td>
<td align="center">A set of basic actions</td>
<td align="center">A series of actions with features</td>
<td align="center">Bayesian networks</td>
<td align="center">Presents a framework for lifelong robot task learning from demonstrations</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B75">Pignat and Calinon (2017)</xref>
</td>
<td align="center">Dressing task</td>
<td align="center">Movement primitives</td>
<td align="center">HSMM</td>
<td align="center">Propose a method for efficient skill acquisition</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B93">Takano and Nakamura (2017)</xref>
</td>
<td align="center">Touching an object with the right hand</td>
<td align="center">Synthesis of joint angle sequences</td>
<td align="center">HMM</td>
<td align="center">Propose a method for motion synthesis and force control</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B49">Lauretti et al. (2019)</xref>
</td>
<td align="center">Reaching and pouring task</td>
<td align="center">Four performance indices</td>
<td align="center">Hybrid Joint/Cartesian DMPs</td>
<td align="center">100% in avoiding obstacles and high Cartesian accuracy</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B17">Deng et al. (2020)</xref>
</td>
<td align="center">Drawing specific lines</td>
<td align="center">Movement primitives</td>
<td align="center">GMM</td>
<td align="center">Present a hierarchical control scheme for human-robot co-manipulation</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B119">Zhang et al. (2020)</xref>
</td>
<td align="center">Feeding meals to patients</td>
<td align="center">Human activity recognition</td>
<td align="center">Combine GMM with DMP</td>
<td align="center">Propose a novel human-like control framework for the mobile medical service robot</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B8">Averta et al. (2020)</xref>
</td>
<td align="center">30 activities of daily living</td>
<td align="center">A variety of movement parameters</td>
<td align="center">fPCA</td>
<td align="center">Embed synergies of human movements for robot motion generation</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B113">Yang et al. (2021)</xref>
</td>
<td align="center">Reaching motion</td>
<td align="center">Feature variables of human arm</td>
<td align="center">Reinforcement learning</td>
<td align="center">Present a humanoid method, and verify humanization, feasibility, and effectiveness</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B114">Yi et al. (2022)</xref>
</td>
<td align="center">Grasping complex-shaped objects</td>
<td align="center">Movement primitives</td>
<td align="center">K-means clustering and DMP</td>
<td align="center">Presents an autonomous grasping approach for complex-shaped objects</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Most of the current methods to solve the problem of robot motion variation are based on demonstration learning. Although they can effectively reproduce human upper limb movement under specific environments and tasks, their generalization ability is weak, making them difficult to apply to complex scenarios. In addition, the data samples must be expanded to improve the accuracy of the motion, but the large number of operations severely affects the efficiency of the robot motion. Therefore, the study of motion variation from a statistical point of view alone remains deficient and needs to be synthesized with the underlying nature of human movement.</p>
</sec>
<sec id="s4">
<title>4 Motion coordination</title>
<p>The human body is a complex bio-motor system, and each of its movement behaviors requires coordination inside and outside the body, that is, the ability of a system&#x2019;s various joints, components, or systems to work together and cooperate with each other in the execution of complex actions or tasks, which enables the system to achieve the efficient and precise completion of the task. In particular, the upper limbs play a very important role in human life (the function of the upper limbs accounts for about 60% of the whole body), and almost all daily activities require some coordination between the upper limbs (<xref ref-type="bibr" rid="B33">Guiard, 1987</xref>), and the level of coordination directly affects human movement ability (<xref ref-type="bibr" rid="B25">Freitas et al., 2016</xref>).</p>
<p>To successfully perform a daily activity, the human body requires a number of sensory organs to process information and control upper limb movement, a process whose mechanism is not fully understood. In neurophysiology, there is evidence that the central nervous system is responsible for the vast majority of human movement. When confronted with different external stimuli, humans are always able to respond appropriately, which relies heavily on human sensorimotor modeling (<xref ref-type="bibr" rid="B108">Wolpert and Ghahramani, 2000</xref>). According to the model, there is a relationship between sensory inputs and motor outputs in the human body, in which the particular patterns present are likely to be the criteria for the generation and control of movement by the central nervous system, which provides a physiological basis for anthropomorphic motion planning for robotic arms.</p>
<p>Some studies have designed mechanical musculoskeletal structures that mimic the human upper limbs based on the musculoskeletal kinesiology, and have used control strategies involving internal force kinematics (<xref ref-type="bibr" rid="B91">Tahara et al., 2006</xref>) to reproduce muscle activities as closely as possible in a biological motion pattern (<xref ref-type="bibr" rid="B68">Northrup et al., 2001</xref>), which provides an achievable platform for anthropomorphic motion planning. On this basis, how to provide the humanoid robotic arm with highly anthropomorphic motion ability becomes a challenge.</p>
<p>When people interact with the outside world, the whole process from contacting information to making a response is about 0.2&#x2013;0.4&#xa0;s (<xref ref-type="bibr" rid="B72">Otaki and Shibata, 2019</xref>). It remains an unsolved question how humans can easily coordinate multiple redundant DoFs of the body in a short period of time during movement. The causes of motion coordination are multifaceted and can stem from both intrinsic and extrinsic factors. The physiological basis of motion coordination is synergy, which is a key component throughout the entire motion process of the upper limbs and changes accordingly with different motion patterns. Motion coordination is specifically manifested as the precise control of the timing and spatial position of multi-degree-of-freedom movements during the movement process (temporal and spatial coordination), which is mainly dependent on the control of the nervous system. The internal neural control is further complemented by the coordination of the arms (inter-arm coordination) and body language (coordination of different limbs), which enables the coordinated movement of the limbs. Motion coordination of the upper limbs is a key component of the human motor system, which relies on the central nervous system and the cooperation of multiple muscles and joints, and involves fluidity, timing, and precision of movement, which is a challenge that is still not fully solved in the anthropomorphic motion planning system.</p>
<p>Classical neuromechanics suggests that the central nervous system relies on the interlocking of the muscular and skeletal systems to coordinate body movement, which is often called &#x201c;synergy&#x201d;. Recent research has revealed the existence of synergies at three levels, including kinematics, muscle mechanics, and neural centers (<xref ref-type="bibr" rid="B12">Bruton and O&#x2019;dwyer, 2018</xref>), and has been widely applied to robotic arms to reproduce reaching movement (<xref ref-type="bibr" rid="B57">Liu et al., 2018</xref>) and grasping movement (<xref ref-type="bibr" rid="B21">Ficuciello et al., 2014</xref>) of the upper limbs. During human movement, the nervous system dynamically adjusts the synergies by regulating the control strategy to control the coordinated movement of the limbs to meet the requirements of the task. Hierarchical theory states that human high-level motion control units focus on generating upper limb configurations during reaching movement, and that low-level motor units synergistically control the joints associated with the movement to ensure coordination of upper limbs (<xref ref-type="bibr" rid="B32">Gosselin-Kessiby et al., 2008</xref>; <xref ref-type="bibr" rid="B35">Herbort and Butz, 2010</xref>). Correspondingly, by investigating the role of different synergy components in the reaching movement, <xref ref-type="bibr" rid="B95">Tang et al. (2019)</xref> found that the high percentage synergy is related to the movement trend, while the low percentage synergy is related to the specific task movements. Different principal components have some effects on the movement trajectory and endpoint accuracy, and the synergies are dynamically adjusted with different tasks. At the same time, the expressions of synergies in different motion patterns vary. <xref ref-type="bibr" rid="B121">Zhao et al. 
(2022)</xref> extracted the synergies under different numbers of trials and different arrival directions in point-to-point reaching movement experiments and found that the synergies increased with the number of trials or the number of arrival directions. When the number of experiments or the number of arrival directions reached a threshold, the synergies did not change significantly. The researchers hypothesized that different training patterns (number of trials, target category) affected muscle activation modules, which in turn affected synergies.</p>
<p>The study of synergistic movement of upper limbs is based on the foundation that humans activate discrete motion modules to perform biological activities through the cooperation and collaboration of different muscle groups to meet the needs of basic daily activities (<xref ref-type="bibr" rid="B11">Bizzi and Cheung, 2013</xref>). It is mainly categorized into three main components: synergistic control of nervous system (<xref ref-type="bibr" rid="B16">d&#x2019;Avella, 2016</xref>), synergistic contraction of muscles (<xref ref-type="bibr" rid="B74">Pham et al., 2014</xref>; <xref ref-type="bibr" rid="B94">Tang et al., 2014</xref>), and synergistic movement of joints (<xref ref-type="bibr" rid="B27">Garcia-Rosas et al., 2018</xref>; <xref ref-type="bibr" rid="B63">Moiseev and Gorodnichev, 2022</xref>). The central nervous system receives information from the outside world as input, integrates and processes it to generate motor commands. The commands are transmitted and delivered, whose output is manifested as precise synergistic control of the relevant muscles and joints. The upper limb muscle groups take the received motor commands as input, then trigger the synergistic contraction of the corresponding muscles depending on the complexity of the commands, to produce the appropriate force to control skeletal movement. The synergistic contraction of muscles causes the attachment points on the corresponding bones to move, resulting in the simultaneous movement of multiple bones, which in turn causes synergistic movement of joints. The overall motion of the upper limbs is controlled by the motion of specific bones, whose specific path of motion is determined by the additional motion of specific joints. At the same time, the sensory and feedback mechanisms of the nervous system are able to provide timely information to the brain about the position, force, and movement status of the upper limbs, thus realizing more precise synergistic movement.</p>
<p>The study of synergistic movement of upper limbs is based on the foundation that humans activate discrete motor modules to perform biological activities through the cooperation and collaboration of different muscle groups to meet the needs of basic daily activities (<xref ref-type="bibr" rid="B11">Bizzi and Cheung, 2013</xref>). An important part of the synergistic movement of the upper limbs is the synergistic contraction of the muscles, which is accompanied by the synergistic movement of the muscles. <xref ref-type="bibr" rid="B15">Coscia et al. (2014)</xref> studied hand trajectories and shoulder and elbow angular displacement trajectories of an upper limb weight support in different horizontal planes, analyzed the synergy patterns of muscles, and found that modular organization activated by synergistic movement of muscle groups underlies upper limb reaching movement generation.</p>
<p>Synergistic movement of upper limbs also involves motion learning and memory processes. The human nervous system uses hundreds of millions of nerve cells to precisely regulate the body&#x2019;s more than 600 muscles, turning flexion, extension, rotation, and grasping into functions that can run in the background without thinking. Through constant practice and repetitive movement, the brain can gradually build up the appropriate neural pathways and patterns to form a memory of muscle coordination.</p>
<p>Theoretical perspectives related to neuroscience and motion control suggest that the central nervous system views the multiple DoFs of the upper limbs as a luxury tool rather than a burden of control. In motion control of the human body, it is not necessary for the nervous system, which is the endpoint, to control all DoFs, which can lead to a lack of stability in the system. In motion coordination, stability and coordination do not coexist. To resolve this contradiction, <xref ref-type="bibr" rid="B82">Scholz and Sch&#xf6;ner (1999)</xref> skillfully combined stability and coordination by designing experiments using a dynamical systems approach to approximate control structures in joint space. They proposed the uncontrolled manifold (UCM) hypothesis to quantify the joint coordination of human movement. <xref ref-type="bibr" rid="B100">Togo et al. (2016)</xref> proposed a UCM reference feedback control method that incrementally generated a target UCM from a given target end-effector trajectory and combined it with the target joint in joint space to minimize the cost function with respect to the input joint torque and torque variation. They also quantitatively compared the results of simulation and measurement experiments for a target tracking task. Statistical results showed that the proposed method quantitatively reproduced the kinematics and dynamics properties of the upper limbs (end-effector posture, end-effector velocity, and joint torque, etc.).</p>
<p>In upper limb rehabilitation, temporal and spatial coordination serve as an important indicator of whether the human body has normal motion ability, which directly reflects the rehabilitation effect of patients with physical disabilities. In complex scenarios such as industrial and service, temporal and spatial coordination can reflect the degree of collaboration of multiple robotic arms and directly affect the efficiency of task completion (<xref ref-type="bibr" rid="B122">Zhao et al., 2021</xref>). <xref ref-type="bibr" rid="B29">Gielniak et al. (2013)</xref> used motion clarity as a measure of a robot&#x2019;s ability to understand human movement and engage in human-robot interaction, and used spatiotemporal coordination as a factor in synchronizing robotic arm movement with human movement in an anthropomorphic motion generation algorithm.</p>
<p>The aforementioned work is concerned with intra-arm coordination in single-arm movement, in addition to inter-arm coordination between dual-arm movement and coordination between the upper limbs and other parts of the body. <xref ref-type="bibr" rid="B78">Qu et al. (2019)</xref> constructed a learning model including PCA, GMM and GMR to extract the intra-arm and inter-arm coordination characteristics of the human upper limbs by analyzing the human bimanual motion data, derived the anthropomorphic coordination motion equations by combining the intra-arm and inter-arm coordination constraints, generated anthropomorphic trajectories of bimanual robots, and experimentally reproduced the anthropomorphic coordination motion, which could improve the human-robot interaction capability of the bimanual robots.</p>
<p>Furthermore, body language (gestures, body postures, facial expressions, etc.) is also an important part of conveying social information in human-computer interaction (<xref ref-type="bibr" rid="B60">L&#xfc;tkebohle et al., 2010</xref>). Through body language, complemented by coordinated body movement to signal or imply goals, express emotions or intentions, and obtain status or feedback, human-computer interaction can be more natural and efficient. However, sometimes the information expected to be expressed by head movement is not perfect and the interacting objects cannot understand the full meaning, and then the auxiliary functions of other limbs become extremely important. Researchers have explored the role of coordination movement of different limbs (e.g., hand-eye coordination (<xref ref-type="bibr" rid="B14">Chao et al., 2018</xref>; <xref ref-type="bibr" rid="B70">Olson et al., 2020</xref>), head-eye coordination (<xref ref-type="bibr" rid="B71">Omrcen and Ude, 2010</xref>; <xref ref-type="bibr" rid="B61">Milighetti et al., 2011</xref>), neck-eye coordination (<xref ref-type="bibr" rid="B79">Rajruangrabin and Popa, 2010</xref>), etc.) in augmenting head movement at the level of information conveyance, as well as their planning and control schemes. Based on these studies, <xref ref-type="bibr" rid="B120">Zhang et al. (2015)</xref> proposed a new online generation method of anthropomorphic motion based on head-arm coordination, which considered not only the two-arm coordination motion, but also the head-arm coordination, and finally verified by computer simulation and physical experiments. <xref ref-type="table" rid="T4">Table 4</xref> gives an overview of main approaches to solve motion coordination.</p>
<table-wrap id="T4" position="float">
<label>TABLE 4</label>
<caption>
<p>Approaches to solve motion coordination.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Study</th>
<th align="center">Task</th>
<th align="center">Anthropomorphic criterion</th>
<th align="center">Approach</th>
<th align="center">Contribution</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">
<xref ref-type="bibr" rid="B79">Rajruangrabin and Popa (2010)</xref>
</td>
<td align="center">Robot head human tracking</td>
<td align="center">Eye-neck coordination</td>
<td align="center">Visual feedback and optimization, reinforcement learning</td>
<td align="center">Propose an optimization approach, combined with real-time visual feedback, to generate human-like motion</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B61">Milighetti et al. (2011)</xref>
</td>
<td align="center">Visual tracking of a moving target with unknown and arbitrary trajectory</td>
<td align="center">Head-eye coordination</td>
<td align="center">Adaptive Kalman Filter, trajectory tracking control</td>
<td align="center">Proposed a gaze control scheme to achieve human-like joint motions</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B15">Coscia et al. (2014)</xref>
</td>
<td align="center">Reaching movements</td>
<td align="center">Muscle synergies</td>
<td align="center">Non-negative matrix factorization</td>
<td align="center">Understand the effect of muscle coordination when performing upper extremity exercises</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B120">Zhang et al. (2015)</xref>
</td>
<td align="center">Tracking external targets and body parts</td>
<td align="center">Head-arm coordination</td>
<td align="center">A quadratic program-based method</td>
<td align="center">Propose a novel head-arm-based human-like behavior generation scheme</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B100">Togo et al. (2016)</xref>
</td>
<td align="center">One-dimensional target-tracking task</td>
<td align="center">Joint coordination</td>
<td align="center">UCM</td>
<td align="center">UCM reference feedback control can reproduce human-like joint coordination</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B14">Chao et al. (2018)</xref>
</td>
<td align="center">Saccade movements, hand spontaneous movements</td>
<td align="center">Hand-eye coordination</td>
<td align="center">Constructive neural networks</td>
<td align="center">Build a reverse transformation from the robot actuators space to the robot visual space</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B95">Tang et al. (2019)</xref>
</td>
<td align="center">Reaching task</td>
<td align="center">Kinematic synergies</td>
<td align="center">PCA</td>
<td align="center">Confirm that kinematic synergies can be used for exoskeleton motion planning</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B78">Qu et al. (2019)</xref>
</td>
<td align="center">Carrying and pouring</td>
<td align="center">Intra-arm and inter-arm coordination</td>
<td align="center">A learning model consisting of PCA, GMM and GMR</td>
<td align="center">Propose a method based on human-arm coordination characteristics to enhance human-robot interaction ability</td>
</tr>
<tr>
<td align="center">
<xref ref-type="bibr" rid="B121">Zhao et al. (2022)</xref>
</td>
<td align="center">Point-to-point reaching movements</td>
<td align="center">Muscle synergies</td>
<td align="center">Non-negative matrix factorization</td>
<td align="center">Promote applications of muscle synergies in clinical scenarios</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Although researchers have proposed various coordination algorithms to control the coordinated motion of the robotic arm, due to the limited understanding of the human movement control mechanism, research on motion coordination can only reduce the motion errors of the robotic arm in most cases, and cannot eliminate them entirely. By further studying the human-robot interaction mechanism and employing more accurate and sensitive sensor technology to provide better theoretical guidance for the robot&#x2019;s motion control strategy, it is possible to better optimize the coordination of the robot&#x2019;s anthropomorphic motion.</p>
</sec>
<sec id="s5">
<title>5 Future challenges</title>
<p>With the in-depth study of human movement function, wearable exoskeleton robots (<xref ref-type="bibr" rid="B57">Liu et al., 2018</xref>) and medical robots (<xref ref-type="bibr" rid="B119">Zhang et al., 2020</xref>) with anthropomorphic motion planning ability have gradually come into view. In the future, it is expected that more products will be introduced to meet human needs. However, in order to provide high-quality services and realize large-scale applications, the following challenges need to be addressed, building on existing research.<list list-type="simple">
<list-item>
<p>(1) Intelligence and autonomy enhancement. In unstructured scenarios such as homes and restaurants, to provide better service, robots should be more intelligent, make fast and accurate decisions, and take appropriate actions based on task requirements and real-time situations to improve work efficiency. At the same time, the full autonomy of humanoid robots allows them to take on heavy, dangerous or boring tasks, which enables humans to focus more on creative and advanced thinking. Unfortunately, existing humanoid robots are not yet able to be fully autonomous from humans. With continuous advances in artificial intelligence, sensor technology, control algorithms, and other fields, we can expect future robots to achieve a higher level of autonomy.</p>
</list-item>
<list-item>
<p>(2) Multimodal interaction and human-robot fusion. To enhance the personalized interaction experience, the robot should integrate multiple sensors such as visual, auditory, and haptic (<xref ref-type="bibr" rid="B51">Li G. et al., 2019</xref>) to comprehensively understand the user&#x2019;s behavioral patterns, accurately respond to the user&#x2019;s needs, and monitor the user&#x2019;s feedback. Through various forms of input and corresponding outputs, the multimodal interaction capability can realize a richer and more convenient human-computer interaction experience. However, current technologies cannot fully resolve the conflict between interaction efficiency and safety.</p>
</list-item>
<list-item>
<p>(3) Emotional interaction and emotional intelligence. Emotional interaction and emotional intelligence in humanoid robots enable them to better understand and respond to human emotional needs. Through emotional interaction, robots can communicate and interact emotionally with humans. Through emotional intelligence, robots can process and analyze emotional information and make corresponding intelligent decisions based on emotional information. The development of this technology will bring people more user-friendly and personalized robot services and support.</p>
</list-item>
<list-item>
<p>(4) Humanitarian and ethical considerations. The future development of humanoid robots should also focus on humanitarian and ethical considerations. Ethical guidelines must be followed in the design and application process to ensure that robots behave in accordance with moral and social values and are able to contribute positively to human wellbeing and social development.</p>
</list-item>
</list>
</p>
</sec>
<sec sec-type="conclusion" id="s6">
<title>6 Conclusion</title>
<p>In this article, we reviewed representative anthropomorphic motion planning research for multi-degree-of-freedom robotic arms. By in-depth analysis of human natural motion, we proposed a novel classification method that incorporated human movement laws into robot motion control based on physiology, and constructed a more complete anthropomorphic planning system to better address the problem of anthropomorphic motion planning. This classification encompasses the majority of current anthropomorphic motion planning research results. It not only summarizes and integrates existing research results but also provides an in-depth exploration and understanding of the deeper causes of human movement ability. This categorization method comprehensively and systematically examines the reasons for the formation of unique human movement abilities in three major aspects: motion patterns, individual variation, and functional control. Firstly, from a physiological perspective, the formation of natural human movement ability is inextricably linked to body composition. The flexibility provided by the redundancy of the upper limbs ensures that humans can accomplish various types of complex tasks. Therefore, motion redundancy is the primary issue addressed in anthropomorphic motion planning. Secondly, individual variation is also a significant factor affecting human movement abilities. Each individual possesses unique physical characteristics, exercise habits, and psychological states, which can influence movement performance. Therefore, it is crucial to consider individual variation when designing anthropomorphic movement plans. Motion variation is a significant challenge in this domain. Finally, functional control is essential for human movement ability. The nervous system plays a pivotal role in regulating daily life movement. 
In addition, in order to maintain balance during movement and to improve the accuracy and stability of movement execution, the motion coordination of the limbs is an important symbol that distinguishes human beings from non-living beings (robots) or human beings with impaired motor function (patients with limb disabilities). Therefore, motion coordination is an important criterion for robot motion to be anthropomorphic. During this development, researchers have moved from a single anthropomorphic criterion to considering multiple criteria to ensure that motion is sufficiently anthropomorphic. In addition, each section of the article discusses in detail the various research approaches to understanding the anthropomorphism of movement and expresses appreciation for the value that these findings provide in the anthropomorphic planning system. The article also points out the current challenges faced by anthropomorphic motion planning and suggests possible trends for the future. Once these difficulties are overcome, humanoid robots with more advanced anthropomorphic motion planning abilities will be realized in real life, contributing to the improvement of human living standards for the benefit of society.</p>
</sec>
</body>
<back>
<sec id="s7">
<title>Author contributions</title>
<p>XZ: Formal Analysis, Investigation, Methodology, Validation, Visualization, Writing&#x2013;original draft, Writing&#x2013;review and editing. YH: Conceptualization, Funding acquisition, Project administration, Resources, Supervision, Writing&#x2013;original draft, Writing&#x2013;review and editing. JL: Conceptualization, Funding acquisition, Project administration, Resources, Supervision, Writing&#x2013;original draft, Writing&#x2013;review and editing.</p>
</sec>
<sec sec-type="funding-information" id="s8">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research, authorship, and/or publication of this article. This research was funded by the National Key R&#x26;D Program of China (2020YFC2007800) granted to Huazhong University of Science and Technology and the National Natural Science Foundation of China (52005191, 32271082, and 52027806).</p>
</sec>
<sec sec-type="COI-statement" id="s9">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Albrecht</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ramirez-Amaro</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Ruiz-Ugalde</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Weikersdorfer</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Leibold</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ulbrich</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2011</year>). &#x201c;<article-title>Imitating human reaching motions using physically inspired optimization principles</article-title>,&#x201d; in <conf-name>2011 11th IEEE-RAS International Conference on Humanoid Robots</conf-name> (<publisher-loc>Bled, Slovenia</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>602</fpage>&#x2013;<lpage>607</lpage>. <pub-id pub-id-type="doi">10.1109/humanoids.2011.6100856</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Arimoto</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Sekimoto</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2006</year>). &#x201c;<article-title>Human-like movements of robotic arms with redundant DOFs: virtual spring-damper hypothesis to tackle the Bernstein problem</article-title>,&#x201d; in <conf-name>Proceedings of the 2006 IEEE International Conference on Robotics and Automation</conf-name> (<publisher-loc>Orlando, Florida, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1860</fpage>&#x2013;<lpage>1866</lpage>. <pub-id pub-id-type="doi">10.1109/robot.2006.1641977</pub-id>
</citation>
</ref>
<ref id="B4">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Arkin</surname>
<given-names>R. C.</given-names>
</name>
<name>
<surname>Moshkina</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2014</year>). &#x201c;<article-title>Affect in human-robot interaction</article-title>,&#x201d; in <source>The oxford handbook of affective computing</source>. Editor <person-group person-group-type="editor">
<name>
<surname>Calvo</surname>
<given-names>R.</given-names>
</name>
</person-group> (<publisher-loc>Oxford</publisher-loc>: <publisher-name>Oxford University Press</publisher-name>), <fpage>483</fpage>&#x2013;<lpage>493</lpage>.</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Artemiadis</surname>
<given-names>P. K.</given-names>
</name>
<name>
<surname>Katsiaris</surname>
<given-names>P. T.</given-names>
</name>
<name>
<surname>Kyriakopoulos</surname>
<given-names>K. J.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>A biomimetic approach to inverse kinematics for a redundant robot arm</article-title>. <source>Aut. Robots</source> <volume>29</volume> (<issue>3-4</issue>), <fpage>293</fpage>&#x2013;<lpage>308</lpage>. <pub-id pub-id-type="doi">10.1007/s10514-010-9196-x</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Atkeson</surname>
<given-names>C. G.</given-names>
</name>
<name>
<surname>Hollerbach</surname>
<given-names>J. M.</given-names>
</name>
</person-group> (<year>1985</year>). <article-title>Kinematic features of unrestrained vertical arm movements</article-title>. <source>J. Neurosci.</source> <volume>5</volume> (<issue>9</issue>), <fpage>2318</fpage>&#x2013;<lpage>2330</lpage>. <pub-id pub-id-type="doi">10.1523/jneurosci.05-09-02318.1985</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Averbeck</surname>
<given-names>B. B.</given-names>
</name>
<name>
<surname>Chafee</surname>
<given-names>M. V.</given-names>
</name>
<name>
<surname>Crowe</surname>
<given-names>D. A.</given-names>
</name>
<name>
<surname>Georgopoulos</surname>
<given-names>A. P.</given-names>
</name>
</person-group> (<year>2002</year>). <article-title>Parallel processing of serial movements in prefrontal cortex</article-title>. <source>Proc. Natl. Acad. Sci.</source> <volume>99</volume> (<issue>20</issue>), <fpage>13172</fpage>&#x2013;<lpage>13177</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.162485599</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Averta</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Della Santina</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Valenza</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Bicchi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Bianchi</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Exploiting upper-limb functional principal components for human-like motion generation of anthropomorphic robots</article-title>. <source>J. Neuroeng Rehabil.</source> <volume>17</volume> (<issue>1</issue>), <fpage>63</fpage>. <pub-id pub-id-type="doi">10.1186/s12984-020-00680-8</pub-id>
</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Berman</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Liebermann</surname>
<given-names>D. G.</given-names>
</name>
<name>
<surname>Flash</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Application of motor algebra to the analysis of human arm movements</article-title>. <source>Robotica</source> <volume>26</volume> (<issue>4</issue>), <fpage>435</fpage>&#x2013;<lpage>451</lpage>. <pub-id pub-id-type="doi">10.1017/S0263574707003979</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Berret</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Chiovetto</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Nori</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Pozzo</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Evidence for composite cost functions in arm movement planning: an inverse optimal control approach</article-title>. <source>PLoS Comput. Biol.</source> <volume>7</volume> (<issue>10</issue>), <fpage>e1002183</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pcbi.1002183</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bizzi</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Cheung</surname>
<given-names>V. C. K.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>The neural origin of muscle synergies</article-title>. <source>Front. Comput. Neurosci.</source> <volume>7</volume>, <fpage>51</fpage>. <pub-id pub-id-type="doi">10.3389/fncom.2013.00051</pub-id>
</citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bruton</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>O&#x2019;dwyer</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Synergies in coordination: a comprehensive overview of neural, computational, and behavioral approaches</article-title>. <source>J. Neurophysiology</source> <volume>120</volume> (<issue>6</issue>), <fpage>2761</fpage>&#x2013;<lpage>2774</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00052.2018</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Calinon</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>D&#x27;halluin</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Sauser</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Caldwell</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Billard</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Learning and reproduction of gestures by imitation</article-title>. <source>IEEE Robotics Automation Mag.</source> <volume>17</volume> (<issue>2</issue>), <fpage>44</fpage>&#x2013;<lpage>54</lpage>. <pub-id pub-id-type="doi">10.1109/mra.2010.936947</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chao</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>C.-M.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Shang</surname>
<given-names>C.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>Enhanced robotic hand&#x2013;eye coordination inspired from human-like behavioral patterns</article-title>. <source>IEEE Trans. Cognitive Dev. Syst.</source> <volume>10</volume> (<issue>2</issue>), <fpage>384</fpage>&#x2013;<lpage>396</lpage>. <pub-id pub-id-type="doi">10.1109/tcds.2016.2620156</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Coscia</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Cheung</surname>
<given-names>V. C. K.</given-names>
</name>
<name>
<surname>Tropea</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Koenig</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Monaco</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Bennis</surname>
<given-names>C.</given-names>
</name>
<etal/>
</person-group> (<year>2014</year>). <article-title>The effect of arm weight support on upper limb muscle synergies during reaching movements</article-title>. <source>J. NeuroEngineering Rehabilitation</source> <volume>11</volume> (<issue>1</issue>), <fpage>22</fpage>. <pub-id pub-id-type="doi">10.1186/1743-0003-11-22</pub-id>
</citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>d&#x2019;Avella</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Modularity for motor control and motor learning</article-title>. <source>Prog. Mot. Control</source> <volume>957</volume>, <fpage>3</fpage>&#x2013;<lpage>19</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-47313-0_1</pub-id>
</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Deng</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Kang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>C. L. P.</given-names>
</name>
<name>
<surname>Chu</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>A learning-based hierarchical control scheme for an exoskeleton robot in human&#x2013;robot cooperative manipulation</article-title>. <source>IEEE Trans. Cybern.</source> <volume>50</volume> (<issue>1</issue>), <fpage>112</fpage>&#x2013;<lpage>125</lpage>. <pub-id pub-id-type="doi">10.1109/tcyb.2018.2864784</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Dragan</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Srinivasa</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2014</year>). &#x201c;<article-title>Familiarization to robot motion</article-title>,&#x201d; in <conf-name>Proceedings of the 2014 ACM/IEEE international conference on human-robot interaction</conf-name> (<publisher-loc>Bielefeld, Germany</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>366</fpage>&#x2013;<lpage>373</lpage>. <pub-id pub-id-type="doi">10.1145/2559636.2559674</pub-id>
</citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fang</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Ding</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Tsagarakis</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>A2ML: a general human-inspired motion language for anthropomorphic arms based on movement primitives</article-title>. <source>Robotics Aut. Syst.</source> <volume>111</volume>, <fpage>145</fpage>&#x2013;<lpage>161</lpage>. <pub-id pub-id-type="doi">10.1016/j.robot.2018.10.006</pub-id>
</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ferrer</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Diaz</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Quintana</surname>
<given-names>J. J.</given-names>
</name>
<name>
<surname>Carmona-Duarte</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Plamondon</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Extending the kinematic theory of rapid movements with new primitives</article-title>. <source>Pattern Recognit. Lett.</source> <volume>167</volume>, <fpage>181</fpage>&#x2013;<lpage>188</lpage>. <pub-id pub-id-type="doi">10.1016/j.patrec.2023.02.021</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ficuciello</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Palli</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Melchiorri</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Siciliano</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Postural synergies of the UB Hand IV for human-like grasping</article-title>. <source>Robotics Aut. Syst.</source> <volume>62</volume> (<issue>4</issue>), <fpage>515</fpage>&#x2013;<lpage>527</lpage>. <pub-id pub-id-type="doi">10.1016/j.robot.2013.12.008</pub-id>
</citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fitts</surname>
<given-names>P. M.</given-names>
</name>
</person-group> (<year>1954</year>). <article-title>The information capacity of the human motor system in controlling the amplitude of movement</article-title>. <source>J. Exp. Psychol.</source> <volume>47</volume> (<issue>6</issue>), <fpage>381</fpage>&#x2013;<lpage>391</lpage>. <pub-id pub-id-type="doi">10.1037/h0055392</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Flash</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Hogan</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>1985</year>). <article-title>The coordination of arm movements: an experimentally confirmed mathematical model</article-title>. <source>J. Neurosci.</source> <volume>5</volume> (<issue>7</issue>), <fpage>1688</fpage>&#x2013;<lpage>1703</lpage>. <pub-id pub-id-type="doi">10.1523/jneurosci.05-07-01688.1985</pub-id>
</citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Flash</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Meirovitch</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Barliya</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Models of human movement: trajectory planning and inverse kinematics studies</article-title>. <source>Robotics Aut. Syst.</source> <volume>61</volume> (<issue>4</issue>), <fpage>330</fpage>&#x2013;<lpage>339</lpage>. <pub-id pub-id-type="doi">10.1016/j.robot.2012.09.020</pub-id>
</citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Freitas</surname>
<given-names>D. L.</given-names>
</name>
<name>
<surname>Lausen</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Maia</surname>
<given-names>J. a. R.</given-names>
</name>
<name>
<surname>Gouveia</surname>
<given-names>&#xc9;. R.</given-names>
</name>
<name>
<surname>Thomis</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Lefevre</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). <article-title>Skeletal maturation, body size, and motor coordination in youth 11&#x2013;14 years</article-title>. <source>Med. Sci. Sports Exerc.</source> <volume>48</volume> (<issue>6</issue>), <fpage>1129</fpage>&#x2013;<lpage>1135</lpage>. <pub-id pub-id-type="doi">10.1249/mss.0000000000000873</pub-id>
</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gams</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Nemec</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Ijspeert</surname>
<given-names>A. J.</given-names>
</name>
<name>
<surname>Ude</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Coupling movement primitives: interaction with the environment and bimanual tasks</article-title>. <source>IEEE Trans. Robotics</source> <volume>30</volume> (<issue>4</issue>), <fpage>816</fpage>&#x2013;<lpage>830</lpage>. <pub-id pub-id-type="doi">10.1109/tro.2014.2304775</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Garcia-Rosas</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Oetomo</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Manzie</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Tan</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Choong</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>On the relationship between human motor control performance and kinematic synergies in upper limb prosthetics</article-title>,&#x201d; in <conf-name>2018 40th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)</conf-name> (<publisher-loc>Honolulu, Hawaii, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>3194</fpage>&#x2013;<lpage>3197</lpage>. <pub-id pub-id-type="doi">10.1109/EMBC.2018.8512992</pub-id>
</citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Geoffroy</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Mansard</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Raison</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Achiche</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Todorov</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>From inverse kinematics to optimal control</article-title>. <source>Adv. Robot Kinemat.</source>, <fpage>409</fpage>&#x2013;<lpage>418</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-06698-1_42</pub-id>
</citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gielniak</surname>
<given-names>M. J.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>C. K.</given-names>
</name>
<name>
<surname>Thomaz</surname>
<given-names>A. L.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Generating human-like motion for robots</article-title>. <source>Int. J. Robotics Res.</source> <volume>32</volume> (<issue>11</issue>), <fpage>1275</fpage>&#x2013;<lpage>1301</lpage>. <pub-id pub-id-type="doi">10.1177/0278364913490533</pub-id>
</citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Giszter</surname>
<given-names>S. F.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Motor primitives - new data and future questions</article-title>. <source>Curr. Opin. Neurobiol.</source> <volume>33</volume>, <fpage>156</fpage>&#x2013;<lpage>165</lpage>. <pub-id pub-id-type="doi">10.1016/j.conb.2015.04.004</pub-id>
</citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gong</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Xie</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Task motion planning for anthropomorphic arms based on human arm movement primitives</article-title>. <source>Industrial Robot-The Int. J. Robotics Res. Appl.</source> <volume>47</volume> (<issue>5</issue>), <fpage>669</fpage>&#x2013;<lpage>681</lpage>. <pub-id pub-id-type="doi">10.1108/ir-12-2019-0261</pub-id>
</citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gosselin-Kessiby</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Messier</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Kalaska</surname>
<given-names>J. F.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Evidence for automatic on-line adjustments of hand orientation during natural reaching movements to stationary targets</article-title>. <source>J. Neurophysiology</source> <volume>99</volume> (<issue>4</issue>), <fpage>1653</fpage>&#x2013;<lpage>1671</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00980.2007</pub-id>
</citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Guiard</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>1987</year>). <article-title>Asymmetric division of labor in human skilled bimanual action: the kinematic chain as a model</article-title>. <source>J. Mot. Behav.</source> <volume>19</volume> (<issue>4</issue>), <fpage>486</fpage>&#x2013;<lpage>517</lpage>. <pub-id pub-id-type="doi">10.1080/00222895.1987.10735426</pub-id>
</citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Guigon</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Baraduc</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Desmurget</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Computational motor control: redundancy and invariance</article-title>. <source>J. Neurophysiology</source> <volume>97</volume> (<issue>1</issue>), <fpage>331</fpage>&#x2013;<lpage>347</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00290.2006</pub-id>
</citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Herbort</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Butz</surname>
<given-names>M. V.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Planning and control of hand orientation in grasping movements</article-title>. <source>Exp. Brain Res.</source> <volume>202</volume> (<issue>4</issue>), <fpage>867</fpage>&#x2013;<lpage>878</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-010-2191-9</pub-id>
</citation>
</ref>
<ref id="B125">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Trajectory planning method of 6-DOF modular manipulator based on polynomial interpolation</article-title>. <source>Journal of Computational Methods in Sciences and Engineering</source> <volume>23</volume> (<issue>3</issue>), <fpage>1589</fpage>&#x2013;<lpage>1600</lpage>. <pub-id pub-id-type="doi">10.3233/JCM-226672</pub-id>
</citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Huang</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Qiu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Learning physical human&#x2013;robot interaction with coupled cooperative primitives for a lower exoskeleton</article-title>. <source>IEEE Trans. Automation Sci. Eng.</source> <volume>16</volume> (<issue>4</issue>), <fpage>1566</fpage>&#x2013;<lpage>1574</lpage>. <pub-id pub-id-type="doi">10.1109/tase.2018.2886376</pub-id>
</citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Huang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>C. L. P.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Motor learning and generalization using broad learning adaptive neural control</article-title>. <source>IEEE Trans. Industrial Electron.</source> <volume>67</volume> (<issue>10</issue>), <fpage>8608</fpage>&#x2013;<lpage>8617</lpage>. <pub-id pub-id-type="doi">10.1109/tie.2019.2950853</pub-id>
</citation>
</ref>
<ref id="B38">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Kang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Tillery</surname>
<given-names>S. H.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2003</year>). &#x201c;<article-title>Determining natural arm configuration along reaching trajectory</article-title>,&#x201d; in <conf-name>Proceedings of the 25th Annual lntemational Conference of the IEEE Engineering in Medicine and Biology Society</conf-name> (<publisher-loc>Cancun, Mexico</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1444</fpage>&#x2013;<lpage>1447</lpage>. <pub-id pub-id-type="doi">10.1109/iembs.2003.1279599</pub-id>
</citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kavraki</surname>
<given-names>L. E.</given-names>
</name>
<name>
<surname>Svestka</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Latombe</surname>
<given-names>J. C.</given-names>
</name>
<name>
<surname>Overmars</surname>
<given-names>M. H.</given-names>
</name>
</person-group> (<year>1996</year>). <article-title>Probabilistic roadmaps for path planning in high-dimensional configuration spaces</article-title>. <source>IEEE Trans. Robotics Automation</source> <volume>12</volume> (<issue>4</issue>), <fpage>566</fpage>&#x2013;<lpage>580</lpage>. <pub-id pub-id-type="doi">10.1109/70.508439</pub-id>
</citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kiesler</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Powers</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Fussell</surname>
<given-names>S. R.</given-names>
</name>
<name>
<surname>Torrey</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Anthropomorphic interactions with a robot and robot-like agent</article-title>. <source>Soc. Cogn.</source> <volume>26</volume> (<issue>2</issue>), <fpage>169</fpage>&#x2013;<lpage>181</lpage>. <pub-id pub-id-type="doi">10.1521/soco.2008.26.2.169</pub-id>
</citation>
</ref>
<ref id="B41">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Kim</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Milutinovic</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Rosen</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2012</year>). &#x201c;<article-title>Resolving the redundancy of a seven dof wearable robotic system based on kinematic and dynamic constraint</article-title>,&#x201d; in <conf-name>2012 IEEE International Conference on Robotics and Automation</conf-name> (<publisher-loc>Saint Paul, Minnesota, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>305</fpage>&#x2013;<lpage>310</lpage>. <pub-id pub-id-type="doi">10.1109/icra.2012.6224830</pub-id>
</citation>
</ref>
<ref id="B42">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Kim</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Park</surname>
<given-names>J. H.</given-names>
</name>
</person-group> (<year>2006</year>). &#x201c;<article-title>Human-like arm motion generation for humanoid robots using motion capture database</article-title>,&#x201d; in <conf-name>Proceedings of the 2006 IEEE/RSJ International Conference on Intelligent Robots and Systems</conf-name> (<publisher-loc>Beijing, China</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>3486</fpage>&#x2013;<lpage>3491</lpage>. <pub-id pub-id-type="doi">10.1109/iros.2006.282591</pub-id>
</citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Koenig</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Matari&#x107;</surname>
<given-names>M. J.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Robot life-long task learning from human demonstrations: a Bayesian approach</article-title>. <source>Aut. Robots</source> <volume>41</volume> (<issue>5</issue>), <fpage>1173</fpage>&#x2013;<lpage>1188</lpage>. <pub-id pub-id-type="doi">10.1007/s10514-016-9601-1</pub-id>
</citation>
</ref>
<ref id="B44">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Kuffner</surname>
<given-names>J. J.</given-names>
</name>
<name>
<surname>Lavalle</surname>
<given-names>S. M.</given-names>
</name>
</person-group> (<year>2000</year>). &#x201c;<article-title>RRT-connect: an efficient approach to single-query path planning</article-title>,&#x201d; in <conf-name>Proceedings of the 2000 IEEE International Conference on Robotics and Automation</conf-name> (<publisher-loc>San Francisco, California, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>995</fpage>&#x2013;<lpage>1001</lpage>. <pub-id pub-id-type="doi">10.1109/robot.2000.844730</pub-id>
</citation>
</ref>
<ref id="B45">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>K&#xfc;hnlenz</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Sosnowski</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Bu&#xdf;</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Wollherr</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>K&#xfc;hnlenz</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Buss</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Increasing helpfulness towards a robot by emotional adaption to the user</article-title>. <source>Int. J. Soc. Robotics</source> <volume>5</volume> (<issue>4</issue>), <fpage>457</fpage>&#x2013;<lpage>476</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-013-0182-2</pub-id>
</citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kulic</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Venture</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Yamane</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Demircan</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Mizuuchi</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Mombaur</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Anthropomorphic movement analysis and synthesis: a survey of methods and applications</article-title>. <source>IEEE Trans. Robotics</source> <volume>32</volume> (<issue>4</issue>), <fpage>776</fpage>&#x2013;<lpage>795</lpage>. <pub-id pub-id-type="doi">10.1109/tro.2016.2587744</pub-id>
</citation>
</ref>
<ref id="B47">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kuniyoshi</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Inaba</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Inoue</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>1994</year>). <article-title>Learning by watching: extracting reusable task knowledge from visual observation of human performance</article-title>. <source>IEEE Trans. robotics automation</source> <volume>10</volume> (<issue>6</issue>), <fpage>799</fpage>&#x2013;<lpage>822</lpage>. <pub-id pub-id-type="doi">10.1109/70.338535</pub-id>
</citation>
</ref>
<ref id="B48">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Latash</surname>
<given-names>M. L.</given-names>
</name>
<name>
<surname>Scholz</surname>
<given-names>J. P.</given-names>
</name>
<name>
<surname>Sch&#xf6;ner</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2002</year>). <article-title>Motor control strategies revealed in the structure of motor variability</article-title>. <source>Exerc. Sport Sci. Rev.</source> <volume>30</volume> (<issue>1</issue>), <fpage>26</fpage>&#x2013;<lpage>31</lpage>. <pub-id pub-id-type="doi">10.1097/00003677-200201000-00006</pub-id>
</citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lauretti</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Cordella</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Zollo</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>A hybrid joint/cartesian DMP-based approach for obstacle avoidance of anthropomorphic assistive robots</article-title>. <source>Int. J. Soc. Robotics</source> <volume>11</volume> (<issue>5</issue>), <fpage>783</fpage>&#x2013;<lpage>796</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-019-00597-w</pub-id>
</citation>
</ref>
<ref id="B50">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lenzi</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Lipsey</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Sensinger</surname>
<given-names>J. W.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>The RIC arm - a small, anthropomorphic transhumeral prosthesis</article-title>. <source>IEEE-ASME Trans. Mechatronics</source> <volume>21</volume> (<issue>6</issue>), <fpage>2660</fpage>&#x2013;<lpage>2671</lpage>. <pub-id pub-id-type="doi">10.1109/tmech.2016.2596104</pub-id>
</citation>
</ref>
<ref id="B51">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Fang</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Cai</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2019a</year>). <article-title>Inverse kinematic analysis and trajectory planning of a modular upper limb rehabilitation exoskeleton</article-title>. <source>Technol. Health Care</source> <volume>27</volume> (<issue>S1</issue>), <fpage>123</fpage>&#x2013;<lpage>132</lpage>. <pub-id pub-id-type="doi">10.3233/THC-199012</pub-id>
</citation>
</ref>
<ref id="B52">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Han</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Xiong</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Human-like redundancy resolution: an integrated inverse kinematics scheme for anthropomorphic manipulators with radial elbow offset</article-title>. <source>Adv. Eng. Inf.</source> <volume>54</volume>, <fpage>101812</fpage>. <pub-id pub-id-type="doi">10.1016/j.aei.2022.101812</pub-id>
</citation>
</ref>
<ref id="B53">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Bicchi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Fukuda</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2019b</year>). <article-title>Guest editorial neuro-robotics systems: sensing, cognition, learning, and control</article-title>. <source>IEEE Trans. Cognitive Dev. Syst.</source> <volume>11</volume> (<issue>2</issue>), <fpage>145</fpage>&#x2013;<lpage>147</lpage>. <pub-id pub-id-type="doi">10.1109/tcds.2019.2915408</pub-id>
</citation>
</ref>
<ref id="B54">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Zuo</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Zeroing dynamics method for motion control of industrial upper-limb exoskeleton system with minimal potential energy modulation</article-title>. <source>Measurement</source> <volume>163</volume>, <fpage>107964</fpage>. <pub-id pub-id-type="doi">10.1016/j.measurement.2020.107964</pub-id>
</citation>
</ref>
<ref id="B55">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>H.-Y.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>W.-J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>R.-J.</given-names>
</name>
<name>
<surname>Tung</surname>
<given-names>C.-W.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>P.-J.</given-names>
</name>
<name>
<surname>Chang</surname>
<given-names>I. P.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Image recognition and force measurement application in the humanoid robot imitation</article-title>. <source>IEEE Trans. Instrum. Meas.</source> <volume>61</volume> (<issue>1</issue>), <fpage>149</fpage>&#x2013;<lpage>161</lpage>. <pub-id pub-id-type="doi">10.1109/tim.2011.2161025</pub-id>
</citation>
</ref>
<ref id="B56">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Xiong</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2013</year>). &#x201c;<article-title>A novel 10-DoF exoskeleton rehabilitation robot based on the postural synergies of upper extremity movements</article-title>,&#x201d; in <conf-name>Intelligent Robotics and Applications: 6th International Conference, ICIRA 2013</conf-name> (<publisher-loc>Busan, South Korea</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>363</fpage>&#x2013;<lpage>372</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-642-40852-6_37</pub-id>
</citation>
</ref>
<ref id="B57">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Xiong</surname>
<given-names>C.-H.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>W.-B.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>X.-L.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Postural synergy based design of exoskeleton robot replicating human arm reaching movements</article-title>. <source>Robotics Aut. Syst.</source> <volume>99</volume>, <fpage>84</fpage>&#x2013;<lpage>96</lpage>. <pub-id pub-id-type="doi">10.1016/j.robot.2017.10.003</pub-id>
</citation>
</ref>
<ref id="B58">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Steil</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Analytical inverse kinematics solver for anthropomorphic 7-DOF redundant manipulators with human-like configuration constraints</article-title>. <source>J. Intelligent Robotic Syst.</source> <volume>86</volume> (<issue>1</issue>), <fpage>63</fpage>&#x2013;<lpage>79</lpage>. <pub-id pub-id-type="doi">10.1007/s10846-016-0449-6</pub-id>
</citation>
</ref>
<ref id="B59">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>A trajectory and force dual-incremental robot skill learning and generalization framework using improved dynamical movement primitives and adaptive neural network control</article-title>. <source>Neurocomputing</source> <volume>521</volume>, <fpage>146</fpage>&#x2013;<lpage>159</lpage>. <pub-id pub-id-type="doi">10.1016/j.neucom.2022.11.076</pub-id>
</citation>
</ref>
<ref id="B60">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>L&#xfc;tkebohle</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Hegel</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Schulz</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Hackel</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Wrede</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Wachsmuth</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2010</year>). &#x201c;<article-title>The bielefeld anthropomorphic robot head "flobi"</article-title>,&#x201d; in <conf-name>2010 IEEE International Conference on Robotics and Automation Anchorage Convention District</conf-name> (<publisher-loc>Anchorage, Alaska, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>3384</fpage>&#x2013;<lpage>3391</lpage>. <pub-id pub-id-type="doi">10.1109/robot.2010.5509173</pub-id>
</citation>
</ref>
<ref id="B61">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Milighetti</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Vallone</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>De Luca</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2011</year>). &#x201c;<article-title>Adaptive predictive gaze control of a redundant humanoid robot head</article-title>,&#x201d; in <conf-name>2011 IEEE/RSJ International Conference on Intelligent Robots and Systems</conf-name> (<publisher-loc>San Francisco, California, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>3192</fpage>&#x2013;<lpage>3198</lpage>. <pub-id pub-id-type="doi">10.1109/iros.2011.6094417</pub-id>
</citation>
</ref>
<ref id="B62">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Minato</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Shimada</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Itakura</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Ishiguro</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Evaluating the human likeness of an android by comparing gaze behaviors elicited by the android and a person</article-title>. <source>Adv. Robot.</source> <volume>20</volume> (<issue>10</issue>), <fpage>1147</fpage>&#x2013;<lpage>1163</lpage>. <pub-id pub-id-type="doi">10.1163/156855306778522505</pub-id>
</citation>
</ref>
<ref id="B63">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Moiseev</surname>
<given-names>S. A.</given-names>
</name>
<name>
<surname>Gorodnichev</surname>
<given-names>R. M.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Motor synergy structure variability in different intensity locomotions</article-title>. <source>Hum. Physiol.</source> <volume>48</volume> (<issue>4</issue>), <fpage>370</fpage>&#x2013;<lpage>380</lpage>. <pub-id pub-id-type="doi">10.1134/s0362119722040089</pub-id>
</citation>
</ref>
<ref id="B64">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Morasso</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>1981</year>). <article-title>Spatial control of arm movements</article-title>. <source>Exp. Brain Res.</source> <volume>42</volume> (<issue>2</issue>), <fpage>223</fpage>&#x2013;<lpage>227</lpage>. <pub-id pub-id-type="doi">10.1007/bf00236911</pub-id>
</citation>
</ref>
<ref id="B65">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>M&#xfc;lling</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Kober</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Kroemer</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Peters</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Learning to select and generalize striking movements in robot table tennis</article-title>. <source>Int. J. Robotics Res.</source> <volume>32</volume> (<issue>3</issue>), <fpage>263</fpage>&#x2013;<lpage>279</lpage>. <pub-id pub-id-type="doi">10.1177/0278364912472380</pub-id>
</citation>
</ref>
<ref id="B66">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mussa-Ivaldi</surname>
<given-names>F. A.</given-names>
</name>
<name>
<surname>Bizzi</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2000</year>). <article-title>Motor learning through the combination of primitives</article-title>. <source>Philos. Trans. R. Soc. Lond B Biol. Sci.</source> <volume>355</volume> (<issue>1404</issue>), <fpage>1755</fpage>&#x2013;<lpage>1769</lpage>. <pub-id pub-id-type="doi">10.1098/rstb.2000.0733</pub-id>
</citation>
</ref>
<ref id="B67">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Nagahama</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Demura</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yamazaki</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Robot learning of tool manipulation based on visual teaching with mitate expression</article-title>. <source>Adv. Robot.</source> <volume>35</volume> (<issue>12</issue>), <fpage>741</fpage>&#x2013;<lpage>755</lpage>. <pub-id pub-id-type="doi">10.1080/01691864.2021.1914724</pub-id>
</citation>
</ref>
<ref id="B68">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Northrup</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Sarkar</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Kawamura</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2001</year>). &#x201c;<article-title>Biologically-inspired control architecture for a humanoid robot</article-title>,&#x201d; in <conf-name>Proceedings of the 2001 IEEE/RSJ International Conference on Intelligent Robots and Systems</conf-name> (<publisher-loc>Maui, Hawaii, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1100</fpage>&#x2013;<lpage>1105</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2001.976315</pub-id>
</citation>
</ref>
<ref id="B69">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Ogawa</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Narioka</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Hosoda</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2011</year>). &#x201c;<article-title>Development of whole-body humanoid "Pneumat-BS" with pneumatic musculoskeletal system</article-title>,&#x201d; in <conf-name>2011 IEEE/RSJ International Conference on Intelligent Robots and Systems</conf-name> (<publisher-loc>San Francisco, California, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>4838</fpage>&#x2013;<lpage>4843</lpage>. <pub-id pub-id-type="doi">10.1109/iros.2011.6095091</pub-id>
</citation>
</ref>
<ref id="B70">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Olson</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Abd</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Engeberg</surname>
<given-names>E. D.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Human-inspired robotic eye-hand coordination enables new communication channels between humans and robots</article-title>. <source>Int. J. Soc. Robotics</source> <volume>13</volume> (<issue>5</issue>), <fpage>1033</fpage>&#x2013;<lpage>1046</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-020-00693-2</pub-id>
</citation>
</ref>
<ref id="B71">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Omrcen</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Ude</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2010</year>). &#x201c;<article-title>Redundant control of a humanoid robot head with foveated vision for object tracking</article-title>,&#x201d; in <conf-name>2010 IEEE International Conference on Robotics and Automation Anchorage Convention District</conf-name> (<publisher-loc>Anchorage, Alaska, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>4151</fpage>&#x2013;<lpage>4156</lpage>. <pub-id pub-id-type="doi">10.1109/robot.2010.5509515</pub-id>
</citation>
</ref>
<ref id="B72">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Otaki</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Shibata</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>The effect of different visual stimuli on reaction times: a performance comparison of young and middle-aged people</article-title>. <source>J. Phys. Ther. Sci.</source> <volume>31</volume> (<issue>3</issue>), <fpage>250</fpage>&#x2013;<lpage>254</lpage>. <pub-id pub-id-type="doi">10.1589/jpts.31.250</pub-id>
</citation>
</ref>
<ref id="B73">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Paik</surname>
<given-names>J. K.</given-names>
</name>
<name>
<surname>Shin</surname>
<given-names>B. H.</given-names>
</name>
<name>
<surname>Bang</surname>
<given-names>Y.-B.</given-names>
</name>
<name>
<surname>Shim</surname>
<given-names>Y.-B.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Development of an anthropomorphic robotic arm and hand for interactive humanoids</article-title>. <source>J. Bionic Eng.</source> <volume>9</volume> (<issue>2</issue>), <fpage>133</fpage>&#x2013;<lpage>142</lpage>. <pub-id pub-id-type="doi">10.1016/s1672-6529(11)60107-8</pub-id>
</citation>
</ref>
<ref id="B74">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pham</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Ariga</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Tominaga</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Oku</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Nakayama</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Uemura</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2014</year>). <article-title>Extraction and implementation of muscle synergies in neuro-mechanical control of upper limb movement</article-title>. <source>Adv. Robot.</source> <volume>28</volume> (<issue>11</issue>), <fpage>745</fpage>&#x2013;<lpage>757</lpage>. <pub-id pub-id-type="doi">10.1080/01691864.2013.876940</pub-id>
</citation>
</ref>
<ref id="B75">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pignat</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Calinon</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Learning adaptive dressing assistance from human demonstration</article-title>. <source>Robotics Aut. Syst.</source> <volume>93</volume>, <fpage>61</fpage>&#x2013;<lpage>75</lpage>. <pub-id pub-id-type="doi">10.1016/j.robot.2017.03.017</pub-id>
</citation>
</ref>
<ref id="B76">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Potkonjak</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Tzafestas</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kostic</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Djordjevic</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2001</year>). <article-title>Human-like behavior of robot arms: general considerations and the handwriting task - Part I: mathematical description of human-like motion: distributed positioning and virtual fatigue</article-title>. <source>Robotics Comput. Integr. Manuf.</source> <volume>17</volume> (<issue>4</issue>), <fpage>305</fpage>&#x2013;<lpage>315</lpage>. <pub-id pub-id-type="doi">10.1016/S0736-5845(01)00005-9</pub-id>
</citation>
</ref>
<ref id="B77">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Qian</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Valls Miro</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Jing</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Hierarchical and parameterized learning of pick-and-place manipulation from under-specified human demonstrations</article-title>. <source>Adv. Robot.</source> <volume>34</volume> (<issue>13</issue>), <fpage>858</fpage>&#x2013;<lpage>872</lpage>. <pub-id pub-id-type="doi">10.1080/01691864.2020.1778523</pub-id>
</citation>
</ref>
<ref id="B78">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Qu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Fu</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Human-like coordination motion learning for a redundant dual-arm robot</article-title>. <source>Robotics Computer-Integrated Manuf.</source> <volume>57</volume>, <fpage>379</fpage>&#x2013;<lpage>390</lpage>. <pub-id pub-id-type="doi">10.1016/j.rcim.2018.12.017</pub-id>
</citation>
</ref>
<ref id="B79">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rajruangrabin</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Popa</surname>
<given-names>D. O.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Robot head motion control with an emphasis on realism of neck&#x2013;eye coordination during object tracking</article-title>. <source>J. Intelligent Robotic Syst.</source> <volume>63</volume> (<issue>2</issue>), <fpage>163</fpage>&#x2013;<lpage>190</lpage>. <pub-id pub-id-type="doi">10.1007/s10846-010-9468-x</pub-id>
</citation>
</ref>
<ref id="B80">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rohrer</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Fasoli</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Krebs</surname>
<given-names>H. I.</given-names>
</name>
<name>
<surname>Hughes</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Volpe</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Frontera</surname>
<given-names>W. R.</given-names>
</name>
<etal/>
</person-group> (<year>2002</year>). <article-title>Movement smoothness changes during stroke recovery</article-title>. <source>J. Neurosci.</source> <volume>22</volume> (<issue>18</issue>), <fpage>8297</fpage>&#x2013;<lpage>8304</lpage>. <pub-id pub-id-type="doi">10.1523/jneurosci.22-18-08297.2002</pub-id>
</citation>
</ref>
<ref id="B81">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sasagawa</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Sakaino</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Tsuji</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Motion generation using bilateral control-based imitation learning with autoregressive learning</article-title>. <source>IEEE Access</source> <volume>9</volume>, <fpage>20508</fpage>&#x2013;<lpage>20520</lpage>. <pub-id pub-id-type="doi">10.1109/access.2021.3054960</pub-id>
</citation>
</ref>
<ref id="B82">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Scholz</surname>
<given-names>J. P.</given-names>
</name>
<name>
<surname>Sch&#xf6;ner</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>1999</year>). <article-title>The uncontrolled manifold concept: identifying control variables for a functional task</article-title>. <source>Exp. Brain Res.</source> <volume>126</volume> (<issue>3</issue>), <fpage>289</fpage>&#x2013;<lpage>306</lpage>. <pub-id pub-id-type="doi">10.1007/s002210050738</pub-id>
</citation>
</ref>
<ref id="B83">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Seraji</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>1989</year>). <article-title>Configuration control of redundant manipulators: theory and implementation</article-title>. <source>IEEE Trans. Robotics Automation</source> <volume>5</volume> (<issue>4</issue>), <fpage>472</fpage>&#x2013;<lpage>490</lpage>. <pub-id pub-id-type="doi">10.1109/70.88062</pub-id>
</citation>
</ref>
<ref id="B84">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shin</surname>
<given-names>S. Y.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Human-like motion generation and control for humanoid&#x27;s dual arm object manipulation</article-title>. <source>IEEE Trans. Industrial Electron.</source> <volume>62</volume> (<issue>4</issue>), <fpage>2265</fpage>&#x2013;<lpage>2276</lpage>. <pub-id pub-id-type="doi">10.1109/tie.2014.2353017</pub-id>
</citation>
</ref>
<ref id="B85">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Silv&#xe9;rio</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Rozo</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Calinon</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Caldwell</surname>
<given-names>D. G.</given-names>
</name>
</person-group> (<year>2015</year>). &#x201c;<article-title>Learning bimanual end-effector poses from demonstrations using task-parameterized dynamical systems</article-title>,&#x201d; in <conf-name>2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)</conf-name> (<publisher-loc>Hamburg, Germany</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>464</fpage>&#x2013;<lpage>470</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2015.7353413</pub-id>
</citation>
</ref>
<ref id="B86">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Soechting</surname>
<given-names>J. F.</given-names>
</name>
<name>
<surname>Lacquaniti</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>1981</year>). <article-title>Invariant characteristics of a pointing movement in man</article-title>. <source>J. Neurosci.</source> <volume>1</volume> (<issue>7</issue>), <fpage>710</fpage>&#x2013;<lpage>720</lpage>. <pub-id pub-id-type="doi">10.1523/jneurosci.01-07-00710.1981</pub-id>
</citation>
</ref>
<ref id="B87">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Soltani Zarrin</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Zeiaee</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Langari</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Buchanan</surname>
<given-names>J. J.</given-names>
</name>
<name>
<surname>Robson</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Towards autonomous ergonomic upper-limb exoskeletons: a computational approach for planning a human-like path</article-title>. <source>Robotics Aut. Syst.</source> <volume>145</volume>, <fpage>103843</fpage>. <pub-id pub-id-type="doi">10.1016/j.robot.2021.103843</pub-id>
</citation>
</ref>
<ref id="B88">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Soltani-Zarrin</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Zeiaee</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Langari</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Robson</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Reference path generation for upper-arm exoskeletons considering scapulohumeral rhythms</article-title>,&#x201d; in <conf-name>2017 International Conference on Rehabilitation Robotics (ICORR)</conf-name> (<publisher-loc>London, UK</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>753</fpage>&#x2013;<lpage>758</lpage>. <pub-id pub-id-type="doi">10.1109/icorr.2017.8009338</pub-id>
</citation>
</ref>
<ref id="B89">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Su</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Enayati</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Vantadori</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Spinoglio</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ferrigno</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>De Momi</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Online human-like redundancy optimization for tele-operated anthropomorphic manipulators</article-title>. <source>Int. J. Adv. Robotic Syst.</source> <volume>15</volume> (<issue>6</issue>), <fpage>172988141881469</fpage>. <pub-id pub-id-type="doi">10.1177/1729881418814695</pub-id>
</citation>
</ref>
<ref id="B90">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Su</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Qi</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Aliverti</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ferrigno</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>De Momi</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Deep neural network approach in human-like redundancy optimization for anthropomorphic manipulators</article-title>. <source>IEEE Access</source> <volume>7</volume>, <fpage>124207</fpage>&#x2013;<lpage>124216</lpage>. <pub-id pub-id-type="doi">10.1109/access.2019.2937380</pub-id>
</citation>
</ref>
<ref id="B91">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Tahara</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Luo</surname>
<given-names>Z.-W.</given-names>
</name>
<name>
<surname>Arimoto</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2006</year>). &#x201c;<article-title>On control mechanism of human-like reaching movements with musculo-skeletal redundancy</article-title>,&#x201d; in <conf-name>Proceedings of the 2006 IEEE/RSJ International Conference on Intelligent Robots and Systems</conf-name> (<publisher-loc>Beijing, China</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1402</fpage>&#x2013;<lpage>1409</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2006.281931</pub-id>
</citation>
</ref>
<ref id="B92">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ta&#xef;x</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Tran</surname>
<given-names>M. T.</given-names>
</name>
<name>
<surname>Sou&#xe8;res</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Guigon</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Generating human-like reaching movements with a humanoid robot: a computational approach</article-title>. <source>J. Comput. Sci.</source> <volume>4</volume> (<issue>4</issue>), <fpage>269</fpage>&#x2013;<lpage>284</lpage>. <pub-id pub-id-type="doi">10.1016/j.jocs.2012.08.001</pub-id>
</citation>
</ref>
<ref id="B93">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Takano</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Nakamura</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Planning of goal-oriented motion from stochastic motion primitives and optimal controlling of joint torques in whole-body</article-title>. <source>Robotics Aut. Syst.</source> <volume>91</volume>, <fpage>226</fpage>&#x2013;<lpage>233</lpage>. <pub-id pub-id-type="doi">10.1016/j.robot.2017.01.013</pub-id>
</citation>
</ref>
<ref id="B94">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Tang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Cao</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2014</year>). &#x201c;<article-title>Muscle synergy analysis for similar upper limb motion tasks</article-title>,&#x201d; in <conf-name>2014 36th Annual International Conference of the IEEE Engineering in Medicine and Biology Society</conf-name> (<publisher-loc>Chicago, Illinois, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>3590</fpage>&#x2013;<lpage>3593</lpage>. <pub-id pub-id-type="doi">10.1109/EMBC.2014.6944399</pub-id>
</citation>
</ref>
<ref id="B95">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Barsotti</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>X.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>Kinematic synergy of multi-DoF movement in upper limb and its application for rehabilitation exoskeleton motion planning</article-title>. <source>Front. Neurorobotics</source> <volume>13</volume>, <fpage>99</fpage>. <pub-id pub-id-type="doi">10.3389/fnbot.2019.00099</pub-id>
</citation>
</ref>
<ref id="B96">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Tangpattanakul</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Artrit</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2009</year>). &#x201c;<article-title>Minimum-time trajectory of robot manipulator using harmony search algorithm</article-title>,&#x201d; in <conf-name>2009 6th International Conference on Electrical Engineering/Electronics, Computer, Telecommunications and Information Technology (ECTI-CON)</conf-name> (<publisher-loc>Chonburi, Thailand</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>354</fpage>&#x2013;<lpage>357</lpage>. <pub-id pub-id-type="doi">10.1109/ecticon.2009.5137025</pub-id>
</citation>
</ref>
<ref id="B97">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Taniai</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Naniwa</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Nishii</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Optimal reaching trajectories based on feedforward control</article-title>. <source>Biol. Cybern.</source> <volume>116</volume> (<issue>4</issue>), <fpage>517</fpage>&#x2013;<lpage>526</lpage>. <pub-id pub-id-type="doi">10.1007/s00422-022-00939-4</pub-id>
</citation>
</ref>
<ref id="B98">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Todorov</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>Optimality principles in sensorimotor control</article-title>. <source>Nat. Neurosci.</source> <volume>7</volume> (<issue>9</issue>), <fpage>907</fpage>&#x2013;<lpage>915</lpage>. <pub-id pub-id-type="doi">10.1038/nn1309</pub-id>
</citation>
</ref>
<ref id="B99">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Todorov</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Jordan</surname>
<given-names>M. I.</given-names>
</name>
</person-group> (<year>1998</year>). <article-title>Smoothness maximization along a predefined path accurately predicts the speed profiles of complex arm movements</article-title>. <source>J. Neurophysiology</source> <volume>80</volume> (<issue>2</issue>), <fpage>696</fpage>&#x2013;<lpage>714</lpage>. <pub-id pub-id-type="doi">10.1152/jn.1998.80.2.696</pub-id>
</citation>
</ref>
<ref id="B100">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Togo</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kagawa</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Uno</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Uncontrolled manifold reference feedback control of multi-joint robot arms</article-title>. <source>Front. Comput. Neurosci.</source> <volume>10</volume>, <fpage>69</fpage>. <pub-id pub-id-type="doi">10.3389/fncom.2016.00069</pub-id>
</citation>
</ref>
<ref id="B101">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tommasino</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Campolo</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Task-space separation principle: a force-field approach to motion planning for redundant manipulators</article-title>. <source>Bioinspir Biomim.</source> <volume>12</volume> (<issue>2</issue>), <fpage>026003</fpage>. <pub-id pub-id-type="doi">10.1088/1748-3190/aa5558</pub-id>
</citation>
</ref>
<ref id="B102">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Von Zitzewitz</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Boesch</surname>
<given-names>P. M.</given-names>
</name>
<name>
<surname>Wolf</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Riener</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Quantifying the human likeness of a humanoid robot</article-title>. <source>Int. J. Soc. Robotics</source> <volume>5</volume> (<issue>2</issue>), <fpage>263</fpage>&#x2013;<lpage>276</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-012-0177-4</pub-id>
</citation>
</ref>
<ref id="B103">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wada</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Kaneko</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Nakano</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Osu</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Kawato</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2001</year>). <article-title>Quantitative examinations for multi joint arm trajectory planning-using a robust calculation algorithm of the minimum commanded torque change trajectory</article-title>. <source>Neural Netw.</source> <volume>14</volume> (<issue>4-5</issue>), <fpage>381</fpage>&#x2013;<lpage>393</lpage>. <pub-id pub-id-type="doi">10.1016/s0893-6080(01)00026-0</pub-id>
</citation>
</ref>
<ref id="B104">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Tian</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Tang</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>&#x3b1;-Variable adaptive model free control of iReHave upper-limb exoskeleton</article-title>. <source>Adv. Eng. Softw.</source> <volume>148</volume>, <fpage>102872</fpage>. <pub-id pub-id-type="doi">10.1016/j.advengsoft.2020.102872</pub-id>
</citation>
</ref>
<ref id="B105">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wei</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Designing human-like behaviors for anthropomorphic arm in humanoid robot NAO</article-title>. <source>Robotica</source> <volume>38</volume> (<issue>7</issue>), <fpage>1205</fpage>&#x2013;<lpage>1226</lpage>. <pub-id pub-id-type="doi">10.1017/s026357471900136x</pub-id>
</citation>
</ref>
<ref id="B106">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wenderoth</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Kupferberg</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Huber</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Helfer</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Lenz</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Knoll</surname>
<given-names>A.</given-names>
</name>
<etal/>
</person-group> (<year>2012</year>). <article-title>Moving just like you: motor interference depends on similar motility of agent and observer</article-title>. <source>PLoS ONE</source> <volume>7</volume> (<issue>6</issue>), <fpage>e39637</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0039637</pub-id>
</citation>
</ref>
<ref id="B107">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wochner</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Driess</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Zimmermann</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Haeufle</surname>
<given-names>D. F. B.</given-names>
</name>
<name>
<surname>Toussaint</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Schmitt</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Optimality principles in human point-to-manifold reaching accounting for muscle dynamics</article-title>. <source>Front. Comput. Neurosci.</source> <volume>14</volume>, <fpage>38</fpage>. <pub-id pub-id-type="doi">10.3389/fncom.2020.00038</pub-id>
</citation>
</ref>
<ref id="B108">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wolpert</surname>
<given-names>D. M.</given-names>
</name>
<name>
<surname>Ghahramani</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2000</year>). <article-title>Computational principles of movement neuroscience</article-title>. <source>Nat. Neurosci.</source> <volume>3</volume> (<issue>11</issue>), <fpage>1212</fpage>&#x2013;<lpage>1217</lpage>. <pub-id pub-id-type="doi">10.1038/81497</pub-id>
</citation>
</ref>
<ref id="B109">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xia</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Jiang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Tian</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Dual fast marching tree algorithm for human-like motion planning of anthropomorphic arms with task constraints</article-title>. <source>IEEE-ASME Trans. Mechatronics</source> <volume>26</volume> (<issue>5</issue>), <fpage>2803</fpage>&#x2013;<lpage>2813</lpage>. <pub-id pub-id-type="doi">10.1109/tmech.2020.3047476</pub-id>
</citation>
</ref>
<ref id="B110">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Xie</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2011</year>). &#x201c;<article-title>Human-like motion planning for robotic arm system</article-title>,&#x201d; in <conf-name>The 15th International Conference on Advanced Robotics</conf-name> (<publisher-loc>Tallinn, Estonia</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>88</fpage>&#x2013;<lpage>93</lpage>. <pub-id pub-id-type="doi">10.1109/icar.2011.6088543</pub-id>
</citation>
</ref>
<ref id="B111">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xue</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zuo</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Bueno</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>A robot human-like learning framework applied to unknown environment interaction</article-title>. <source>Complexity</source> <volume>2022</volume>, <fpage>1</fpage>&#x2013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1155/2022/5648826</pub-id>
</citation>
</ref>
<ref id="B112">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yamane</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Kinematic redundancy resolution for humanoid robots by human motion database</article-title>. <source>IEEE Robotics Automation Lett.</source> <volume>5</volume> (<issue>4</issue>), <fpage>6948</fpage>&#x2013;<lpage>6955</lpage>. <pub-id pub-id-type="doi">10.1109/lra.2020.3026972</pub-id>
</citation>
</ref>
<ref id="B113">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Naeem</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Fei</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Humanoid motion planning of robotic arm based on human arm action feature and reinforcement learning</article-title>. <source>Mechatronics</source> <volume>78</volume>, <fpage>102630</fpage>. <pub-id pub-id-type="doi">10.1016/j.mechatronics.2021.102630</pub-id>
</citation>
</ref>
<ref id="B114">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yi</surname>
<given-names>J.-B.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Kang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Song</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Park</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Yi</surname>
<given-names>S.-J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Anthropomorphic grasping of complex-shaped objects using imitation learning</article-title>. <source>Appl. Sci.</source> <volume>12</volume> (<issue>24</issue>), <fpage>12861</fpage>. <pub-id pub-id-type="doi">10.3390/app122412861</pub-id>
</citation>
</ref>
<ref id="B115">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Young</surname>
<given-names>S. J.</given-names>
</name>
<name>
<surname>Pratt</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Chau</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Target-directed movements at a comfortable pace: movement duration and Fitts&#x27;s law</article-title>. <source>J. Mot. Behav.</source> <volume>41</volume> (<issue>4</issue>), <fpage>339</fpage>&#x2013;<lpage>346</lpage>. <pub-id pub-id-type="doi">10.3200/jmbr.41.4.339-346</pub-id>
</citation>
</ref>
<ref id="B116">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Zacharias</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Schlette</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Schmidt</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Borst</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Rossmann</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Hirzinger</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2011</year>). &#x201c;<article-title>Making planned paths look more human-like in humanoid robot manipulation planning</article-title>,&#x201d; in <conf-name>2011 IEEE International Conference on Robotics and Automation Shanghai International Conference Center</conf-name> (<publisher-loc>Shanghai, China</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1192</fpage>&#x2013;<lpage>1198</lpage>. <pub-id pub-id-type="doi">10.1109/icra.2011.5979553</pub-id>
</citation>
</ref>
<ref id="B117">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zanchettin</surname>
<given-names>A. M.</given-names>
</name>
<name>
<surname>Bascetta</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Rocco</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Acceptability of robotic manipulators in shared working environments through human-like redundancy resolution</article-title>. <source>Appl. Ergon.</source> <volume>44</volume> (<issue>6</issue>), <fpage>982</fpage>&#x2013;<lpage>989</lpage>. <pub-id pub-id-type="doi">10.1016/j.apergo.2013.03.028</pub-id>
</citation>
</ref>
<ref id="B118">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Zanchettin</surname>
<given-names>A. M.</given-names>
</name>
<name>
<surname>Rocco</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Bascetta</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Symeonidis</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Peldschus</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2011</year>). &#x201c;<article-title>Kinematic analysis and synthesis of the human arm motion during a manipulation task</article-title>,&#x201d; in <conf-name>2011 IEEE International Conference on Robotics and Automation (ICRA)</conf-name> (<publisher-loc>Shanghai, China</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>2692</fpage>&#x2013;<lpage>2697</lpage>. <pub-id pub-id-type="doi">10.1109/icra.2011.5979654</pub-id>
</citation>
</ref>
<ref id="B119">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Qi</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Quan</surname>
<given-names>H.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>A novel human-like control framework for mobile medical service robot</article-title>. <source>Complexity</source> <volume>2020</volume>, <fpage>1</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1155/2020/2905841</pub-id>
</citation>
</ref>
<ref id="B120">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Beck</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Magnenat-Thalmann</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Human-like behavior generation based on head-arms model for robot tracking external targets and body parts</article-title>. <source>IEEE Trans. Cybern.</source> <volume>45</volume> (<issue>8</issue>), <fpage>1390</fpage>&#x2013;<lpage>1400</lpage>. <pub-id pub-id-type="doi">10.1109/tcyb.2014.2351416</pub-id>
</citation>
</ref>
<ref id="B121">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Wen</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Scano</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Number of trials and data structure affect the number and components of muscle synergies in upper-limb reaching movements</article-title>. <source>Physiol. Meas.</source> <volume>43</volume> (<issue>10</issue>), <fpage>105008</fpage>. <pub-id pub-id-type="doi">10.1088/1361-6579/ac9773</pub-id>
</citation>
</ref>
<ref id="B122">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Solving the inverse kinematics problem of multiple redundant manipulators with collision avoidance in dynamic environments</article-title>. <source>J. Intelligent Robotic Syst.</source> <volume>101</volume> (<issue>2</issue>), <fpage>30</fpage>. <pub-id pub-id-type="doi">10.1007/s10846-020-01279-w</pub-id>
</citation>
</ref>
<ref id="B123">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zucker</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ratliff</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Dragan</surname>
<given-names>A. D.</given-names>
</name>
<name>
<surname>Pivtoraiko</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Klingensmith</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Dellin</surname>
<given-names>C. M.</given-names>
</name>
<etal/>
</person-group> (<year>2013</year>). <article-title>CHOMP: covariant Hamiltonian optimization for motion planning</article-title>. <source>Int. J. Robotics Res.</source> <volume>32</volume> (<issue>9-10</issue>), <fpage>1164</fpage>&#x2013;<lpage>1193</lpage>. <pub-id pub-id-type="doi">10.1177/0278364913488805</pub-id>
</citation>
</ref>
<ref id="B124">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Zuher</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Romero</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2012</year>). &#x201c;<article-title>Recognition of human motions for imitation and control of a humanoid robot</article-title>,&#x201d; in <conf-name>2012 Brazilian Robotics Symposium and Latin American Robotics Symposium</conf-name> (<publisher-loc>Fortaleza, Brazil</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>190</fpage>&#x2013;<lpage>195</lpage>. <pub-id pub-id-type="doi">10.1109/sbr-lars.2012.38</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>