<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Robot. AI</journal-id>
<journal-title>Frontiers in Robotics and AI</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Robot. AI</abbrev-journal-title>
<issn pub-type="epub">2296-9144</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/frobt.2021.611251</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Robotics and AI</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Model-Augmented Haptic Telemanipulation: Concept, Retrospective Overview, and Current Use Cases</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Hulin</surname> <given-names>Thomas</given-names></name>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/861248/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Panzirsch</surname> <given-names>Michael</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/1226818/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Singh</surname> <given-names>Harsimran</given-names></name>
</contrib>
<contrib contrib-type="author">
<name><surname>Coelho</surname> <given-names>Andre</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/1207031/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Balachandran</surname> <given-names>Ribin</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/1335640/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Pereira</surname> <given-names>Aaron</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/1226877/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Weber</surname> <given-names>Bernhard M.</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/947111/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Bechtel</surname> <given-names>Nicolai</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/1253060/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Riecke</surname> <given-names>Cornelia</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/1341276/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Brunner</surname> <given-names>Bernhard</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/603262/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Lii</surname> <given-names>Neal Y.</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/1248024/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Klodmann</surname> <given-names>Julian</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/1336581/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Hellings</surname> <given-names>Anja</given-names></name>
</contrib>
<contrib contrib-type="author">
<name><surname>Hagmann</surname> <given-names>Katharina</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/1210319/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Quere</surname> <given-names>Gabriel</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/1335389/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Bauer</surname> <given-names>Adrian S.</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/1335317/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Sierotowicz</surname> <given-names>Marek</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/1100149/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Lampariello</surname> <given-names>Roberto</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/515803/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Vogel</surname> <given-names>J&#x000F6;rn</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/505304/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Dietrich</surname> <given-names>Alexander</given-names></name>
</contrib>
<contrib contrib-type="author">
<name><surname>Leidner</surname> <given-names>Daniel</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/1335474/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Ott</surname> <given-names>Christian</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/153890/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Hirzinger</surname> <given-names>Gerd</given-names></name>
</contrib>
<contrib contrib-type="author">
<name><surname>Albu-Sch&#x000E4;ffer</surname> <given-names>Alin</given-names></name>
<uri xlink:href="http://loop.frontiersin.org/people/617795/overview"/>
</contrib>
</contrib-group>
<aff><institution>Institute of Robotics and Mechatronics, German Aerospace Center (DLR)</institution>, <addr-line>Wessling</addr-line>, <country>Germany</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Ioannis Dimitrios Zoulias, United Kingdom Atomic Energy Authority, United Kingdom</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Guy Burroughes, United Kingdom Atomic Energy Authority, United Kingdom; Sigal Berman, Ben-Gurion University of the Negev, Israel</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Thomas Hulin <email>thomas.hulin&#x00040;dlr.de</email></corresp>
<fn fn-type="other" id="fn001"><p>This article was submitted to Human-Robot Interaction, a section of the journal Frontiers in Robotics and AI</p></fn></author-notes>
<pub-date pub-type="epub">
<day>11</day>
<month>06</month>
<year>2021</year>
</pub-date>
<pub-date pub-type="collection">
<year>2021</year>
</pub-date>
<volume>8</volume>
<elocation-id>611251</elocation-id>
<history>
<date date-type="received">
<day>28</day>
<month>09</month>
<year>2020</year>
</date>
<date date-type="accepted">
<day>03</day>
<month>03</month>
<year>2021</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2021 Hulin, Panzirsch, Singh, Coelho, Balachandran, Pereira, Weber, Bechtel, Riecke, Brunner, Lii, Klodmann, Hellings, Hagmann, Quere, Bauer, Sierotowicz, Lampariello, Vogel, Dietrich, Leidner, Ott, Hirzinger and Albu-Sch&#x000E4;ffer.</copyright-statement>
<copyright-year>2021</copyright-year>
<copyright-holder>Hulin, Panzirsch, Singh, Coelho, Balachandran, Pereira, Weber, Bechtel, Riecke, Brunner, Lii, Klodmann, Hellings, Hagmann, Quere, Bauer, Sierotowicz, Lampariello, Vogel, Dietrich, Leidner, Ott, Hirzinger and Albu-Sch&#x000E4;ffer</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract><p>Certain telerobotic applications, including telerobotics in space, pose particularly demanding challenges to both technology and humans. Traditional bilateral telemanipulation approaches often cannot be used in such applications due to technical and physical limitations such as long and varying delays, packet loss, and limited bandwidth, as well as high reliability, precision, and task duration requirements. In order to close this gap, we research model-augmented haptic telemanipulation (MATM) that uses two kinds of models: a remote model that enables shared autonomous functionality of the teleoperated robot, and a local model that aims to generate assistive augmented haptic feedback for the human operator. Several technological methods that form the backbone of the MATM approach have already been successfully demonstrated in accomplished telerobotic space missions. On this basis, we have applied our approach in more recent research to applications in the fields of orbital robotics, telesurgery, caregiving, and telenavigation. In the course of this work, we have advanced specific aspects of the approach that were of particular importance for each respective application, especially shared autonomy, and haptic augmentation. This overview paper discusses the MATM approach in detail, presents the latest research results of the various technologies encompassed within this approach, provides a retrospective of DLR&#x00027;s telerobotic space missions, demonstrates the broad application potential of MATM based on the aforementioned use cases, and outlines lessons learned and open challenges.</p></abstract>
<kwd-group>
<kwd>telerobotics</kwd>
<kwd>model-augmented telemanipulation</kwd>
<kwd>shared control</kwd>
<kwd>shared autonomy</kwd>
<kwd>haptic constraints</kwd>
</kwd-group>
<counts>
<fig-count count="15"/>
<table-count count="0"/>
<equation-count count="0"/>
<ref-count count="103"/>
<page-count count="22"/>
<word-count count="15452"/>
</counts>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>1. Introduction</title>
<p>Telerobotics is a powerful tool to combine the benefits of robotic manipulation with human mental abilities and manipulation strategies. Modern bilateral teleoperation systems provide haptic feedback that enables a human operator to perceive interaction forces and&#x02014;more importantly&#x02014;to intuitively control the forces applied by a teleoperated robot on its environment. This kind of feedback is crucial for delicate applications and tasks that comprise handling of fragile, dangerous, or expensive parts, or require high precision as it enables the operator to feel guiding structures or sliding on surfaces with limited forces. Such situations typically occur for applications in space, biochemical laboratories, or radiation environments. The latter was also the motivation for the development of many of the early telemanipulation systems that handled highly radioactive materials starting in the 1940s (cyberneticzoo.com, <xref ref-type="bibr" rid="B1">2014</xref>). While these early systems were purely mechanically coupled, a revolution in telemanipulation occurred with the introduction of information technology (IT), which made it possible to electronically couple the haptic interaction device to the remote system. The major advantages of this innovation were (i) the ability to cover greater distances, (ii) a greater flexibility in control, (iii) a clearer presentation of forces, but above all (iv) a drastic reduction of apparent inertia. In addition to numerous incremental improvements in hardware and control approaches, there were a few other relatively new developments that significantly advanced the applicability and ease of use of telerobotics.</p>
<p>First, software-generated constraints that can limit the position or force of the haptic device or remote robot were introduced as so-called virtual fixtures (VFs; Rosenberg, <xref ref-type="bibr" rid="B74">1993b</xref>). They guide the robot through a predefined desired path or restrict it from getting into a forbidden region of the workspace. Thus, VFs reduce the control freedom given to the operator while enhancing task accuracy and task completion time (Kang et al., <xref ref-type="bibr" rid="B41">2004</xref>). They are also ideal for tasks requiring speed and precision while being repetitive in nature (Payandeh and Stanisic, <xref ref-type="bibr" rid="B65">2002</xref>). Therefore, VFs are a great candidate for applications such as laparoscopic surgery, where they add an additional layer of safety and increase the surgeon&#x00027;s dexterity (Turro and Khatib, <xref ref-type="bibr" rid="B93">2001</xref>). However, they have also proven to be highly beneficial for telemanipulation tasks with very long time delays (Xia et al., <xref ref-type="bibr" rid="B101">2012</xref>).</p>
<p>Second, diverse forms of cooperation between operator and robot emerged, such as <italic>supervisory control</italic> (Ferrell and Sheridan, <xref ref-type="bibr" rid="B27">1967</xref>; Sheridan, <xref ref-type="bibr" rid="B82">1992</xref>) or <italic>shared control</italic> (Anderson, <xref ref-type="bibr" rid="B5">1994</xref>). These approaches, subsumed under the term <italic>shared autonomy</italic>, aim at overcoming limitations of the operator that are due to the complexity of the robot or time delay between operator and robot by transferring some workload to the robot. Embedded into this concept, shared control refers to a continuous blend of human and robot control, ranging from safeguarding techniques (Fong et al., <xref ref-type="bibr" rid="B28">2001</xref>), where the robot validates the operator&#x00027;s input, to adaptive virtual fixtures (Aarno et al., <xref ref-type="bibr" rid="B2">2005</xref>), that support the operator in achieving predicted goals. Supervisory control, on the other hand, refers to an intermittent programming of the robot while the robot engages in a closed-loop interaction with its environment.</p>
<p>Third, model-mediated telemanipulation (MMT) was introduced where the user interacts with a local haptically rendered model estimate of the remote environment that is constantly updated, instead of being directly coupled to a teleoperated robot (Hannaford, <xref ref-type="bibr" rid="B34">1989</xref>; Mitra and Niemeyer, <xref ref-type="bibr" rid="B51">2008</xref>). The closed loop controller gets split into two control loops on either side of the communication channel, i.e., the haptic device and the remote robot side. Such an architecture reduces the conservatism while maintaining stability for arbitrary time delays. MMT has also been extended to multi-operator multi-robot systems to enhance performance compared to the classical bilateral approach (Passenberg et al., <xref ref-type="bibr" rid="B64">2010</xref>). Despite its advantages, MMT has a few unresolved challenges. One of which is the unstable haptic rendering on the operator side during drastic changes in the updated local model. Another, and perhaps more significant, hurdle is the environment modeling. A model mismatch can result in transmitting dangerous position information for the robot to follow, which can end up with the robot exerting high forces and thereby damaging itself and the remote environment (Xu et al., <xref ref-type="bibr" rid="B102">2016</xref>). To this end, reinforcement learning has recently been integrated into the concept of MMT in order to adapt to new environmental conditions and to cope with high uncertainties (Beik-Mohammadi et al., <xref ref-type="bibr" rid="B15">2020</xref>).</p>
<p>While stability is not an issue in an ideal system without delays and with unlimited communication bandwidth, real-world scenarios, especially those with communication over long distances, pose additional challenges in terms of control. To this end, bilateral control approaches have been continuously evolved in parallel to the aforementioned developments, and today enable haptic telemanipulation via communication including time delays of several seconds (Panzirsch et al., <xref ref-type="bibr" rid="B60">2020a</xref>). Although such approaches can guarantee stable operation, telemanipulation with such significant delay still remains demanding for the operator, and a more powerful approach facilitating the task would be useful.</p>
<p>One of the main research interests at DLR is to enable robots to operate in orbit and on the surface of celestial bodies and to perform exploration or construction tasks there. <xref ref-type="fig" rid="F1">Figure 1</xref> illustrates this vision and shows a spectrum of robotic systems to realize this goal. However, since robots are currently not able to operate fully autonomously, telerobotics is key to achieve this goal. The robots on the surface can be operated either from Earth or from a spacecraft, depending on the distance and the availability of a spacecraft.</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>Illustration of DLR&#x00027;s space robot vision. Teleoperation is a key topic of DLR&#x00027;s long-term research endeavors for robot applications on celestial bodies and is illustrated by the example of telemanipulation of a humanoid robot from a spacecraft. While the number of tasks that robots can perform autonomously is steadily increasing, teleoperation will still be required over the next few years or decades for situations where autonomy fails.</p></caption>
<graphic xlink:href="frobt-08-611251-g0001.tif"/>
</fig>
<p>This overview paper presents the model-augmented haptic telemanipulation (MATM) approach as a promising solution for such a telerobotic scenario. This approach uses two kinds of models, a remote model to enable shared autonomous functionality of the teleoperated robot and a local model to generate assistive augmented haptic feedback for the human operator. The forces that are displayed to the operator are a combination of augmented forces from the local model and forces resulting from interaction between the robot and the distant environment. The remote model is an environmental model of the remote environment to enable shared autonomy functionality to the teleoperated robot. The MATM approach can be considered a generalization of MMT, where the user interacts with a local model that acts as a medium between the haptic device and the teleoperated robot. Yet, MATM has two major differences, i.e., the feedback to the human is a combination of real and augmented virtual feedback, and a remote model is introduced to enable shared autonomy of the remote robot.</p>
<p>MATM can be regarded as an intermediate step toward supervised and fully autonomous manipulation. <xref ref-type="fig" rid="F2">Figure 2</xref> illustrates how this approach differentiates from classical bilateral telemanipulation, telenavigation of mobile robots, and supervised autonomy in terms of time delay and visual feedback quality. With increasing levels of support and autonomy, higher delays can be dealt with and visual quality demands decrease. The figure also shows the delays that occurred in some of the missions and use cases described in this paper.</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>Schematic diagram that illustrates up to which time delay and under which visual conditions different telerobotic concepts can be applied.</p>
<p>It also shows the delays that occurred for selected missions from section 3 and use cases from section 4 (credit for photos of the Moon, the Mars, and the gateway: NASA).</p></caption>
<graphic xlink:href="frobt-08-611251-g0002.tif"/>
</fig>
<p>The paper first presents MATM in detail and provides a state-of-the-art research overview in the underlying technologies used (section 2). Second, it gives a historical overview of the robotic space missions that were the main driving force behind this technology and highlights which aspects of MATM were advanced in each mission (section 3). Third, it discusses the potential and limitations of MATM based on use cases in different applications (section 4). In addition, the paper is also intended to serve as a reference work and therefore contains references to key publications that provide further details on specific aspects of the respective technology, mission, or use case.</p>
</sec>
<sec id="s2">
<title>2. Model-Augmented Telemanipulation</title>
<p>While in classical telemanipulation the operator is coupled to a remote robot via a haptic device, we aim to reach improved performance, efficiency, and ease of use during demanding telemanipulation tasks by means of two models that generate augmented feedback to the human operator and support the movements of the remote robot. <xref ref-type="fig" rid="F3">Figure 3</xref> schematically depicts this MATM approach and illustrates the elements that play a key role in it. The haptic interaction device acts as an input and output interface for the human operator and provides haptic force feedback. The remote robot is telemanipulated by the human operator and is intended to execute the desired commands in a remote environment. The communication channel connecting the two systems can cause a significant delay due to long transmission distances or limitations in the communication infrastructure. On each side of the channel, a model supports the movements or augments feedback, respectively. The following subsections describe the most important challenges in detail and outline our proposed solution. The applications of the methods described in this section along with its project or use case description will be presented in sections 3 and 4.</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Illustration of the control scheme of MATM. The local and the remote model can both read and modify (or augment) the commands to the remote robot and the feedback to the operator. Certain methods and situations demand such full signal access, as explained in the respective subsections.</p></caption>
<graphic xlink:href="frobt-08-611251-g0003.tif"/>
</fig>
<sec>
<title>2.1. Telemanipulation Under Time Delay</title>
<p>Traditional bilateral control approaches, such as Lawrence&#x00027;s well-known 4-channel architecture (Lawrence, <xref ref-type="bibr" rid="B43">1993</xref>), enable telemanipulation with force feedback. For space applications, other factors need to be considered, such as motion (Onal and Sitti, <xref ref-type="bibr" rid="B56">2009</xref>) and force scaling (Goldfarb, <xref ref-type="bibr" rid="B30">1998</xref>), which address the differences in precision requirements and are used for training purposes, or indexing (Johnsen and Corliss, <xref ref-type="bibr" rid="B40">1971</xref>) which is a displacement technique to avoid reaching the workspace limits of the haptic device (Hagn et al., <xref ref-type="bibr" rid="B33">2010</xref>). Most importantly, control approaches require considering the time delay in the communication channel that originates due to the huge distances between the operator and remote robot, which can have severe destabilizing consequences.</p>
<p>Extensive research has been carried out toward addressing the issue of stability for delayed bilateral teleoperation systems, of which passivity-based methods are widely accepted and recognized due to their robustness and ease of applicability to any linear or nonlinear system independent of its model parameters. The Time Domain Passivity Approach (TDPA; Ryu et al., <xref ref-type="bibr" rid="B75">2010</xref>) has garnered attention for being robust to variable delay and for being the least conservative of the passivity-based approaches. A novel 4-channel architecture using TDPA was implemented and tested in a real space experiment where the cosmonauts aboard the International Space Station (ISS) stably teleoperated a manipulator arm with two degrees of freedom (DoF) on Earth despite the inherent time delay (Artigas et al., <xref ref-type="bibr" rid="B7">2016b</xref>). Nevertheless, TDPA too suffers from delay-dependent position drift and high frequency force oscillations. Therefore, some enhanced methods were proposed recently to remove this position mismatch between the haptic interaction device and remote robot while improving force transparency and enhancing the task performance (Coelho et al., <xref ref-type="bibr" rid="B24">2018</xref>; Singh et al., <xref ref-type="bibr" rid="B84">2018</xref>, <xref ref-type="bibr" rid="B85">2019a</xref>; Panzirsch et al., <xref ref-type="bibr" rid="B61">2020b</xref>).</p>
<p>Although position drift is an undesired phenomenon in telemanipulation, the authors of Panzirsch et al. (<xref ref-type="bibr" rid="B60">2020a</xref>) use it to their advantage to achieve a safe robot&#x02013;environment interaction by using measured force feedback for the TDPA energy observations. Experimental validation for tasks such as slide &#x00026; plug-in and pick &#x00026; place were carried out safely and with a force feedback of sufficient quality, even with time delay of up to 3 s. This control algorithm was also extended for delayed telenavigation, where fictitious forces were generated by a set of &#x0201C;predictive&#x0201D; polygons, implemented in the driving direction of a mobile robot, overlapping with the objects in a depth data map (Sierotowicz et al., <xref ref-type="bibr" rid="B83">2020</xref>).</p>
<p>Almost all of the state-of-the-art bilateral teleoperation controllers are implemented on both sides of the communication channel, i.e., on the local and the remote side. It would be advantageous if the stabilizing controller is implemented on either side of the communication channel, i.e., on the local side or on the remote side, as this would reduce the reliance on communication bandwidth and therefore diminish the effects of packet delay, loss, and jitter. This was recently achieved by the proxy-based controller (Singh et al., <xref ref-type="bibr" rid="B87">2020</xref>) that is only implemented on the local side. Experimental results showed enhanced position synchronization and realistic impedance matching for a communication suffering from unknown time-varying delays of up to 2 s, and interacting with an active environment.</p>
<p>The above methods form the backbone of stable bilateral control even for communication that includes a delay of several seconds. On this basis, haptic augmentation and shared autonomy can enrich the telemanipulation framework as explained in the subsequent sections.</p>
</sec>
<sec>
<title>2.2. Haptic Augmentation</title>
<p>Haptic augmentation is the blending of the feedback from a remote robot with the feedback from a model. This haptic feedback is augmented to the haptic device so that it can be perceived by the operator and provide support during telemanipulation. While for many telemanipulation systems with negligible communication delay a distinction between local and remote model does not play a role, the situation is different for space applications in which communication delay affects the telemanipulation. Normally this feedback is implemented on the local model in order to obtain haptic support without delay. In some applications, such augmentation is also fed to the remote robot to achieve a more direct reaction and higher precision in manipulation tasks (this signal path is represented by the bi-directional arrow B in <xref ref-type="fig" rid="F3">Figure 3</xref>).</p>
<p>A standard tool in haptics for generating such feedback are <italic>haptic constraints</italic>&#x02014;also denoted as <italic>virtual fixtures</italic>. The concept of virtual fixtures was introduced by Rosenberg (<xref ref-type="bibr" rid="B72">1992</xref>) to support the operator during a telemanipulation task and was also evaluated for time-delayed systems (Rosenberg, <xref ref-type="bibr" rid="B73">1993a</xref>). Virtual fixtures are control algorithms which regulate manipulator motion, surveyed in Bowyer et al. (<xref ref-type="bibr" rid="B19">2013</xref>). They are typically employed to support or guide the operator for high precision tasks, avoid critical regions in which the remote robot could cause some damage, and avoid running into robotic constraints such as workspace limits or singularities.</p>
<p>To enable more general geometries for virtual fixtures, haptic algorithms can be used instead of geometric primitives. A prominent example of such an algorithm is the <italic>Voxmap PointShell Algorithm</italic> that uses volumetric data structures and is able to compute collision forces at haptic rates (1 kHz) even for extremely complex geometries in multi-object simulations (McNeely et al., <xref ref-type="bibr" rid="B47">2006</xref>; Sagardia, <xref ref-type="bibr" rid="B76">2019</xref>). Such an algorithm can also be combined with a physics-engine to include physical phenomena in the simulation (Sagardia et al., <xref ref-type="bibr" rid="B77">2014</xref>). This capability is very useful for telemanipulation to predict object movements and poses and thus counteract the effects of time delays.</p>
<p>The concept of augmented haptics can also be applied for bimanual telemanipulation tasks or multilateral teleoperation in which more than one haptic device or remote robot is used. For instance, if a high precision in orientation is demanded, two haptic devices can be coupled with a virtual rigid link to create an additional virtual grasping point that helps the operator to precisely set orientations (Panzirsch et al., <xref ref-type="bibr" rid="B59">2018a</xref>). In cooperative tasks, where two operators jointly manipulate an object, knowing the intention of the respective other operator would be useful. Providing haptic information about the collaborating operator&#x00027;s intention is faster (force = acceleration) than on the visual/audio channel (velocity information). This concept was already evaluated in a delicate experiment with flexible objects involving the ISS (Panzirsch et al., <xref ref-type="bibr" rid="B58">2017</xref>). In this experiment, the intention was measured by force sensors at the two haptic devices.</p>
<p>A challenge in this task is to differentiate between the feedback from intention and from the remote robot. In general, the operator should be able to distinguish between real and extended haptic feedback. One approach to achieve this is to apply a drastically higher stiffness for the amplified haptic feedback than for the one from the remote environment, which is feasible because the signals of the local path are not affected by the time delay of the communication channel (Hulin et al., <xref ref-type="bibr" rid="B38">2013</xref>; Singh et al., <xref ref-type="bibr" rid="B86">2019b</xref>). Another open question is how to best parameterize and distribute haptic augmentation between the local and the remote model. Future theoretical investigations and user studies should address this topic.</p>
<p>Feedback similar to haptic augmentation may also be implemented directly on the remote side and thus support the remote robot&#x00027;s movements without having to send commands over the communication channel first, making it faster and more precise compared to using a local model. This kind of model-based support of the remote robot belongs to the field of shared autonomy, which is discussed in the subsequent section.</p>
</sec>
<sec>
<title>2.3. Shared Autonomy</title>
<p>Commanding robots is a highly demanding, tedious task for humans. This is partially because of the sheer number of degrees of freedom that need to be orchestrated, partially because of time delays that cause adoption of the move-and-wait strategy (Ferrell, <xref ref-type="bibr" rid="B26">1965</xref>). <italic>Shared autonomy</italic> (also known as <italic>mixed initiative interaction</italic>) is an umbrella term subsuming multiple techniques that aim at reducing the workload of the operator by delegating some of the control to the robot (Goodrich et al., <xref ref-type="bibr" rid="B31">2013</xref>). Examples of shared autonomy are <italic>supervised control</italic>, where the operator commands the robot intermittently with high-level tasks while the robot engages in a closed-loop interaction with the environment, and <italic>shared control</italic>, where continuous input from the user is processed by the robot in order to validate, augment, or map it to higher dimensions.</p>
<p>In the MATM approach, we use these techniques to support the operator while performing a telemanipulation task. In shared control, the robot may relieve the operator by taking over certain subtasks of the overall task. An easy-to-understand example is to constrain the orientation of a manipulated object (Quere et al., <xref ref-type="bibr" rid="B68">2020</xref>). To achieve such support, the methods of the previous section on haptic augmentation can be used and applied on the remote robot. The advantage over applying haptic augmentation to the haptic device is higher precision and faster reaction (without the delay of the communication channel). In addition, the shared control algorithm can take control of non-telemanipulated robot parts or joints. An example is automatic positioning of robot hand fingers to establish a stable grasp (Hertkorn, <xref ref-type="bibr" rid="B36">2016</xref>).</p>
<p>In a mixed initiative shared control approach, the weighted sum of the commands (positions/forces/torques etc.) from both agents, namely the human operator and the autonomous system, is given to the remote robot as the final command signal (Musi&#x00107; and Hirche, <xref ref-type="bibr" rid="B52">2017</xref>). The weights for the individual commands are called task or authority allocation factors (Parasuraman and Riley, <xref ref-type="bibr" rid="B63">1997</xref>). These factors can be fixed (Panzirsch et al., <xref ref-type="bibr" rid="B59">2018a</xref>), or time varying to account for certain situation changes (Inagaki, <xref ref-type="bibr" rid="B39">2003</xref>). In a recent publication, we developed a novel time varying approach, where the authority is shifted from the autonomous system to the human operator based on real measurement noise (Balachandran et al., <xref ref-type="bibr" rid="B10">2020a</xref>) using Bayesian filters. This means that in case the autonomous system is not able to complete the task at hand due to bad measurements, the human operator is asked for intervention and to implement corrective measures to complete the task. If and when the sensor measurement quality improves, the control authority is smoothly given back to the autonomous system. This reduces the physical and mental efforts demanded from the operator, who has to intervene only when the autonomous system has low confidence in its own task completion ability.</p>
<p>Although more robust, fixed authority allocation-based shared control limits possibilities of human intervention in case of failure of autonomy. On the other hand, adaptive allocation factors are more robust against autonomy failures but are sensitive to the probabilistic filters&#x00027; convergence. Further improvements can be made to optimize the mixed-initiative approach by combining confidence factors from both autonomy and the human operator, availing possibilities offered by artificial intelligence and machine learning.</p>
<p>While shared control depends on continual input from the user, supervised control can deal with intermittent input and is thus suitable for commanding multiple robots. We apply supervised control in a two-step approach. First, the user specifies a goal in an intuitive user interface (UI) (Birkenkampf et al., <xref ref-type="bibr" rid="B16">2014</xref>), which is then translated into <italic>Planning Domain Definition Language</italic> (Ghallab et al., <xref ref-type="bibr" rid="B29">1998</xref>). Second, the robot uses its local autonomy to reach the goal without any further need of user intervention. The local autonomy is based on <italic>action templates</italic>, which store the symbolic and geometric descriptions for manipulation instructions. The robot creates a plan to reach the specified goal based on the symbolic description in the action template headers. Robot-specific geometric reasoning modules then evaluate the geometrical descriptions of the respective action templates. In case of an error, the planner first assesses possible alternative geometric solutions before initiating backtracking to explore different solutions by re-evaluating previous actions. The procedure is described in detail in Leidner (<xref ref-type="bibr" rid="B45">2019</xref>) and the approach has been validated in multiple experimental sessions with astronauts on board the ISS (Schmaus et al., <xref ref-type="bibr" rid="B79">2019</xref>). We also extended this approach to probabilistic domains where actions can fail (Bauer et al., <xref ref-type="bibr" rid="B13">2020</xref>). This allows operators to choose between plans that reach the goal with different likelihood. Sometimes, operators might be willing to trade success probability for completion time, number of steps of the plan, or possible side effects.</p>
<p>Ongoing work focuses on how to switch from teleoperation to supervised control, which requires to update the world model according to the changes induced by the teleoperated robot. The challenges that arise during model updates and their respective solutions are the subject of the following section.</p>
</sec>
<sec>
<title>2.4. Model Update</title>
<p>The model update represents the updating of the data of the local and the remote model as well as the synchronization between these two models. Two challenges arise directly from this task. First, how can the models be synchronized even though the data of the models may be in a different structure or representation? Second, how can stability be established despite the fact that the updating process is highly nonlinear, especially in case of time delay, jitter, and packet loss?</p>
<p>In the case of supervised control, model update translates to keeping the local model (that is shared between robot and operator) in synchronization with the environment the robot is acting in. This includes detection and localization of objects, but also inference of the symbolic state. Both geometric and symbolic information are needed by the user interface for providing the user with possible actions and by the robot in order to execute those actions. A viable and pragmatic solution for this is a shared knowledge base that stores the object information and provides it to both modules in order to create a knowledge common ground. Part of this knowledge base can be geometric models, available action templates, symbolic state, and pose of the objects. In our implementation, object detection and localization are performed according to Sundermeyer et al. (<xref ref-type="bibr" rid="B89">2018</xref>). The symbolic state of the environment is evaluated based on a digital twin of the robot and the environment in simulation as described in Bauer et al. (<xref ref-type="bibr" rid="B12">2018</xref>).</p>
<p>In order to tackle the second challenge, i.e., the stability of the overall system, we research a novel control framework. The challenges of the proposed framework in terms of closed-loop stability are the fusion of different force feedback channels with computed, measured, or fictitious forces and the design of the reference position for the devices. Those challenges also include the model update, which represents a highly nonlinear functionality especially in the presence of time delay, jitter, and packet loss, making it a potential source of instability.</p>
<p>The energy-based passivity principle represents a highly modular method to assure absolute stability of complex closed-loop systems since the passivity of submodules can assure the passivity of the overall system. Thus, different modules such as the force feedback channel or the haptic augmentation and shared autonomy functionalities (compare <xref ref-type="fig" rid="F3">Figure 3</xref>) can be designed and activated or deactivated, respectively, in a highly adaptive and modular manner. The fusion of different force commands to the haptic input device and remote robot can be passively designed with the help of power control units as earlier presented for multilateral telemanipulation (Panzirsch et al., <xref ref-type="bibr" rid="B57">2013</xref>), telenavigation (Panzirsch et al., <xref ref-type="bibr" rid="B62">2018b</xref>), and haptic augmentation (Panzirsch et al., <xref ref-type="bibr" rid="B58">2017</xref>, <xref ref-type="bibr" rid="B59">2018a</xref>). For example, the haptic augmentation and shared autonomy modules based on local and remote models can be modeled as 1-port subsystems, which can be designed to be intrinsically passive (Weber Martins et al., <xref ref-type="bibr" rid="B95">2018</xref>) or, alternatively, passivity controllers can assure the passivity of the modules including model updates as proposed in Xu et al. (<xref ref-type="bibr" rid="B103">2015</xref>) and Panzirsch et al. (<xref ref-type="bibr" rid="B62">2018b</xref>).</p>
<p>The modularity of the passivity concept simplifies combining independently developed passive modules, since no complex stability analyses of the overall control loops are required. The remaining challenge is the passive design of prospective haptic augmentation and shared autonomy features. It should be noted, though, that passivity is in general not more conservative than the popular Lyapunov stability criterion, especially since passivity does not necessarily have to be ensured in the frequency domain, but can be guaranteed in a highly adaptive manner in the time domain.</p>
</sec>
</sec>
<sec id="s3">
<title>3. Past Telerobotic Space Missions&#x02014;Prior Milestones on the Way Toward Model-Augmented Teleoperation</title>
<p>The starting signal for DLR&#x00027;s telerobotic space missions was given in 1993 with ROTEX (<xref ref-type="fig" rid="F4">Figure 4</xref>). Since then, DLR has contributed to several telerobotic space missions in cooperation with various space agencies, in particular ESA, ROSCOSMOS, and JAXA. The most significant missions for the MATM approach are briefly described in this section. In contrast to a purely historical overview on our telerobotic missions (Artigas and Hirzinger, <xref ref-type="bibr" rid="B9">2016</xref>), this section is intended to relate to the MATM approach and to highlight the specific impact of our past missions, to synthesize the lessons learned, and to point out the challenges ahead.</p>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p>The astronaut Hans Schlegel inside the Space Shuttle Columbia (<bold>left</bold>, credit: NASA) controls the robotic gripper of the ROTEX experiment <bold>(middle)</bold>. The chaser satellite of the ETS-VII mission was equipped with a robot arm (<bold>right</bold>, credit: NASA).</p></caption>
<graphic xlink:href="frobt-08-611251-g0004.tif"/>
</fig>
<sec>
<title>3.1. Model Predictive Teleoperation&#x02014;ROTEX and ETS-VII</title>
<p>The first space robotics experiment performed by DLR was the ROTEX experiment (Hirzinger et al., <xref ref-type="bibr" rid="B37">1993</xref>) during the D2 mission in 1993 on board the Space Shuttle Columbia. A multisensory robot inside the spacecraft successfully worked in four operational modes, i.e.,</p>
<list list-type="bullet">
<list-item><p>automatic (preprogramming on ground, reprogramming from ground),</p></list-item>
<list-item><p>teleoperation on board (astronauts using stereo-TV-monitor),</p></list-item>
<list-item><p>teleoperation from ground (using predictive computer graphics) via human operators and machine intelligence as well,</p></list-item>
<list-item><p>tele-sensor-programming (learning by showing in a completely simulated world on ground including sensory perception with sensor-based execution later on board).</p></list-item>
</list>
<p>The main control concept behind all these modes was a shared autonomy approach, which includes shared control as well as shared intelligence, based on local autonomy loops on board. <xref ref-type="fig" rid="F5">Figure 5</xref> shows the overall loop structures for the sensor-based telerobotic concept.</p>
<fig id="F5" position="float">
<label>Figure 5</label>
<caption><p>The overall loop structures for the sensor-based telerobotic concept of the ROTEX experiment (Hirzinger et al., <xref ref-type="bibr" rid="B37">1993</xref>). [&#x000A9;1993 IEEE. Reprinted, with permission, from (Hirzinger et al., <xref ref-type="bibr" rid="B37">1993</xref>)].</p></caption>
<graphic xlink:href="frobt-08-611251-g0005.tif"/>
</fig>
<p>Due to the large time delays of up to 7 s that were involved during operation from ground, there was no haptic feedback in the ROTEX experiment. Instead, the human operator was enclosed in the feedback loop via stereovision and 3-D graphics on a very high level but with low bandwidth, while the low-level sensory loops were closed directly at the robot on board with high bandwidth.</p>
<p>To handle the large time delay, ROTEX used a predictive computer graphics approach, which seems to be the only way to overcome this problem. A human operator at the remote workstation gave robot commands by looking at a <italic>predicted</italic> graphics model of the robot. The control commands issued to this instantaneously reacting robot simulator were sent to the remote robot as well, using the time-delayed transmission links.</p>
<p>Complex tasks were split up into elemental moves, represented by a certain configuration, which allows the simulated (as well as the real) robot to refine the gross commands autonomously. We introduced the term <italic>tele-sensor-programming</italic> that means the robot is graphically guided through the task (off-line on ground), storing not only the relevant Cartesian poses of the gripper but also the corresponding nominal sensory patterns (graphically simulated) for later reference in the on-board execution phase.</p>
<p>In summary, this mode of tele-sensor-programming is a form of off-line-programming, which tries to overcome the well-known problems of conventional approaches, especially the fact that the simulated and the real world are not identical. But instead of calibrating the robot, tele-sensor-programming provides the real robot with simulated sensory data that refer to relative positions between the end-effector and the environment, thus compensating for any kind of inaccuracies in the absolute positions of the robot and the real world. Using the simulated sensor values during the programming phase can be seen as the first model-based teleoperation approach in space robotics.</p>
<p>A few years later in 1999, DLR got the chance to contribute its own experiment (German ETS-VII Technology Experiment [GETEX]) to the Japanese ETS-VII mission, which was the first space robotics mission with a focus on on-orbit-servicing tasks. For DLR the participation was the first step to a big challenge in space robotics, i.e., the capturing and repair of a failed satellite, completely controlled remotely from ground. In that context, we performed two main tasks, first a series of dynamic experiments to verify our models of free-floating space robots and the identification of the dynamic model parameters; second&#x02014;and this is the more interesting one in the field of telerobotics&#x02014;a peg-in-hole experiment, using VR methods and a <italic>vision-and-force</italic> control scheme, by closing sensor control loops directly on board (force) and via ground communication (vision). Like in ROTEX we used the tele-sensor-programming approach to set the reference values for the visual servoing task, using some dedicated markers as image features, in a virtual environment for later usage in space. For that we developed an approach, which did not need any calibration, because it was only based on the sensor&#x02013;actor relation: the desired Cartesian goal frame of the robot&#x00027;s tool center point was expressed only by the respective visual sensory pattern (Brunner et al., <xref ref-type="bibr" rid="B21">1999</xref>).</p>
</sec>
<sec>
<title>3.2. Force-Feedback&#x02014;ROKVISS and Kontur-2</title>
<p>Launched in 2005 and operated for nearly 5 years in space, the Robotics Component Verification Experiment on the ISS (ROKVISS) was a big success for two reasons: the first aim was the in-flight verification of highly integrated modular robotic joints (<xref ref-type="fig" rid="F6">Figure 6</xref>, left), the second one the demonstration of different control modes, reaching from high system autonomy to force feedback teleoperation (telepresence mode). After ROTEX and GETEX, which did not cover any haptics, ROKVISS was designed to test and verify real telepresence operation using haptic and visual feedback at high data rates. For that the telepresence system of ROKVISS was equipped with</p>
<list list-type="bullet">
<list-item><p>a highly dynamic teleoperated robot including sensors and local intelligence,</p></list-item>
<list-item><p>a high-bandwidth real time communication channel,</p></list-item>
<list-item><p>an immersive multimodal human&#x02013;machine interface.</p></list-item>
</list>
<p>All these components had to be connected by an advanced control concept, which combined shared autonomy and bilateral control of the teleoperated robot and guaranteed a synchronicity between the visual and haptic information. The human&#x02013;machine interface played a major role for immersive telepresence. The operator should feel like being at the remote location. Therefore, DLR developed a new 2-DoF force-feedback joystick corresponding to the two joints of the manipulator in space. Providing the human operator with haptic feedback means to include the human into the control loop, i.e., the human arm was energetically coupled with the manipulator arm at the ISS. The stabilization of this coupled telemanipulation system was complicated due to the presence of time delay in the system (Hannaford and Ryu, <xref ref-type="bibr" rid="B35">2002</xref>). An advantage of ROKVISS was that the communication delay was relatively small (10&#x02013;30 ms) and predictable. This made it possible to simulate additional time delays to test different control schemes and communication systems within a real space experiment (Preusche et al., <xref ref-type="bibr" rid="B67">2006</xref>).</p>
<fig id="F6" position="float">
<label>Figure 6</label>
<caption><p>The ROKVISS system mounted on a platform on the outside of the ISS (<bold>left</bold>, credit: NASA). Cosmonaut Oleg Kononenko with the Kontur-2 joystick on board the ISS (<bold>right</bold>, credit: ROSCOSMOS/O. Kononenko).</p></caption>
<graphic xlink:href="frobt-08-611251-g0006.tif"/>
</fig>
<p>Space agencies are planning and working toward crewed lunar and planetary exploration missions to be realized within the next few decades. Sending astronauts directly to the surface of the celestial bodies is extremely dangerous and costly. Therefore, in an initial, cautious step, robots can be teleoperated from an orbital spacecraft to explore the surface, acquire samples, and construct habitats. To this end, DLR and the Russian space agency ROSCOSMOS collaborated on the Kontur-2 mission during the period 2012&#x02013;2016.</p>
<p>The main goal of the Kontur-2 space mission was to test the feasibility of using force feedback teleoperation from a spacecraft in micro-gravity conditions and to telemanipulate robots on distant planets (Riecke et al., <xref ref-type="bibr" rid="B69">2016</xref>). For this, the ISS was used as the spacecraft and the Earth as planet with a robot on its surface. It was therefore an inverted scenario compared to ROKVISS. To provide high fidelity force feedback to the cosmonauts, DLR developed a space qualified force feedback joystick, which was taken on board the ISS (<xref ref-type="fig" rid="F6">Figure 6</xref>, right). A direct link over S band was used for communication between the ISS and Earth with short latency and ISS experiment windows. In spite of the short latency (10&#x02013;30 ms round-trip delay), it was observed that the bilateral controller was unstable due to the closed control loop with force feedback.</p>
<p>To reduce the performance deterioration that comes as the trade-off while ensuring stability, a novel 4-channel architecture bilateral controller was developed with passivity observers and passivity controllers as explained in section 2.1. This 4-channel bilateral controller provided a stable and highly transparent teleoperation system in spite of the communication delays and data losses and was tested in both terrestrial set-up (for cosmonaut training) and for the real space mission (Artigas et al., <xref ref-type="bibr" rid="B7">2016b</xref>). In addition to the single-operator single-robot teleoperation, further tests were conducted for cooperative grasping of objects by two users.</p>
<p>In the scenario, a cosmonaut on board the ISS and a second operator from ground (located at our project partner in Russia) teleoperated a dual arm robot at DLR to cooperatively handle a flexible sphere. In order to handle the sphere safely (without dropping it or pressing it with too high forces), the haptic intention augmentation approach explained in section 2.2 was tested and verified during the Kontur-2 mission (Panzirsch et al., <xref ref-type="bibr" rid="B58">2017</xref>). It was learned that force feedback provided the cosmonaut with a more intuitive feeling of the robot-environment interaction with which he could modulate the interaction forces more accurately as desired.</p>
<p>A series of human factors experiments was conducted within the Kontur-2 space mission, investigating the benefits of force feedback under conditions of weightlessness. Cosmonauts teleoperated the ROKVISS robot from the ISS with DLR&#x00027;s force feedback joystick. Findings indicated that force feedback is indispensable for teleoperation tasks, although the terrestrial performance level could not be reached in weightlessness. Moreover, haptic support at the joystick (e.g., motion damping) has to be adjusted to be beneficial in weightlessness conditions and higher resistive forces should be avoided (Weber et al., <xref ref-type="bibr" rid="B96">2019</xref>, <xref ref-type="bibr" rid="B98">2020</xref>; Riecke et al., <xref ref-type="bibr" rid="B70">2020</xref>).</p>
</sec>
<sec>
<title>3.3. Supervised Autonomy&#x02014;METERON SUPVIS Justin</title>
<p>Space telerobotics based on haptic telepresence provides close, immersive coupling between the user and the robotic asset. However, it presents two drawbacks: short effective operation time due to user fatigue, and difficulty to scale up (Lii et al., <xref ref-type="bibr" rid="B46">2018</xref>). METERON SUPVIS Justin was a mission to tackle these issues with a different approach to teleoperation with supervised autonomy, or shared autonomy. Rather than using the robot as a haptically coupled avatar for the user, the robots are utilized as intelligent robotic assets, or coworkers to be commanded at the task level.</p>
<p>Between 2017 and 2018, three ISS-Earth telerobotic experiment sessions were carried out with five NASA and ESA astronauts. For METERON SUPVIS Justin, an analog scenario of a Martian surface environment was implemented at DLR in Germany to be serviced by DLR&#x00027;s humanoid robot Rollin&#x00027; Justin (Borst et al., <xref ref-type="bibr" rid="B17">2007</xref>, <xref ref-type="bibr" rid="B18">2009</xref>). The ISS in turn took on the role of the orbiting spacecraft, from where the astronauts commanded the robots on the simulated Martian surface.</p>
<p>To test the robot&#x00027;s ability to carry out an increasing catalog of tasks that could be expected in a space habitat or colony, the SOLar farm EXperimental (SOLEX) environment was developed and constructed at DLR in Oberpfaffenhofen, Germany. The SOLEX environment is equipped with a wide array of systems and devices including solar panels, smart payload units, and a lander, which allowed for the design of different mission scenarios to be carried out by the human&#x02013;robot team (Bayer et al., <xref ref-type="bibr" rid="B14">2019</xref>).</p>
<p>Using action templates (Leidner, <xref ref-type="bibr" rid="B45">2019</xref>) as described in section 2.3, Rollin&#x00027; Justin carried out the task level commands provided by the astronaut by utilizing its local intelligence to process and execute lower level tasks. The knowledge-driven approach was also applied to the user interface design in the form of an intuitive touch screen tablet application (Schmaus et al., <xref ref-type="bibr" rid="B79">2019</xref>). Implemented on a commercial off-the-shelf (COTS) tablet PC, the application provides the crew with vital information on the mission at hand, view from Justin&#x00027;s camera, and a dynamically updated list of relevant commands. This provides an uncluttered and intuitive user interface to command a highly complex robotic asset. <xref ref-type="fig" rid="F7">Figure 7</xref> shows the user interface on the tablet PC being commanded by the ISS crew.</p>
<fig id="F7" position="float">
<label>Figure 7</label>
<caption><p>An example layout of the knowledge-driven intuitive tablet user interface on board the ISS (Lii et al., <xref ref-type="bibr" rid="B46">2018</xref>).</p></caption>
<graphic xlink:href="frobt-08-611251-g0007.tif"/>
</fig>
<p>Through three sessions, increasingly complex tasks were carried out: from service and inspection, to manual device adjustment and maintenance, concluding with a full set of component retrieval and assembly tasks. <xref ref-type="fig" rid="F8">Figure 8</xref> shows ESA astronaut Alexander Gerst performing component retrieval and assembly with Rollin&#x00027; Justin. Thanks to the supervised autonomy approach, not only were all participating ISS crew members able to successfully complete all assigned tasks, but their feedback also indicated that they would be able to handle working with larger robotic teams to perform more complex tasks with this approach.</p>
<fig id="F8" position="float">
<label>Figure 8</label>
<caption><p>ESA astronaut Alexander Gerst on board the ISS (<bold>left</bold>, credit: ESA) commanding DLR&#x00027;s Rollin&#x00027; Justin in the SOLEX environment to perform component retrieval and assembly tasks <bold>(right)</bold>.</p></caption>
<graphic xlink:href="frobt-08-611251-g0008.tif"/>
</fig>
</sec>
<sec>
<title>3.4. Telenavigation&#x02014;Analog-1</title>
<p>The Analog-1 mission (November 2019) tested geological sampling from orbit. It was intended to give insight into the feasibility of operating a robot on the surface of the moon by an astronaut aboard the Lunar Gateway, where communication latencies would be comparable to, or less than, those from ISS to ground (these were &#x02248;850 ms in the K<sub>u</sub> band link via relay satellites). In contrast to the SUPVIS-Justin experiment of the previous section, the unstructured environment and loosely defined tasks made supervised autonomy impractical. Hence, for the first time, full-DoF direct teleoperation with force feedback was tested from space to ground. The robot controlled from space was a mobile platform with two robotic manipulators and two cameras, shown in the right photo in <xref ref-type="fig" rid="F9">Figure 9</xref>. The astronaut on the ISS drove the mobile platform to three geological sampling sites (mocked-up in a hangar in the Netherlands), investigated them and collected rock samples, all while in communication with geologists.</p>
<fig id="F9" position="float">
<label>Figure 9</label>
<caption><p><bold>(Left)</bold> Astronaut Luca Parmitano used a haptic device and a joystick to control the robot arms and the mobile platform (credit: ESA). <bold>(Right)</bold> The Analog-1 mobile platform at a mocked-up geological sampling site (credit: ESA).</p></caption>
<graphic xlink:href="frobt-08-611251-g0009.tif"/>
</fig>
<p>The astronaut&#x00027;s work station consisted of a laptop to display and interact with the user interface; a Sigma.7 haptic interface device from Force Dimension (modified by the company to be used in microgravity) to command position of the tool on the manipulator and receive force feedback; and an integrated joystick with keypad to drive the platform, move the cameras, and also interact with the user interface (see <xref ref-type="fig" rid="F9">Figure 9</xref>, left). For the control, we used TDPA to deal with latency (described in section 2.1). Full details of the control are outside the scope of this paper.</p>
<p>The astronaut was able to command the robot stably, effectively, and intuitively. Despite the unstructured environment, it was clear from pre-trials with astronauts and astronaut trainers that certain maneuvers could also be automated, for example, the stowing of the rock. This begs the question of how to scale up and down levels of autonomy for different environments or tasks, with the same interface. Furthermore, possible uses of augmented reality were identified: to aid communication with scientists (during the experiment the astronaut benefited from a grid projected over the image), to aid driving under time delay (e.g., to show the projected path of the platform under the current steering angle) or in semi-autonomous driving, and to specify via points for the robot path on the camera image itself.</p>
</sec>
</sec>
<sec id="s4">
<title>4. Case Studies</title>
<p>While space missions were our original motivation for research on the MATM approach, it is evident that numerous other applications can also benefit from this approach. In this section, six exemplary use cases are presented to illustrate the wide variety of potential applications that reach from orbital applications over terrestrial telemanipulation in caregiving and telesurgery to applications that involve driving and flying robotic systems. In each use case description, special emphasis is given to the specific challenges, technical solutions, and experience gained. In none of these use cases have we exploited the full spectrum of MATM so far; rather, we emphasized certain aspects that appeared to be of particular interest for the respective use case. These foci are indicated in parentheses in the section headings.</p>
<sec>
<title>4.1. In-orbit Telemanipulation (Haptic Augmentation and Shared Control)</title>
<p>To reduce the cost and payload volume of satellite launches, the assembly of satellites may be realized in in-orbit factories (Spaceflight, <xref ref-type="bibr" rid="B88">2020</xref>). Although manufacturing in the ISS has been recently tested with 3D printing (Napoli and Kugler, <xref ref-type="bibr" rid="B53">2017</xref>), robotic assembly of satellites has not been done yet. To this end, an on-ground feasibility study was conducted within the framework of the Space Factory 4.0 project (Weber Martins et al., <xref ref-type="bibr" rid="B95">2018</xref>) for the robotic assembly of CubeSats (<xref ref-type="fig" rid="F10">Figure 10</xref>). <italic>Space Factory 4.0</italic> aimed at developing a bilateral controller, which allows for teleoperation of the assembly robot by a human operator using an HMI device, providing force feedback with the support of virtual fixtures, which in the control scheme of <xref ref-type="fig" rid="F3">Figure 3</xref>, are elements of the remote model. The virtual fixtures are dynamic and are placed on the desired point by a vision-based tracking system. The final control architecture was based on a mixed-initiative approach (see section 2.3), where the final control command to the remote robot was a weighted sum of control commands from the teleoperator and the vision-based autonomy (virtual fixtures) with fixed authority allocation factors (Panzirsch et al., <xref ref-type="bibr" rid="B59">2018a</xref>).</p>
<fig id="F10" position="float">
<label>Figure 10</label>
<caption><p>Demonstration of a telerobotic high-precision assembly task of an electrical connector of CubeSats (Weber Martins et al., <xref ref-type="bibr" rid="B95">2018</xref>).</p></caption>
<graphic xlink:href="frobt-08-611251-g0010.tif"/>
</fig>
<p>In order to reduce the physical effort demanded from the operator while telemanipulating the remote robot using the robot-based haptic device with its high inertia, a local explicit force controller was used to match the forces measured at the human&#x02013;haptic device interface to the force measured at the remote robot&#x00027;s end effector (Balachandran et al., <xref ref-type="bibr" rid="B11">2020b</xref>). This reduced the perceived inertia of the haptic device by the operator, during free motion of the robot, and also increased the transparency during robot&#x02013;environment interaction. In addition to the feedback of the measured forces from the robot&#x00027;s end-effector, additional feedback was provided to the operator via the forces generated by the virtual fixtures. This supported the operator in gaining a better impression of the relative motion of the robot with respect to the workpiece and the target position and orientation. In a pilot study, it was found that such supportive feedback reduced the required completion time for an assembly task of CubeSat parts that required a high degree of precision (Weber Martins et al., <xref ref-type="bibr" rid="B95">2018</xref>).</p>
<p>In spite of these benefits introduced by this mixed-initiative-based shared control architecture, it was observed that in certain scenarios the tracking system produced noisy measurements, which caused the virtual fixtures to be wrongly placed. Due to the fixed authority allocation factors that were tuned beforehand, the operator had to produce more physical effort to intervene and force the robot against the virtual fixtures. Future work includes applying the adaptive shared control approach (as in Balachandran et al., <xref ref-type="bibr" rid="B10">2020a</xref>) with possibilities for human intervention with more ease along with optimal placement of the virtual fixtures using artificial intelligence and machine learning.</p>
</sec>
<sec>
<title>4.2. Caregiving (Shared Autonomy and Seamless Autonomy Activation)</title>
<p>The demographic change in most industrial countries will pose major challenges to national health-care systems and the society to be faced within the next decades. While the number of people requiring assistance and caregiving is continuously growing, the number of caregivers is not keeping up with that demand. Robotic systems can potentially contribute to bridge this gap between demand and supply (Riek, <xref ref-type="bibr" rid="B71">2017</xref>). Only recently, various robotic systems were brought to market for this purpose (Ackerman, <xref ref-type="bibr" rid="B3">2018</xref>; Gupta et al., <xref ref-type="bibr" rid="B32">2019</xref>; Mi&#x00161;eikis et al., <xref ref-type="bibr" rid="B50">2020</xref>). Such robots should be able to take over logistical tasks or assist in care or daily life. Besides technical aspects, also the simplicity and empathy in interactions are highly relevant for these assistance tasks (Pepito et al., <xref ref-type="bibr" rid="B66">2020</xref>).</p>
<p>To mitigate the demographic challenges, the prototypical <italic>SMiLE</italic><xref ref-type="fn" rid="fn0001"><sup>1</sup></xref> ecosystem has been developed as a holistic concept for robotic assistance in caregiving (Vogel et al., <xref ref-type="bibr" rid="B94">2020</xref>). This ecosystem offers a variety of control modes and autonomy levels to meet the actual application at hand. However, a 100 % reliability of the autonomous capabilities is practically unrealistic and the requirements in terms of safety are enormous since the robots are operated in direct vicinity of humans. Therefore, telepresence technologies are applied to cover several aspects (see <xref ref-type="fig" rid="F11">Figure 11</xref>). For example, in case of emergency a teleoperator can instantly gain control of the remote robot and take immediate actions before an ambulance arrives on site. Alternatively, the person in need of care can activate teleoperated human assistance, if desired, or the robot requests human support itself if the autonomous capabilities of the system do not suffice to solve a required task.</p>
<fig id="F11" position="float">
<label>Figure 11</label>
<caption><p><bold>(Left)</bold> Concept of the caregiving ecosystem <italic>SMiLE</italic>. <bold>(Right)</bold> Exemplary implementation (Vogel et al., <xref ref-type="bibr" rid="B94">2020</xref>). [&#x000A9;2020 IEEE. Reprinted, with permission, from (Vogel et al., <xref ref-type="bibr" rid="B94">2020</xref>)].</p></caption>
<graphic xlink:href="frobt-08-611251-g0011.tif"/>
</fig>
<p>The <italic>SMiLE</italic> ecosystem foresees haptic input devices to control a large variety of heterogeneous robotic agents in order to increase their reliability and efficiency. Therefore, a uniform control structure has been developed in which the robotic agents act at the users&#x00027; requests, while seamlessly switching to remote haptic teleoperation can be performed at any time. Besides the teleoperation coupling, the methods of supervised and shared autonomy are also designed in a robot agnostic way. Within the <italic>SMiLE</italic> ecosystem, these operation modes are fused with the delayed teleoperation control structure to augment the human operator with the model-based capabilities of the robot-side intelligence. Similar to autonomous functions, advanced control methods such as hierarchical whole-body control (Dietrich and Ott, <xref ref-type="bibr" rid="B25">2020</xref>), which are parameterized with the knowledge of the remote model, can help to increase the usability of the robotic system.</p>
<p>It was shown that seamless switching to teleoperation can be achieved through the application of one common Cartesian controller for teleoperation and autonomous operation modes on the remote robot side. Furthermore, in order to sequentially couple one haptic device to a variety of robotic systems and to augment the human operator with the shared-autonomy functionalities, the coupling has to be designed in the Cartesian space as well. The results of <italic>SMiLE</italic> further confirm that the shared-autonomy functionalities can be stably combined with the time-delayed telemanipulation framework if the generation of the respective fictitious force feedback is designed with passive characteristics, as was outlined in section 2.4.</p>
<p>A challenge apparent in domestic use cases is the large variety of different objects and tasks the system has to handle. Here, the human teleoperator can not only serve as a fallback solution for tasks unknown to the system but the data generated in these task executions can help to increase the functionality of the autonomous agent. To this end, we investigate task representations that enable the definition of new tasks through learning by demonstration approaches.</p>
</sec>
<sec>
<title>4.3. Telesurgery (Bilateral Control Concepts and Haptic Augmentation)</title>
<p>The demographic change and the accompanied continuous development of medical technology to enable high quality of life is an important driving factor for surgical robotics technology. Goals of robotically assisted surgical systems (RASS) are manifold, ranging from the enhancement of surgical treatments in terms of safety for patients and clinicians to improved patient outcome and short convalescence. Already in the 1970s, first concepts of RASS were considered based on telemanipulation (Alexander, <xref ref-type="bibr" rid="B4">1973</xref>). Nowadays more than 7 million procedures have been performed assisted by RASS, many of them by the <italic>da Vinci Surgical System (Intuitive Surgical Inc.)</italic>, which embodies a telemanipulation system, similar to the envisioned system of the 1970s (Klodmann et al., <xref ref-type="bibr" rid="B42">2020</xref>).</p>
<p>Since the 1990s DLR contributes to this field, e.g., one of the most mature research platforms for telemanipulation in robotic surgery, the <italic>DLR MiroSurge System</italic>, was developed (<xref ref-type="fig" rid="F12">Figure 12</xref>) (Hagn et al., <xref ref-type="bibr" rid="B33">2010</xref>; Seibold et al., <xref ref-type="bibr" rid="B81">2018</xref>). The modular patient-side manipulator consists of one to multiple bed-mounted 7-DoF <italic>DLR MIRO</italic> robot arms. One arm is equipped with a stereo endoscope and the others carry various types of articulated, wristed instruments (<italic>DLR MICA</italic>). The surgeon console incorporates a stereo display to visualize the situs of the patient in 3-D and two <italic>sigma.7 (force dimension)</italic> haptic devices are used as input devices (Tobergte et al., <xref ref-type="bibr" rid="B92">2011</xref>). The system is the institute&#x00027;s core platform to research surgical robotics in interdisciplinary collaborations with industry, clinics, and complementary research institutions (MIRO Innovation Lab, <xref ref-type="bibr" rid="B49">2017</xref>). Besides a seamless integration of RASS into a digitalized hospital infrastructure, the focus areas of research range from the acquisition and natural presentation of information from the situs to the surgeon, over enhancing the surgeon&#x00027;s dexterity inside the patient, while keeping the trauma low and providing natural controls, which assist with individualized and task-dependent assistance functions, e.g., utilizing virtual fixtures, shared control, or semi-autonomous functions, to further decrease the physician&#x00027;s cognitive workload (<xref ref-type="fig" rid="F12">Figure 12</xref> and sections 2.2 and 2.3).</p>
<fig id="F12" position="float">
<label>Figure 12</label>
<caption><p><bold>(Left)</bold> Focus areas of surgical robotics research. (translated by permission from Springer Nature Customer Service Centre GmbH: Springer Nature, (Klodmann et al., <xref ref-type="bibr" rid="B42">2020</xref>), &#x000A9;2020). <bold>(Right)</bold> DLR <italic>MiroSurge</italic> research system for telemanipulation in robot-assisted laparoscopic surgery. (&#x000A9;Alexandra Beier/DLR, CC BY 3.0).</p></caption>
<graphic xlink:href="frobt-08-611251-g0012.tif"/>
</fig>
<p>The basic control architecture maps the user inputs to joint positions of the patient-side manipulators by an inverse kinematics algorithm accounting for workspace constraints, singularities, and redundancy. This basic control architecture is continuously enhanced by different MATM-based approaches, as described in the following paragraphs.</p>
<p>Different passivity-based force feedback control approaches to increase the system&#x00027;s transparency, e.g., by downscaling the felt inertia and friction and dealing with other disturbances, were developed (Tobergte et al., <xref ref-type="bibr" rid="B92">2011</xref>; Tobergte and Albu-Sch&#x000E4;ffer, <xref ref-type="bibr" rid="B90">2012</xref>; Tobergte and Helmer, <xref ref-type="bibr" rid="B91">2013</xref>). Even though many studies show that force/torque feedback might also increase surgical performance (Weber and Schneider, <xref ref-type="bibr" rid="B99">2014</xref>), cost effective, sterilizable sensors integrable directly at the instrument tips are still not commercially available.</p>
<p>Haptically augmented workspace limits (e.g., of the haptic device, the remote manipulators or task-dependent constraints, such as the incision point constraint), limit-indexing or velocity scaling approaches support the safe and efficient control of the system. To appropriately configure and parameterize these features, user-studies based on best practices and standards of human factors are conducted and are generally recommendable (Nitsch et al., <xref ref-type="bibr" rid="B54">2012</xref>; Weber et al., <xref ref-type="bibr" rid="B97">2013</xref>).</p>
<p>A rich set of geometric primitives is implemented to provide task-related and haptically augmented virtual fixtures that are intended to finally enhance the surgeon&#x00027;s capabilities, e.g., by guiding toward or along target tissues or preventing unintentionally injuring critical anatomical structures. Perceiving the patient&#x00027;s situs accurately and reactively updating the robot&#x00027;s knowledge or rather representation of patient and procedure (section 2.4) to appropriately configure and parameterize these features embody some still open research questions to finally integrate the concept of task-dependent assistance functions into realistic scenarios in robot-assisted laparoscopic surgery.</p>
</sec>
<sec>
<title>4.4. Telenavigation (Haptic Augmentation and Model-Mediated Telemanipulation)</title>
<p>The ambitious future of planetary exploration has the potential to push the boundaries of technological advancements. However, a nondeterministic remote environment and communication delay might render full autonomy and supervised control an unfeasible feat with laborious task execution times. Instead, adding a human to the loop, to telenavigate the robot, can bypass many of the task requirements especially in the fields of perception and cognition, and ensure safety. Unlike telemanipulation, telenavigation benefits from velocity as the command signal, instead of position. Nonetheless, it too presents us with the trade-off between performance and stability.</p>
<p>To examine the effect of such a trade-off, a recent study (Sierotowicz et al., <xref ref-type="bibr" rid="B83">2020</xref>) was conducted to telenavigate a Lightweight Rover Unit (LRU; Wedler et al., <xref ref-type="bibr" rid="B100">2015</xref>) with and without delay, via a 2-DoF DLR force feedback joystick (Riecke et al., <xref ref-type="bibr" rid="B69">2016</xref>) by using a predictive polygon-based approach with a car-like interface (Panzirsch et al., <xref ref-type="bibr" rid="B62">2018b</xref>) (<xref ref-type="fig" rid="F13">Figure 13</xref>). TDPA was extended and used as a tool to passivate the active communication channel, which injects energy into the system due to delay. The human operator commanded longitudinal velocity and lateral curvature to the LRU, by pressing the dead-man switch on the joystick, and in return received a fictitious force feedback computed by the overlapping of polygons with the obstacles in a danger map. The danger map of the remote terrain is generated by classifying the traversability based on the depth data acquired by the LRU&#x00027;s pan/tilt stereo camera system (Brand et al., <xref ref-type="bibr" rid="B20">2014</xref>), and is the local model that was used to generate haptic feedback (see section 2.2). A passive model update was achieved by Panzirsch et al. (<xref ref-type="bibr" rid="B62">2018b</xref>) (see section 2.4), which makes this a favorable approach in terms of applicability to a large variety of feedback generation types.</p>
<fig id="F13" position="float">
<label>Figure 13</label>
<caption><p><bold>(Left)</bold> Schematic showing the generation of fictitious forces by polygons overlapping with obstacle in the danger map [&#x000A9;2020 IEEE. Reprinted, with permission, from Panzirsch et al. (<xref ref-type="bibr" rid="B62">2018b</xref>)]. <bold>(Top Right)</bold> Screenshot of the user interface. <bold>(Bottom Right)</bold> LRU with augmented polygons in the experimental environment [&#x000A9;Sierotowicz et al. (<xref ref-type="bibr" rid="B83">2020</xref>), CC BY 4.0, images have been modified].</p></caption>
<graphic xlink:href="frobt-08-611251-g0013.tif"/>
</fig>
<p>The main finding of the user study is that force feedback significantly improves navigation performance in the proximity of obstacles, although navigation is slower. The positive effect of the force feedback was evident in conditions without and with a communication delay of 800 ms. Altogether, these results show that a fictitious force feedback approach based on a TDPA controller is beneficial in difficult terrain and in the presence of substantial communication delay.</p>
<p>Apart from collision avoidance, the predictive polygon method could also help maintain a certain &#x0201C;safe&#x0201D; distance for the LRU from its environment. Since the width of the predictive polygons is a tunable factor, it can be adjusted to increase or decrease the safety factor or to allow/restrict the LRU&#x00027;s movement through narrow canyon-like environments. Despite rate control, the TDPA could effectively stabilize and provide valuable force feedback with minimized position drift to the human operator. Thus, the haptic augmentation was beneficial with regard to navigation accuracy for demanding telenavigation tasks.</p>
<p>The 2-D danger map considers any object above a certain height as an obstacle. Thus, this would be impossible to tune when the LRU is traversing an unstructured environment. Therefore, a 2.5-D danger map with annotations would give more freedom to the operator and allow driving over small pebbles, grass, uneven roads, etc. Although a 2-DoF joystick could be used to maneuver the LRU with a car-like interface, a 3-DoF haptic joystick could be used to fully explore LRU&#x00027;s potential of rear steering capabilities for crab-like and sideways motions.</p>
</sec>
<sec>
<title>4.5. Aerial Manipulation (Hierarchical Bilateral Teleoperation and Haptic Augmentation)</title>
<p>The use of aerial manipulators, i.e., unmanned aerial vehicles (UAVs) with attached robotic arms, allows for significant improvements in the reachability and versatility of manipulation tasks. Among other functionalities, such systems are able to perform inspection and maintenance tasks in high or inaccessible scenarios (e.g., oil refineries; Ollero et al., <xref ref-type="bibr" rid="B55">2018</xref>). In order to exploit such systems while taking advantage of human capabilities in terms of perception and cognition, bilateral aerial teleoperation arises as a reasonable solution. In that scope, providing the user with camera images and/or virtual reality has been shown an essential feature for the successful fulfillment of the teleoperation task (Coelho et al., <xref ref-type="bibr" rid="B23">2020</xref>; Lee et al., <xref ref-type="bibr" rid="B44">2020</xref>).</p>
<p>Within the class of aerial manipulators, those presenting kinematic redundancy like the DLR Suspended Aerial Manipulator (SAM; Sarkisov et al., <xref ref-type="bibr" rid="B78">2019</xref>; see <xref ref-type="fig" rid="F14">Figure 14</xref>) are able to allow the user to not only control the robotic arm, but also steer the UAV (also called flying base) to achieve a desired camera view of the task being performed. Nevertheless, two main issues arise in that application. First, suitable control strategies have to be applied in order to ensure a strict hierarchy between the manipulation and the vision task, i.e., such that the flying base can move without disturbing the manipulation task. Additionally, as the traditional TDPA method is not capable of dealing with such a hierarchy in the presence of time delays, an extension thereof has to be applied.</p>
<fig id="F14" position="float">
<label>Figure 14</label>
<caption><p><bold>(Left)</bold> Concept of the whole-body teleoperation approach for the SAM. [&#x000A9;Coelho et al. (<xref ref-type="bibr" rid="B22">2021</xref>), CC BY 4.0]. <bold>(Right)</bold> Experimental setup, showing the robot, the ground station and the view provided to the user [&#x000A9;2020 IEEE. Reprinted, with permission, from Lee et al. (<xref ref-type="bibr" rid="B44">2020</xref>)].</p></caption>
<graphic xlink:href="frobt-08-611251-g0014.tif"/>
</fig>
<p>An initial approach to cope with the aforementioned issues was introduced in Coelho et al. (<xref ref-type="bibr" rid="B23">2020</xref>) and a complete solution was subsequently presented in Coelho et al. (<xref ref-type="bibr" rid="B22">2021</xref>). A conceptual view of the presented approach is shown in <xref ref-type="fig" rid="F14">Figure 14</xref> together with an overview of an experimental scenario. Using the proposed approach, the user was able to choose to command either vision or manipulation task while the other task was autonomously controlled to keep the last commanded pose in a shared-control fashion. As the vision task was restricted to the motion subset where the end-effector is not disturbed, a haptic concept called <italic>Null-Space Wall</italic> was created to inform the user when the limits of that subset were reached. Moreover, the extended TDPA ensured the system passivity in simulations with up to 300 ms round-trip delay as well as in a real scenario, where command and feedback signals were exchanged through a wireless network with time delay, package loss, and jitter. The user was able to successfully perform pick-and-place tasks while keeping the manipulator and the object in the field of view. In addition, it was found that moving the flying base to align the camera image with the command directions of the input device can significantly decrease the task-completion time as well as the mental effort.</p>
<p>A current limitation of the proposed approach is that it does not take into account the constraints imposed by the cable system on the SAM. Therefore, it is only guaranteed to work when the oscillations of the base are negligible. An extension of the approach to deal with such constraints is planned for the near future. In addition, the visual-inertial odometry-based approach presented in Lee et al. (<xref ref-type="bibr" rid="B44">2020</xref>) to create a 3-D virtual-reality environment will be extended with haptic rendering capabilities. Moreover, the multilateral haptic augmentation method based on virtual grasping points (see section 2.2) could be especially meaningful in the described setup for the cases when the flying robot needs to keep some distance from obstacles. In that case, the robot grasping point on the manipulated object can be distant from the environment interaction point of the object, which can be chosen as the virtual grasping point.</p>
</sec>
<sec>
<title>4.6. On-Orbit Servicing (Shared Control)</title>
<p>Mitigation of space debris and servicing of dysfunctional satellites have driven space agencies and companies toward the concept of robotic on-orbit servicing (Miller et al., <xref ref-type="bibr" rid="B48">1982</xref>). The term On-Orbit Servicing (OOS) refers to the maintenance in orbit, including assembly, refueling, and repair of defective satellites to extend their lifetime and to actively remove the space debris with a controlled re-entry into the Earth&#x00027;s atmosphere. To this end, space robotic projects consider the employment of a manipulator arm attached to a new satellite to implement the multiple phases of an OOS mission, namely, approaching a target satellite, followed by grasping, stabilization, docking, and finally servicing. In order to test and validate the low-level and high-level control strategies (aimed to be used in micro-gravity conditions) prior to the real mission, an on-ground facility is required to simulate the free-floating nature of the satellites. To achieve this, the OOS Simulator (OOS-Sim) hardware facility (<xref ref-type="fig" rid="F15">Figure 15</xref>) has been developed at DLR, which comprises two large industrial robots that simulate a micro-gravity environment using model-based dynamic simulation for the satellite mock-ups attached to their end-effectors. The servicer satellite is equipped with a robot arm to service the target satellite mock-up (Artigas et al., <xref ref-type="bibr" rid="B8">2015</xref>).</p>
<fig id="F15" position="float">
<label>Figure 15</label>
<caption><p><bold>(Left)</bold> The On-Orbit Servicing facility at DLR. <bold>(Right)</bold> The haptic device and the remote robot are of the same type of collaborative robot.</p></caption>
<graphic xlink:href="frobt-08-611251-g0015.tif"/>
</fig>
<p>The manipulator arm attached to the satellite mock-up can be controlled using vision-based semi-autonomous control with the stereo camera set-up at the end-effector of the robotic arm. This semi-autonomous approach relies highly on perception of the target satellite, which is affected by internal and external factors such as camera noise, close range vision degradation, illumination changes, and reflections among many others (Schmidt et al., <xref ref-type="bibr" rid="B80">2016</xref>). These factors might lead to a failure in the task execution by the autonomous system, and human-in-the-loop supervision is always preferred due to the critical nature of the orbital robotic missions.</p>
<p>To enable human intervention in the event of autonomy failure, the OOS-Sim also features a teleoperation modality using a haptic device with which an operator can control the manipulator on the servicer satellite. To validate orbital teleoperation tasks for Low Earth Orbit (LEO) satellites with long operation windows using the OOS-Sim facility, experiments were conducted with the ASTRA GEO satellite acting as a relay system for the signals from the operator and the OOS-Sim manipulator, where the round-trip delay was 270 ms with a standard deviation of 3 ms and a mean data loss of 24%. It was presented in Artigas et al. (<xref ref-type="bibr" rid="B6">2016a</xref>) that grasping and stabilization of the free-floating OOS-Sim target using the servicer manipulator with teleoperation is feasible even under large time delays and data losses.</p>
</sec>
</sec>
<sec sec-type="conclusions" id="s5">
<title>5. Conclusion</title>
<p>Traditional bilateral teleoperation proved to be feasible up to time delays of several seconds round-trip (see section 2.1). However, task execution then becomes extremely difficult and slow. Particularly in scenarios with such long delays, a support of the operator by suitable technologies can be of great value. This paper introduced the MATM approach, which aims to enable efficient operator-assisting telemanipulation. The concept encompasses and generalizes previous approaches for enhanced telemanipulation, in particular model-mediated telemanipulation, shared autonomy, and augmented haptics. The approach employs two kinds of models to achieve this goal and to augment both the feedback to the operator and the commands for the teleoperated remote robot. In particular, a remote model enables a shared autonomous functionality of the teleoperated robot, while a local model aims at generating an assistive augmented haptic feedback to the human operator. This scope makes the MATM approach one of the most comprehensive and powerful, but also one of the most technologically sophisticated and challenging telemanipulation approaches.</p>
<p>In a historical retrospective of our past telerobotic space missions, the way to this technology was described and the challenges we encountered during these missions were highlighted. The biggest challenge of the first missions we participated in, ROTEX and ETS-VII, was to overcome the hurdles that were imposed by the low computing power at that time that led to long time delays. Since these delays made closed-loop telemanipulation with force feedback impossible, our research concentrated on shared control and model prediction. Later, in ROKVISS and Kontur-2, the development of a control approach that allows stable and transparent bilateral telemanipulation despite delay, loss, and jitter of communication packets became the main focus of our research activities on telemanipulation. During the latter mission, basic research on the design of optimal haptic feedback was also conducted. In the more recent METERON mission, supervised autonomous operation was evaluated using a humanoid robot as an exemplary execution platform. It turned out during this mission that such an autonomous functionality can provide a great relief for operating a robot and even allows for parallel operation of several robots. However, it also showed the limitation of autonomous operation especially in unstructured environments. Without human perception and cognition, a robot system will in the near future not be able to operate autonomously during a whole mission, although autonomy can already perform some specific robotic tasks today. These results suggest combining autonomy and telemanipulation in an advantageous way, which is realized in particular with the remote model of MATM. In the recent Analog-1 mission, the telemanipulation technologies for the telenavigation of a rover through an unstructured environment and for the teleoperation of a robot arm mounted on this rover were evaluated.
It could be shown for the first time that full-DoF direct teleoperation with force feedback can be robustly established for such a system and underlines the benefit of haptic feedback over open-loop teleoperation.</p>
<p>While these space missions were the main driver for our research on telemanipulation, the technology also has enormous potential for other applications, which was highlighted on the basis of six use cases. These use cases unveiled the potential and limitations of the MATM approach in the applications that reach from orbital applications over terrestrial telemanipulation in caregiving and telesurgery to applications that involve driving and flying robotic systems. In none of these use cases have we exploited the full spectrum of MATM so far, but rather emphasized certain aspects of it. The following lines give an overview of the most important results.</p>
<p>Haptic augmentation methods, in particular task- and system-related virtual fixtures demonstrated their usefulness in telesurgery, caregiving, and orbital robotics. In telenavigation, a predictive polygon method helped to maintain a certain &#x0201C;safe&#x0201D; distance for DLR&#x00027;s rover LRU from its environment and therefore to avoid collisions. We identified an enormous potential in making haptic augmentation methods more flexible, which could be achieved by parameterizing them manually using human intervention or automatically by machine learning methods.</p>
<p>While in the presented use case of aerial manipulation, the autonomous functionality took control of a subtask and was thus used to support the operator, the shared control for on-orbit servicing showed that even proactive autonomous trajectory generation is comprehensible and clearly supportive for the operator. The mixed-initiative-based shared control presents certain limitations, particularly if object recognition is affected by camera noise, close range vision degradation, illumination changes, and reflections. More adaptive approaches need to be developed in the future, enabling easier human intervention or automatic adaption of authority. With regard to the caregiving use case, it is apparent that the shared control approach is currently only able to handle objects that are previously known to the algorithm. To overcome this limitation, in the future new objects could be self-learned by learning-by-demonstration approaches.</p>
<p>With regard to stability, we could confirm in the caregiving use case that the combination of shared-autonomy functionalities and a time-delayed telemanipulation framework becomes stable if the respective fictitious force feedback is designed with passive features. All our available MATM modules for delayed teleoperation, bilateral, or multilateral haptic augmentation methods and model updates were implemented on the basis of passive modules, which can be almost arbitrarily combined without further stability considerations. Furthermore, seamless switching from autonomy to telemanipulation or between different teleoperated robots was enabled by a coupling control structure that can be readily transferred to comparable telerobotic systems as well.</p>
<p>Beyond these lessons learned and the challenges identified, a number of other important questions remain for future work, in particular to enable the MATM approach to be realized as a whole, incorporating all of its technologies. A robust and powerful solution for updating the symbolic models for supervised autonomy especially during the teleoperation phase is still an active topic of research. In relation to this task, the synchronization between the local and the remote model also needs to be developed. The passivity principle, which we applied to achieve stability, needs to be validated as a suitable tool for a general control framework to allow easy extension of the MATM approach for new applications and robots. Furthermore, a new transparency metric would be useful for comparing MATM with direct teleoperation methods and model-mediated teleoperation. Finally, as a big challenge remains the design of a user interface involving graphical, audio, and haptic channels that provides the operator access to all model-augmentation functionalities and control modalities reaching from direct teleoperation to supervised autonomy. While MATM has not been implemented as a whole, it has already proven its usefulness in numerous applications and plays an important role as intermediate step toward supervised and fully autonomous robots.</p>
</sec>
<sec sec-type="data-availability-statement" id="s6">
<title>Data Availability Statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material; further inquiries can be directed to the corresponding author/s.</p>
</sec>
<sec id="s7">
<title>Ethics Statement</title>
<p>Written informed consent was obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article.</p>
</sec>
<sec id="s8">
<title>Author Contributions</title>
<p>TH: contributed to all sections. AA-S: wrote section 1. GH: wrote section 3.1. CO: wrote section 2.1. DL: wrote sections 2.3 and 2.4. AD, JV, KH, AH, and JK: wrote section 4.3. RL: wrote section 4.6. MS: wrote section 4.4. AB: wrote sections 1, 2.3, and 2.4. GQ: wrote sections 2.2, 2.3, and 2.4. NL: wrote section 3.3. BB: wrote sections 3.1 and 3.2. CR: wrote section 3.2. NB: wrote section 2.1. BW: wrote sections 3.2, 4.3, and 4.4. AP: wrote section 3.4. AC: wrote section 4.5. RB: wrote sections 3.2, 4.1, 4.4, and 4.6. HS: wrote sections 1, 2.1, 4.4, and 5. MP: wrote sections 2.1 and 5. All authors contributed to the article and approved the submitted version.</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of Interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
</body>
<back>
<ack><p>The authors would like to thank Jordi Artigas, Katharina Hertkorn, Philipp Kremer, Carsten Preusche, Mikel Sagardia, and Simon Sch&#x000E4;tzle for the discussions and ideas about the MATM concept, and also thank Tilo W&#x000FC;sthoff and Markus Grebenstein for their illustration of DLR&#x00027;s robotic vision in <xref ref-type="fig" rid="F1">Figure 1</xref>. The authors would also like to express their deep appreciation for the close collaboration in METERON SUPVIS Justin and Analog-1, with their partners at the ESA Human Robot Interaction Laboratory headed by Thomas Krueger. Greatest appreciation also applies to their other partners at ESA, JAXA, and ROSCOSMOS for the productive cooperation.</p></ack>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="web"><person-group person-group-type="author"><collab>cyberneticzoo.com</collab></person-group> (<year>2014</year>). <source>1948-GE Master-Slave Manipulator-John Payne</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="http://cyberneticzoo.com/teleoperators/1948-ge-master-slave-manipulator-john-payne-american">http://cyberneticzoo.com/teleoperators/1948-ge-master-slave-manipulator-john-payne-american</ext-link> (accessed August 5, 2020).</citation></ref>
<ref id="B2">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Aarno</surname> <given-names>D.</given-names></name> <name><surname>Ekvall</surname> <given-names>S.</given-names></name> <name><surname>Kragic</surname> <given-names>D.</given-names></name></person-group> (<year>2005</year>). <article-title>Adaptive virtual fixtures for machine-assisted teleoperation tasks</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Barcelona</publisher-loc>), <fpage>1139</fpage>&#x02013;<lpage>1144</lpage>.</citation></ref>
<ref id="B3">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Ackerman</surname> <given-names>E.</given-names></name></person-group> (<year>2018</year>). <source>Moxi Prototype From Diligent Robotics Starts Helping Out in Hospitals</source>. <publisher-name>IEEE Spectrum</publisher-name>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://spectrum.ieee.org/automaton/robotics/industrial-robots/moxi-prototype-fromdiligent-robotics-starts-helping-out-in-hospitals">https://spectrum.ieee.org/automaton/robotics/industrial-robots/moxi-prototype-fromdiligent-robotics-starts-helping-out-in-hospitals</ext-link></citation></ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alexander</surname> <given-names>A. D.</given-names></name></person-group> (<year>1973</year>). <source>On Theory and Practice of Robots and Manipulators</source>. Springer, <fpage>121</fpage>&#x02013;<lpage>136</lpage>.</citation></ref>
<ref id="B5">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Anderson</surname> <given-names>R. J.</given-names></name></person-group> (<year>1994</year>). <article-title>Teleoperation with virtual force feedback</article-title>, in <source>Experimental Robotics III</source>, eds <person-group person-group-type="editor"><name><surname>Yoshikawa</surname> <given-names>T.</given-names></name> <name><surname>Miyazaki</surname> <given-names>F.</given-names></name></person-group> (<publisher-loc>Berlin; Heidelberg</publisher-loc>: <publisher-name>Springer Berlin Heidelberg</publisher-name>), <fpage>366</fpage>&#x02013;<lpage>375</lpage>. <pub-id pub-id-type="doi">10.1007/BFb0027608</pub-id></citation></ref>
<ref id="B6">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Artigas</surname> <given-names>J.</given-names></name> <name><surname>Balachandran</surname> <given-names>R.</given-names></name> <name><surname>De Stefano</surname> <given-names>M.</given-names></name> <name><surname>Panzirsch</surname> <given-names>M.</given-names></name> <name><surname>Lampariello</surname> <given-names>R.</given-names></name> <name><surname>Albu-Schaeffer</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2016a</year>). <article-title>Teleoperation for on-orbit servicing missions through the astra geostationary satellite</article-title>, in <source>IEEE Aerospace Conference</source> (<publisher-loc>Big Sky</publisher-loc>), <fpage>1</fpage>&#x02013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1109/AERO.2016.7500785</pub-id></citation></ref>
<ref id="B7">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Artigas</surname> <given-names>J.</given-names></name> <name><surname>Balachandran</surname> <given-names>R.</given-names></name> <name><surname>Riecke</surname> <given-names>C.</given-names></name> <name><surname>Stelzer</surname> <given-names>M.</given-names></name> <name><surname>Weber</surname> <given-names>B.</given-names></name> <name><surname>Ryu</surname> <given-names>J.-H.</given-names></name> <etal/></person-group>. (<year>2016b</year>). <article-title>Kontur-2: force-feedback teleoperation from the international space station</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Stockholm</publisher-loc>), <fpage>1166</fpage>&#x02013;<lpage>1173</lpage>. <pub-id pub-id-type="doi">10.1109/ICRA.2016.7487246</pub-id></citation></ref>
<ref id="B8">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Artigas</surname> <given-names>J.</given-names></name> <name><surname>De Stefano</surname> <given-names>M.</given-names></name> <name><surname>Rackl</surname> <given-names>W.</given-names></name> <name><surname>Lampariello</surname> <given-names>R.</given-names></name> <name><surname>Brunner</surname> <given-names>B.</given-names></name> <name><surname>Bertleff</surname> <given-names>W.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>The oos-sim: an on-ground simulation facility for on-orbit servicing robotic operations</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Seattle, WA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>2854</fpage>&#x02013;<lpage>2860</lpage>. <pub-id pub-id-type="doi">10.1109/ICRA.2015.7139588</pub-id></citation></ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Artigas</surname> <given-names>J.</given-names></name> <name><surname>Hirzinger</surname> <given-names>G.</given-names></name></person-group> (<year>2016</year>). <article-title>A brief history of DLR&#x00027;s space telerobotics and force feedback teleoperation</article-title>. <source>Acta Polytech. Hungar</source>. <volume>13</volume>, <fpage>239</fpage>&#x02013;<lpage>249</lpage>. <pub-id pub-id-type="doi">10.12700/APH.13.1.2016.1.16</pub-id></citation></ref>
<ref id="B10">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Balachandran</surname> <given-names>R.</given-names></name> <name><surname>Mishra</surname> <given-names>H.</given-names></name> <name><surname>Cappelli</surname> <given-names>M.</given-names></name> <name><surname>Weber</surname> <given-names>B.</given-names></name> <name><surname>Secchi</surname> <given-names>C.</given-names></name> <name><surname>Ott</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2020a</year>). <article-title>Adaptive authority allocation in shared control of robots using Bayesian filters</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Paris</publisher-loc>). <pub-id pub-id-type="doi">10.1109/ICRA40945.2020.9196941</pub-id></citation></ref>
<ref id="B11">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Balachandran</surname> <given-names>R.</given-names></name> <name><surname>Ryu</surname> <given-names>J.-H.</given-names></name> <name><surname>Jorda</surname> <given-names>M.</given-names></name> <name><surname>Ott</surname> <given-names>C.</given-names></name> <name><surname>Albu-Schaeffer</surname> <given-names>A.</given-names></name></person-group> (<year>2020b</year>). <article-title>Closing the force loop to enhance transparency in time-delayed teleoperation</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Paris</publisher-loc>). <pub-id pub-id-type="doi">10.1109/ICRA40945.2020.9197420</pub-id></citation></ref>
<ref id="B12">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Bauer</surname> <given-names>A. S.</given-names></name> <name><surname>Schmaus</surname> <given-names>P.</given-names></name> <name><surname>Albu-Sch&#x000E4;ffer</surname> <given-names>A.</given-names></name> <name><surname>Leidner</surname> <given-names>D.</given-names></name></person-group> (<year>2018</year>). <article-title>Inferring semantic state transitions during telerobotic manipulation</article-title>, in <source>IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)</source> (<publisher-loc>Madrid</publisher-loc>), <fpage>5517</fpage>&#x02013;<lpage>5524</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2018.8594458</pub-id></citation></ref>
<ref id="B13">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Bauer</surname> <given-names>A. S.</given-names></name> <name><surname>Schmaus</surname> <given-names>P.</given-names></name> <name><surname>Stulp</surname> <given-names>F.</given-names></name> <name><surname>Leidner</surname> <given-names>D.</given-names></name></person-group> (<year>2020</year>). <article-title>Probabilistic effect prediction through semantic augmentation and physical simulation</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Paris</publisher-loc>). <pub-id pub-id-type="doi">10.1109/ICRA40945.2020.9197477</pub-id></citation></ref>
<ref id="B14">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Bayer</surname> <given-names>R.</given-names></name> <name><surname>Schmaus</surname> <given-names>P.</given-names></name> <name><surname>Pfau</surname> <given-names>M.</given-names></name> <name><surname>Pleintinger</surname> <given-names>B.</given-names></name> <name><surname>Leidner</surname> <given-names>D.</given-names></name> <name><surname>Wappler</surname> <given-names>F.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Deployment of the solex environment for analog space telerobotics validation</article-title>, in <source>Int. Astronautical Congress (IAC)</source> (<publisher-loc>Washington, DC</publisher-loc>: <publisher-name>IAF</publisher-name>).</citation></ref>
<ref id="B15">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Beik-Mohammadi</surname> <given-names>H.</given-names></name> <name><surname>Kerzel</surname> <given-names>M.</given-names></name> <name><surname>Pleintinger</surname> <given-names>B.</given-names></name> <name><surname>Hulin</surname> <given-names>T.</given-names></name> <name><surname>Reisich</surname> <given-names>P.</given-names></name> <name><surname>Schmidt</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Model mediated teleoperation with a hand-arm exoskeleton in long time delays using reinforcement learning</article-title>, in <source>IEEE International Symposium in Robot and Human Interactive Communication (Ro-Man)</source> (<publisher-loc>Naples, FL</publisher-loc>). <pub-id pub-id-type="doi">10.1109/RO-MAN47096.2020.9223477</pub-id></citation></ref>
<ref id="B16">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Birkenkampf</surname> <given-names>P.</given-names></name> <name><surname>Leidner</surname> <given-names>D.</given-names></name> <name><surname>Borst</surname> <given-names>C.</given-names></name></person-group> (<year>2014</year>). <article-title>A knowledge-driven shared autonomy human-robot interface for tablet computers</article-title>, in <source>2014 IEEE-RAS International Conference on Humanoid Robots</source> (<publisher-loc>Madrid</publisher-loc>), <fpage>152</fpage>&#x02013;<lpage>159</lpage>. <pub-id pub-id-type="doi">10.1109/HUMANOIDS.2014.7041352</pub-id></citation></ref>
<ref id="B17">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Borst</surname> <given-names>C.</given-names></name> <name><surname>Ott</surname> <given-names>C.</given-names></name> <name><surname>Wimb&#x000F6;ck</surname> <given-names>T.</given-names></name> <name><surname>Brunner</surname> <given-names>B.</given-names></name> <name><surname>Zacharias</surname> <given-names>F.</given-names></name> <name><surname>B&#x000E4;uml</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2007</year>). <article-title>A humanoid upper body system for two-handed manipulation</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Roma</publisher-loc>), <volume>Vol. 2</volume>, <fpage>2766</fpage>&#x02013;<lpage>2767</lpage>. <pub-id pub-id-type="doi">10.1109/ROBOT.2007.363886</pub-id></citation></ref>
<ref id="B18">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Borst</surname> <given-names>C.</given-names></name> <name><surname>Wimb&#x000F6;ck</surname> <given-names>T.</given-names></name> <name><surname>Schmidt</surname> <given-names>F.</given-names></name> <name><surname>Fuchs</surname> <given-names>M.</given-names></name> <name><surname>Brunner</surname> <given-names>B.</given-names></name> <name><surname>Zacharias</surname> <given-names>F.</given-names></name> <etal/></person-group>. (<year>2009</year>). <article-title>Rollin&#x00027; justin - mobile platform with variable base</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Kobe</publisher-loc>), <fpage>1597</fpage>&#x02013;<lpage>1598</lpage>. <pub-id pub-id-type="doi">10.1109/ROBOT.2009.5152586</pub-id></citation></ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bowyer</surname> <given-names>S. A.</given-names></name> <name><surname>Davies</surname> <given-names>B. L.</given-names></name> <name><surname>y Baena</surname> <given-names>F. R.</given-names></name></person-group> (<year>2013</year>). <article-title>Active constraints/virtual fixtures: a survey</article-title>. <source>IEEE Trans. Robot</source>. <volume>30</volume>, <fpage>138</fpage>&#x02013;<lpage>157</lpage>. <pub-id pub-id-type="doi">10.1109/TRO.2013.2283410</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Brand</surname> <given-names>C.</given-names></name> <name><surname>Schuster</surname> <given-names>M. J.</given-names></name> <name><surname>Hirschm&#x000FC;ller</surname> <given-names>H.</given-names></name> <name><surname>Suppa</surname> <given-names>M.</given-names></name></person-group> (<year>2014</year>). <article-title>Stereo-vision based obstacle mapping for indoor/outdoor slam</article-title>, in <source>IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)</source> (<publisher-loc>Chicago, IL</publisher-loc>), <fpage>1846</fpage>&#x02013;<lpage>1853</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2014.6942805</pub-id></citation></ref>
<ref id="B21">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Brunner</surname> <given-names>B.</given-names></name> <name><surname>Landzettel</surname> <given-names>K.</given-names></name> <name><surname>Schreiber</surname> <given-names>G.</given-names></name> <name><surname>Steinmetz</surname> <given-names>B. M.</given-names></name> <name><surname>Hirzinger</surname> <given-names>G.</given-names></name></person-group> (<year>1999</year>). <article-title>A universal task level ground control and programming system for space robot applications - the MARCO concept and it&#x00027;s application to the ETS VII project</article-title>, in <source>International Symposium on Artifical Intelligence, Robotics, and Automation in Space (i-SAIRAS)</source> (<publisher-loc>Noordwijk</publisher-loc>), <fpage>507</fpage>&#x02013;<lpage>514</lpage>.</citation></ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Coelho</surname> <given-names>A.</given-names></name> <name><surname>Sarkisov</surname> <given-names>Y.</given-names></name> <name><surname>Wu</surname> <given-names>X.</given-names></name> <name><surname>Mishra</surname> <given-names>H.</given-names></name> <name><surname>Singh</surname> <given-names>H.</given-names></name> <name><surname>Dietrich</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Whole-body teleoperation and shared control of redundant robots with applications to aerial manipulation</article-title>. <source>J. Intell. Robot. Syst.</source> <volume>102</volume>, <fpage>1</fpage>&#x02013;<lpage>22</lpage>. <pub-id pub-id-type="doi">10.1007/s10846-021-01365-7</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Coelho</surname> <given-names>A.</given-names></name> <name><surname>Singh</surname> <given-names>H.</given-names></name> <name><surname>Kondak</surname> <given-names>K.</given-names></name> <name><surname>Ott</surname> <given-names>C.</given-names></name></person-group> (<year>2020</year>). <article-title>Whole-body bilateral teleoperation of a redundant aerial manipulator</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Paris</publisher-loc>), <fpage>9150</fpage>&#x02013;<lpage>9156</lpage>. <pub-id pub-id-type="doi">10.1109/ICRA40945.2020.9197028</pub-id></citation></ref>
<ref id="B24">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Coelho</surname> <given-names>A.</given-names></name> <name><surname>Singh</surname> <given-names>H.</given-names></name> <name><surname>Muskardin</surname> <given-names>T.</given-names></name> <name><surname>Balachandran</surname> <given-names>R.</given-names></name> <name><surname>Kondak</surname> <given-names>K.</given-names></name></person-group> (<year>2018</year>). <article-title>Smoother position-drift compensation for time domain passivity approach based teleoperation</article-title>, in <source>IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)</source> (<publisher-loc>Madrid</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>5525</fpage>&#x02013;<lpage>5532</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2018.8594125</pub-id></citation></ref>
<ref id="B25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dietrich</surname> <given-names>A.</given-names></name> <name><surname>Ott</surname> <given-names>C.</given-names></name></person-group> (<year>2020</year>). <article-title>Hierarchical impedance-based tracking control of kinematically redundant robots</article-title>. <source>IEEE Trans. Robot</source>. <volume>36</volume>, <fpage>204</fpage>&#x02013;<lpage>221</lpage>. <pub-id pub-id-type="doi">10.1109/TRO.2019.2945876</pub-id></citation></ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ferrell</surname> <given-names>W. R.</given-names></name></person-group> (<year>1965</year>). <article-title>Remote manipulation with transmission delay</article-title>. <source>IEEE Trans. Hum. Fact. Electron</source>. HFE-<volume>6</volume>, <fpage>24</fpage>&#x02013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.1109/THFE.1965.6591253</pub-id><pub-id pub-id-type="pmid">14282791</pub-id></citation></ref>
<ref id="B27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ferrell</surname> <given-names>W. R.</given-names></name> <name><surname>Sheridan</surname> <given-names>T. B.</given-names></name></person-group> (<year>1967</year>). <article-title>Supervisory control of remote manipulation</article-title>. <source>IEEE Spectr</source>. <volume>4</volume>, <fpage>81</fpage>&#x02013;<lpage>88</lpage>. <pub-id pub-id-type="doi">10.1109/MSPEC.1967.5217126</pub-id></citation></ref>
<ref id="B28">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Fong</surname> <given-names>T.</given-names></name> <name><surname>Thorpe</surname> <given-names>C.</given-names></name> <name><surname>Baur</surname> <given-names>C.</given-names></name></person-group> (<year>2001</year>). <article-title>A safeguarded teleoperation controller</article-title>, in <source>IEEE International Conference on Advanced Robotics (ICAR)</source> (<publisher-loc>Budapest</publisher-loc>), <fpage>351</fpage>&#x02013;<lpage>356</lpage>.</citation></ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ghallab</surname> <given-names>M.</given-names></name> <name><surname>Howe</surname> <given-names>A.</given-names></name> <name><surname>Christianson</surname> <given-names>D.</given-names></name> <name><surname>McDermott</surname> <given-names>D.</given-names></name> <name><surname>Ram</surname> <given-names>A.</given-names></name> <name><surname>Veloso</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>1998</year>). <article-title>PDDL - The planning domain definition language</article-title>. <source>AIPS98 Plann. Commit</source>. <volume>78</volume>, <fpage>1</fpage>&#x02013;<lpage>27</lpage>.</citation></ref>
<ref id="B30">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Goldfarb</surname> <given-names>M.</given-names></name></person-group> (<year>1998</year>). <article-title>Dimensional analysis and selective distortion in scaled bilateral telemanipulation</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source>, <volume>Vol. 2</volume>, (<publisher-loc>Leuven</publisher-loc>), <fpage>1609</fpage>&#x02013;<lpage>1614</lpage>. <pub-id pub-id-type="doi">10.1109/ROBOT.1998.677379</pub-id></citation></ref>
<ref id="B31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Goodrich</surname> <given-names>M. A.</given-names></name> <name><surname>Crandall</surname> <given-names>J. W.</given-names></name> <name><surname>Barakova</surname> <given-names>E.</given-names></name></person-group> (<year>2013</year>). <article-title>Teleoperation and beyond for assistive humanoid robots</article-title>. <source>Rev. Hum. Fact. Ergon</source>. <volume>9</volume>, <fpage>175</fpage>&#x02013;<lpage>226</lpage>. <pub-id pub-id-type="doi">10.1177/1557234X13502463</pub-id></citation></ref>
<ref id="B32">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Gupta</surname> <given-names>N.</given-names></name> <name><surname>Smith</surname> <given-names>J.</given-names></name> <name><surname>Shrewsbury</surname> <given-names>B.</given-names></name> <name><surname>B&#x000F6;rnich</surname> <given-names>B.</given-names></name></person-group> (<year>2019</year>). <article-title>2D push recovery and balancing of the eve R3-a humanoid robot with wheel-base, using model predictive control and gain scheduling</article-title>, in <source>2019 IEEE-RAS 19th International Conference on Humanoid Robots (Humanoids)</source> (<publisher-loc>Toronto, ON</publisher-loc>), <fpage>365</fpage>&#x02013;<lpage>372</lpage>. <pub-id pub-id-type="doi">10.1109/Humanoids43949.2019.9035044</pub-id><pub-id pub-id-type="pmid">34000257</pub-id></citation></ref>
<ref id="B33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hagn</surname> <given-names>U.</given-names></name> <name><surname>Konietschke</surname> <given-names>R.</given-names></name> <name><surname>Tobergte</surname> <given-names>A.</given-names></name> <name><surname>Nickl</surname> <given-names>M.</given-names></name> <name><surname>J&#x000F6;rg</surname> <given-names>S.</given-names></name> <name><surname>K&#x000FC;bler</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2010</year>). <article-title>DLR MiroSurge: a versatile system for research in endoscopic telesurgery</article-title>. <source>Int. J. Comput. Assist. Radiol. Surg</source>. <volume>5</volume>, <fpage>183</fpage>&#x02013;<lpage>193</lpage>. <pub-id pub-id-type="doi">10.1007/s11548-009-0372-4</pub-id><pub-id pub-id-type="pmid">20033517</pub-id></citation></ref>
<ref id="B34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hannaford</surname> <given-names>B.</given-names></name></person-group> (<year>1989</year>). <article-title>A design framework for teleoperators with kinesthetic feedback</article-title>. <source>IEEE Trans. Robot. Autom</source>. <volume>5</volume>, <fpage>426</fpage>&#x02013;<lpage>434</lpage>. <pub-id pub-id-type="doi">10.1109/70.88057</pub-id></citation></ref>
<ref id="B35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hannaford</surname> <given-names>B.</given-names></name> <name><surname>Ryu</surname> <given-names>J.-H.</given-names></name></person-group> (<year>2002</year>). <article-title>Time-domain passivity control of haptic interfaces</article-title>. <source>IEEE Trans. Robot. Autom</source>. <volume>18</volume>, <fpage>1</fpage>&#x02013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1109/70.988969</pub-id></citation></ref>
<ref id="B36">
<citation citation-type="thesis"><person-group person-group-type="author"><name><surname>Hertkorn</surname> <given-names>K.</given-names></name></person-group> (<year>2016</year>). <source>Shared grasping: a combination of telepresence and grasp planning</source> (<publisher-loc>Ph.D. thesis</publisher-loc>). Karlsruher Institut f&#x000FC;r Technologie (KIT), Karlsruhe, Germany.</citation></ref>
<ref id="B37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hirzinger</surname> <given-names>G.</given-names></name> <name><surname>Brunner</surname> <given-names>B.</given-names></name> <name><surname>Dietrich</surname> <given-names>J.</given-names></name> <name><surname>Heindl</surname> <given-names>J.</given-names></name></person-group> (<year>1993</year>). <article-title>Sensor-based space robotics - rotex and its telerobotic features</article-title>. <source>IEEE Trans. Robot. Autom</source>. <volume>9</volume>, <fpage>649</fpage>&#x02013;<lpage>663</lpage>. <pub-id pub-id-type="doi">10.1109/70.258056</pub-id></citation></ref>
<ref id="B38">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Hulin</surname> <given-names>T.</given-names></name> <name><surname>Gonz&#x000E1;lez Camarero</surname> <given-names>R.</given-names></name> <name><surname>Albu-Sch&#x000E4;ffer</surname> <given-names>A.</given-names></name></person-group> (<year>2013</year>). <article-title>Optimal control for haptic rendering: Fast energy dissipation and minimum overshoot</article-title>, in <source>IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)</source> (<publisher-loc>Tokyo</publisher-loc>), <fpage>4505</fpage>&#x02013;<lpage>4511</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2013.6697004</pub-id></citation></ref>
<ref id="B39">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Inagaki</surname> <given-names>T.</given-names></name></person-group> (<year>2003</year>). <article-title>Adaptive automation: Sharing and trading of control</article-title>, in <source>Handbook of Cognitive Task Design</source>, <volume>Vol. 8</volume>, eds, <person-group person-group-type="editor"><name><surname>Hollnagel</surname></name> <name><surname>Erik</surname></name></person-group> (<publisher-loc>Mahawa, NJ</publisher-loc>: <publisher-name>Lawrence Erlbaum Associates, Inc.</publisher-name>), <fpage>147</fpage>&#x02013;<lpage>169</lpage>. <pub-id pub-id-type="doi">10.1201/9781410607775.ch8</pub-id></citation></ref>
<ref id="B40">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Johnsen</surname> <given-names>E. G.</given-names></name> <name><surname>Corliss</surname> <given-names>W. R.</given-names></name></person-group> (<year>1971</year>). <source>Human Factors Applications in Teleoperator Design and Operation</source>. <publisher-loc>New York, NY</publisher-loc>: <publisher-name>Wiley-Interscience</publisher-name>.</citation></ref>
<ref id="B41">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Kang</surname> <given-names>H.</given-names></name> <name><surname>Park</surname> <given-names>Y. S.</given-names></name> <name><surname>Ewing</surname> <given-names>T. F.</given-names></name> <name><surname>Faulring</surname> <given-names>E.</given-names></name> <name><surname>Colgate</surname> <given-names>J. E.</given-names></name></person-group> (<year>2004</year>). <article-title>Visually and haptically augmented teleoperation in D&#x00026;D tasks using virtual fixtures</article-title>, in <source>International Conference on Robotics and Remote Systems for Hazardous Environments</source> (<publisher-loc>Gainesville, FL</publisher-loc>), <fpage>466</fpage>&#x02013;<lpage>471</lpage>.</citation></ref>
<ref id="B42">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Klodmann</surname> <given-names>J.</given-names></name> <name><surname>Schlenk</surname> <given-names>C.</given-names></name> <name><surname>Borsdorf</surname> <given-names>S.</given-names></name> <name><surname>Unterhinninghofen</surname> <given-names>R.</given-names></name> <name><surname>Albu-Sch&#x000E4;ffer</surname> <given-names>A.</given-names></name> <name><surname>Hirzinger</surname> <given-names>G.</given-names></name></person-group> (<year>2020</year>). <article-title>Robotische assistenzsysteme f&#x000FC;r die chirurgie</article-title>. <source>Der Chirurg</source> <volume>91</volume>, <fpage>533</fpage>&#x02013;<lpage>543</lpage>. <pub-id pub-id-type="doi">10.1007/s00104-020-01205-8</pub-id><pub-id pub-id-type="pmid">32583025</pub-id></citation></ref>
<ref id="B43">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lawrence</surname> <given-names>D. A.</given-names></name></person-group> (<year>1993</year>). <article-title>Stability and transparency in bilateral teleoperation</article-title>. <source>IEEE Trans. Robot. Autom</source>. <volume>9</volume>, <fpage>624</fpage>&#x02013;<lpage>637</lpage>. <pub-id pub-id-type="doi">10.1109/70.258054</pub-id></citation></ref>
<ref id="B44">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>J.</given-names></name> <name><surname>Balachandran</surname> <given-names>R.</given-names></name> <name><surname>Sarkisov</surname> <given-names>Y.</given-names></name> <name><surname>De Stefano</surname> <given-names>M.</given-names></name> <name><surname>Coelho</surname> <given-names>A.</given-names></name> <name><surname>Shinde</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Visual-inertial telepresence for aerial manipulation</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Paris</publisher-loc>), <fpage>1222</fpage>&#x02013;<lpage>1229</lpage>. <pub-id pub-id-type="doi">10.1109/ICRA40945.2020.9197394</pub-id></citation></ref>
<ref id="B45">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Leidner</surname> <given-names>D. S.</given-names></name></person-group> (<year>2019</year>). <source>Cognitive Reasoning for Compliant Robot Manipulation</source>. <publisher-name>Springer</publisher-name>. <pub-id pub-id-type="doi">10.1007/978-3-030-04858-7</pub-id></citation></ref>
<ref id="B46">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Lii</surname> <given-names>N. Y.</given-names></name> <name><surname>Riecke</surname> <given-names>C.</given-names></name> <name><surname>Leidner</surname> <given-names>D.</given-names></name> <name><surname>Sch&#x000E4;tzle</surname> <given-names>S.</given-names></name> <name><surname>Schmaus</surname> <given-names>P.</given-names></name> <name><surname>Weber</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>The robot as an avatar or co-worker? An investigation of the different teleoperation modalities through the KONTUR-2 and meteron supvis justin space telerobotic missions</article-title>, in <source>Int. Astronautical Congress (IAC)</source> (<publisher-loc>Bremen</publisher-loc>).</citation></ref>
<ref id="B47">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>McNeely</surname> <given-names>W. A.</given-names></name> <name><surname>Puterbaugh</surname> <given-names>K. D.</given-names></name> <name><surname>Troy</surname> <given-names>J. J.</given-names></name></person-group> (<year>2006</year>). <article-title>Voxel-based 6-DOF haptic rendering improvements</article-title>. <source>Haptics-e</source> <volume>3</volume>:<fpage>50</fpage>. <pub-id pub-id-type="doi">10.1145/1198555.1198606</pub-id></citation></ref>
<ref id="B48">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Miller</surname> <given-names>R.</given-names></name> <name><surname>Minsky</surname> <given-names>M. L.</given-names></name> <name><surname>Smith</surname> <given-names>D. B.</given-names></name></person-group> (<year>1982</year>). <source>Space Applications of Automation, Robotics and Machine Intelligence Systems (ARAMIS), Vol. 1: Executive Summary</source>. <publisher-name>NASA</publisher-name>.</citation></ref>
<ref id="B49">
<citation citation-type="web"><person-group person-group-type="author"><collab>MIRO Innovation Lab</collab></person-group> (<year>2017</year>). <source>MIRO Innovation Lab</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://miroinnovationlab.de/en/home-en/index.html">https://miroinnovationlab.de/en/home-en/index.html</ext-link> (accessed September 22, 2020).</citation></ref>
<ref id="B50">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mi&#x00161;eikis</surname> <given-names>J.</given-names></name> <name><surname>Caroni</surname> <given-names>P.</given-names></name> <name><surname>Duchamp</surname> <given-names>P.</given-names></name> <name><surname>Gasser</surname> <given-names>A.</given-names></name> <name><surname>Marko</surname> <given-names>R.</given-names></name> <name><surname>Mi&#x00161;eikien&#x00117;</surname> <given-names>N.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Lio-a personal robot assistant for human-robot interaction and care applications</article-title>. <source>IEEE Robot. Autom. Lett</source>. <volume>5</volume>, <fpage>5339</fpage>&#x02013;<lpage>5346</lpage>. <pub-id pub-id-type="doi">10.1109/LRA.2020.3007462</pub-id></citation></ref>
<ref id="B51">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mitra</surname> <given-names>P.</given-names></name> <name><surname>Niemeyer</surname> <given-names>G.</given-names></name></person-group> (<year>2008</year>). <article-title>Model-mediated telemanipulation</article-title>. <source>Int. J. Robot. Res</source>. <volume>27</volume>, <fpage>253</fpage>&#x02013;<lpage>262</lpage>. <pub-id pub-id-type="doi">10.1177/0278364907084590</pub-id></citation></ref>
<ref id="B52">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Musi&#x00107;</surname> <given-names>S.</given-names></name> <name><surname>Hirche</surname> <given-names>S.</given-names></name></person-group> (<year>2017</year>). <article-title>Control sharing in human-robot team interaction</article-title>. <source>Annu. Rev. Control</source> <volume>44</volume>, <fpage>342</fpage>&#x02013;<lpage>354</lpage>. <pub-id pub-id-type="doi">10.1016/j.arcontrol.2017.09.017</pub-id></citation></ref>
<ref id="B53">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Napoli</surname> <given-names>J.</given-names></name> <name><surname>Kugler</surname> <given-names>M. S.</given-names></name></person-group> (<year>2017</year>). <article-title>The additive manufacturing facility: one year on the ISS National Lab</article-title>, in <source>Proceedings of the ISS Research and Development Conference.</source> <publisher-loc>Washington, DC</publisher-loc>.</citation></ref>
<ref id="B54">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Nitsch</surname> <given-names>V.</given-names></name> <name><surname>F&#x000E4;rber</surname> <given-names>B.</given-names></name> <name><surname>Hellings</surname> <given-names>A.</given-names></name> <name><surname>J&#x000F6;rg</surname> <given-names>S.</given-names></name> <name><surname>Tobergte</surname> <given-names>A.</given-names></name> <name><surname>Konietschke</surname> <given-names>R.</given-names></name></person-group> (<year>2012</year>). <article-title>Bi-modal assistance functions and their effect on user perception and movement coordination with telesurgery systems</article-title>, in <source>2012 IEEE International Workshop on Haptic Audio Visual Environments and Games (HAVE 2012) Proceedings</source> (<publisher-loc>Munich</publisher-loc>), <fpage>32</fpage>&#x02013;<lpage>37</lpage>. <pub-id pub-id-type="doi">10.1109/HAVE.2012.6374427</pub-id></citation></ref>
<ref id="B55">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ollero</surname> <given-names>A.</given-names></name> <name><surname>Heredia</surname> <given-names>G.</given-names></name> <name><surname>Franchi</surname> <given-names>A.</given-names></name> <name><surname>Antonelli</surname> <given-names>G.</given-names></name> <name><surname>Kondak</surname> <given-names>K.</given-names></name> <name><surname>Sanfeliu</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>The Aeroarms project: aerial robots with advanced manipulation capabilities for inspection and maintenance</article-title>. <source>IEEE Robot. Autom. Mag</source>. <volume>25</volume>, <fpage>12</fpage>&#x02013;<lpage>23</lpage>. <pub-id pub-id-type="doi">10.1109/MRA.2018.2852789</pub-id></citation></ref>
<ref id="B56">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Onal</surname> <given-names>C. D.</given-names></name> <name><surname>Sitti</surname> <given-names>M.</given-names></name></person-group> (<year>2009</year>). <article-title>A scaled bilateral control system for experimental one-dimensional teleoperated nanomanipulation</article-title>. <source>Int. J. Robot. Res</source>. <volume>28</volume>, <fpage>484</fpage>&#x02013;<lpage>497</lpage>. <pub-id pub-id-type="doi">10.1177/0278364908097773</pub-id></citation></ref>
<ref id="B57">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Panzirsch</surname> <given-names>M.</given-names></name> <name><surname>Artigas</surname> <given-names>J.</given-names></name> <name><surname>Ryu</surname> <given-names>J.-H.</given-names></name> <name><surname>Ferre</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Multilateral control for delayed teleoperation</article-title>, in <source>2013 16th International Conference on Advanced Robotics (ICAR)</source> (<publisher-loc>Montevideo</publisher-loc>), <fpage>1</fpage>&#x02013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1109/ICAR.2013.6766476</pub-id></citation></ref>
<ref id="B58">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Panzirsch</surname> <given-names>M.</given-names></name> <name><surname>Balachandran</surname> <given-names>R.</given-names></name> <name><surname>Artigas</surname> <given-names>J.</given-names></name> <name><surname>Riecke</surname> <given-names>C.</given-names></name> <name><surname>Ferre</surname> <given-names>M.</given-names></name> <name><surname>Albu-Schaeffer</surname> <given-names>A.</given-names></name></person-group> (<year>2017</year>). <article-title>Haptic intention augmentation for cooperative teleoperation</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Singapore</publisher-loc>), <fpage>5335</fpage>&#x02013;<lpage>5341</lpage>. <pub-id pub-id-type="doi">10.1109/ICRA.2017.7989627</pub-id></citation></ref>
<ref id="B59">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Panzirsch</surname> <given-names>M.</given-names></name> <name><surname>Balachandran</surname> <given-names>R.</given-names></name> <name><surname>Weber</surname> <given-names>B.</given-names></name> <name><surname>Ferre</surname> <given-names>M.</given-names></name> <name><surname>Artigas</surname> <given-names>J.</given-names></name></person-group> (<year>2018a</year>). <article-title>Haptic augmentation for teleoperation through virtual grasping points</article-title>. <source>IEEE Trans. Hapt</source>. <volume>11</volume>, <fpage>400</fpage>&#x02013;<lpage>416</lpage>. <pub-id pub-id-type="doi">10.1109/TOH.2018.2809746</pub-id><pub-id pub-id-type="pmid">29994289</pub-id></citation></ref>
<ref id="B60">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Panzirsch</surname> <given-names>M.</given-names></name> <name><surname>Singh</surname> <given-names>H.</given-names></name> <name><surname>Kruger</surname> <given-names>T.</given-names></name> <name><surname>Ott</surname> <given-names>C.</given-names></name> <name><surname>Albu-Schaffer</surname> <given-names>A.</given-names></name></person-group> (<year>2020a</year>). <article-title>Safe interactions and kinesthetic feedback in high performance earth-to-moon teleoperation</article-title>, in <source>IEEE Aerospace Conference</source> (<publisher-loc>Big Sky</publisher-loc>). <pub-id pub-id-type="doi">10.1109/AERO47225.2020.9172665</pub-id></citation></ref>
<ref id="B61">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Panzirsch</surname> <given-names>M.</given-names></name> <name><surname>Singh</surname> <given-names>H.</given-names></name> <name><surname>Ott</surname> <given-names>C.</given-names></name></person-group> (<year>2020b</year>). <article-title>The 6-dof implementation of the energy-reflection based time domain passivity approach with preservation of physical coupling behavior</article-title>. <source>IEEE Robot. Autom. Lett</source>. <volume>5</volume>, <fpage>6756</fpage>&#x02013;<lpage>6763</lpage>. <pub-id pub-id-type="doi">10.1109/LRA.2020.3010727</pub-id></citation></ref>
<ref id="B62">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Panzirsch</surname> <given-names>M.</given-names></name> <name><surname>Singh</surname> <given-names>H.</given-names></name> <name><surname>Stelzer</surname> <given-names>M.</given-names></name> <name><surname>Schuster</surname> <given-names>M. J.</given-names></name> <name><surname>Ott</surname> <given-names>C.</given-names></name> <name><surname>Ferre</surname> <given-names>M.</given-names></name></person-group> (<year>2018b</year>). <article-title>Extended predictive model-mediated teleoperation of mobile robots through multilateral control</article-title>, in <source>IEEE Intelligent Vehicles Symposium (IV)</source> (<publisher-loc>Changshu</publisher-loc>), <fpage>1723</fpage>&#x02013;<lpage>1730</lpage>. <pub-id pub-id-type="doi">10.1109/IVS.2018.8500578</pub-id></citation></ref>
<ref id="B63">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Parasuraman</surname> <given-names>R.</given-names></name> <name><surname>Riley</surname> <given-names>V.</given-names></name></person-group> (<year>1997</year>). <article-title>Humans and automation: use, misuse, disuse, abuse</article-title>. <source>Hum. Fact</source>. <volume>39</volume>, <fpage>230</fpage>&#x02013;<lpage>253</lpage>. <pub-id pub-id-type="doi">10.1518/001872097778543886</pub-id></citation></ref>
<ref id="B64">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Passenberg</surname> <given-names>C.</given-names></name> <name><surname>Peer</surname> <given-names>A.</given-names></name> <name><surname>Buss</surname> <given-names>M.</given-names></name></person-group> (<year>2010</year>). <article-title>Model-mediated teleoperation for multi-operator multi-robot systems</article-title>, in <source>IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)</source> (<publisher-loc>Taipei</publisher-loc>), <fpage>4263</fpage>&#x02013;<lpage>4268</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2010.5653012</pub-id></citation></ref>
<ref id="B65">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Payandeh</surname> <given-names>S.</given-names></name> <name><surname>Stanisic</surname> <given-names>Z.</given-names></name></person-group> (<year>2002</year>). <article-title>On application of virtual fixtures as an aid for telemanipulation and training</article-title>, in <source>IEEE Haptics Symposium</source> (<publisher-loc>Orlando, FL</publisher-loc>), <fpage>18</fpage>&#x02013;<lpage>23</lpage>. <pub-id pub-id-type="doi">10.1109/HAPTIC.2002.998936</pub-id></citation></ref>
<ref id="B66">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pepito</surname> <given-names>J. A.</given-names></name> <name><surname>Ito</surname> <given-names>H.</given-names></name> <name><surname>Betriana</surname> <given-names>F.</given-names></name> <name><surname>Tanioka</surname> <given-names>T.</given-names></name> <name><surname>Locsin</surname> <given-names>R. C.</given-names></name></person-group> (<year>2020</year>). <article-title>Intelligent humanoid robots expressing artificial humanlike empathy in nursing situations</article-title>. <source>Nursing Philos</source>. <volume>21</volume>:<fpage>e12318</fpage>. <pub-id pub-id-type="doi">10.1111/nup.12318</pub-id><pub-id pub-id-type="pmid">33462939</pub-id></citation></ref>
<ref id="B67">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Preusche</surname> <given-names>C.</given-names></name> <name><surname>Reintsema</surname> <given-names>D.</given-names></name> <name><surname>Landzettel</surname> <given-names>K.</given-names></name> <name><surname>Hirzinger</surname> <given-names>G.</given-names></name></person-group> (<year>2006</year>). <article-title>Robotics component verification on ISS ROKVISS - preliminary results for telepresence</article-title>, in <source>IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)</source> (<publisher-loc>Beijing</publisher-loc>), <fpage>4595</fpage>&#x02013;<lpage>4601</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2006.282165</pub-id></citation></ref>
<ref id="B68">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Quere</surname> <given-names>G.</given-names></name> <name><surname>Hagengruber</surname> <given-names>A.</given-names></name> <name><surname>Iskandar</surname> <given-names>M.</given-names></name> <name><surname>Bustamante</surname> <given-names>S.</given-names></name> <name><surname>Leidner</surname> <given-names>D.</given-names></name> <name><surname>Stulp</surname> <given-names>F.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Shared control templates for assistive robotics</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Paris</publisher-loc>), <fpage>1956</fpage>&#x02013;<lpage>1962</lpage>. <pub-id pub-id-type="doi">10.1109/ICRA40945.2020.9197041</pub-id></citation></ref>
<ref id="B69">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Riecke</surname> <given-names>C.</given-names></name> <name><surname>Artigas</surname> <given-names>J.</given-names></name> <name><surname>Balachandran</surname> <given-names>R.</given-names></name> <name><surname>Bayer</surname> <given-names>R.</given-names></name> <name><surname>Beyer</surname> <given-names>A.</given-names></name> <name><surname>Brunner</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Kontur-2 mission: the DLR force feedback joystick for space telemanipulation from the ISS</article-title>, in <source>International Symposium on Artificial Intelligence, Robotics, and Automation in Space (i-SAIRAS)</source> (<publisher-loc>Beijing</publisher-loc>).</citation></ref>
<ref id="B70">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Riecke</surname> <given-names>C.</given-names></name> <name><surname>Weber</surname> <given-names>B.</given-names></name> <name><surname>Maier</surname> <given-names>M.</given-names></name> <name><surname>Stelzer</surname> <given-names>M.</given-names></name> <name><surname>Balachandran</surname> <given-names>R.</given-names></name> <name><surname>Kondratiev</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Kontur-3: Human machine interfaces for telenavigation and manipulation of robots from ISS</article-title>, in <source>IEEE Aerospace Conference</source> (<publisher-loc>Big Sky</publisher-loc>), <fpage>1</fpage>&#x02013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1109/AERO47225.2020.9172347</pub-id></citation></ref>
<ref id="B71">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Riek</surname> <given-names>L. D.</given-names></name></person-group> (<year>2017</year>). <article-title>Healthcare robotics</article-title>. <source>Commun. ACM</source> <volume>60</volume>, <fpage>68</fpage>&#x02013;<lpage>78</lpage>. <pub-id pub-id-type="doi">10.1145/3127874</pub-id></citation></ref>
<ref id="B72">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Rosenberg</surname> <given-names>L. B.</given-names></name></person-group> (<year>1992</year>). <source>The Use of Virtual Fixtures as Perceptual Overlays to Enhance Operator Performance in Remote Environments</source>. Technical report, <publisher-loc>Wright-Patterson AFB OH</publisher-loc>: <publisher-name>USAF Armstrong Laboratory</publisher-name>.</citation></ref>
<ref id="B73">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Rosenberg</surname> <given-names>L. B.</given-names></name></person-group> (<year>1993a</year>). <article-title>The use of virtual fixtures to enhance telemanipulation with time delay</article-title>, in <source>Proceedings of the ASME Winter Annual Meeting on Advances in Robotics, Mechatronics, and Haptic Interfaces</source>, <volume>Vol. 49</volume>, (<publisher-loc>New Orleans, LA</publisher-loc>), <fpage>29</fpage>&#x02013;<lpage>36</lpage>.</citation></ref>
<ref id="B74">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Rosenberg</surname> <given-names>L. B.</given-names></name></person-group> (<year>1993b</year>). <article-title>Virtual fixtures: perceptual tools for telerobotic manipulation</article-title>, in <source>Proceedings of IEEE Virtual Reality Annual International Symposium</source> (<publisher-loc>Seattle, WA</publisher-loc>), <fpage>76</fpage>&#x02013;<lpage>82</lpage>. <pub-id pub-id-type="doi">10.1109/VRAIS.1993.380795</pub-id></citation></ref>
<ref id="B75">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ryu</surname> <given-names>J.-H.</given-names></name> <name><surname>Artigas</surname> <given-names>J.</given-names></name> <name><surname>Preusche</surname> <given-names>C.</given-names></name></person-group> (<year>2010</year>). <article-title>A passive bilateral control scheme for a teleoperator with time-varying communication delay</article-title>. <source>Mechatronics</source> <volume>20</volume>, <fpage>812</fpage>&#x02013;<lpage>823</lpage>. <pub-id pub-id-type="doi">10.1016/j.mechatronics.2010.07.006</pub-id></citation></ref>
<ref id="B76">
<citation citation-type="thesis"><person-group person-group-type="author"><name><surname>Sagardia</surname> <given-names>M.</given-names></name></person-group> (<year>2019</year>). <source>Virtual manipulations with force feedback in complex interaction scenarios</source> (Ph.D. thesis). Technische Universit&#x000E4;t M&#x000FC;nchen, Munich, Germany.</citation></ref>
<ref id="B77">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Sagardia</surname> <given-names>M.</given-names></name> <name><surname>Stouraitis</surname> <given-names>T.</given-names></name> <name><surname>e Silva</surname> <given-names>J. L.</given-names></name></person-group> (<year>2014</year>). <article-title>A new fast and robust collision detection and force computation algorithm applied to the physics engine bullet: method, integration, and evaluation</article-title>, in <source>EuroVR</source> (<publisher-loc>Bremen</publisher-loc>), <fpage>65</fpage>&#x02013;<lpage>76</lpage>.</citation></ref>
<ref id="B78">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Sarkisov</surname> <given-names>Y. S.</given-names></name> <name><surname>Kim</surname> <given-names>M. J.</given-names></name> <name><surname>Bicego</surname> <given-names>D.</given-names></name> <name><surname>Tsetserukou</surname> <given-names>D.</given-names></name> <name><surname>Ott</surname> <given-names>C.</given-names></name> <name><surname>Franchi</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Development of SAM: cable-suspended aerial manipulator</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source>, <fpage>5323</fpage>&#x02013;<lpage>5329</lpage>. <pub-id pub-id-type="doi">10.1109/ICRA.2019.8793592</pub-id></citation></ref>
<ref id="B79">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schmaus</surname> <given-names>P.</given-names></name> <name><surname>Leidner</surname> <given-names>D.</given-names></name> <name><surname>Kr&#x000FC;ger</surname> <given-names>T.</given-names></name> <name><surname>Bayer</surname> <given-names>R.</given-names></name> <name><surname>Pleintinger</surname> <given-names>B.</given-names></name> <name><surname>Schiele</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Knowledge driven orbit-to-ground teleoperation of a robot coworker</article-title>. <source>IEEE Robot. Autom. Lett</source>. <volume>5</volume>, <fpage>143</fpage>&#x02013;<lpage>150</lpage>. <pub-id pub-id-type="doi">10.1109/LRA.2019.2948128</pub-id></citation></ref>
<ref id="B80">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Schmidt</surname> <given-names>P.</given-names></name> <name><surname>Balachandran</surname> <given-names>R.</given-names></name> <name><surname>Artigas Esclusa</surname> <given-names>J.</given-names></name></person-group> (<year>2016</year>). <article-title>Shared control for robotic on-orbit servicing</article-title>, in <source>The Robotics: Science and Systems</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://elib.dlr.de/113169">https://elib.dlr.de/113169</ext-link></citation></ref>
<ref id="B81">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Seibold</surname> <given-names>U.</given-names></name> <name><surname>K&#x000FC;bler</surname> <given-names>B.</given-names></name> <name><surname>Bahls</surname> <given-names>T.</given-names></name> <name><surname>Haslinger</surname> <given-names>R.</given-names></name> <name><surname>Steidle</surname> <given-names>F.</given-names></name></person-group> (<year>2018</year>). <article-title>The DLR MiroSurge surgical robotic demonstrator</article-title>. <source>Encyclop. Med. Robot</source>. <volume>1</volume>, <fpage>111</fpage>&#x02013;<lpage>142</lpage>. <pub-id pub-id-type="doi">10.1142/9789813232266_0005</pub-id></citation></ref>
<ref id="B82">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Sheridan</surname> <given-names>T. B.</given-names></name></person-group> (<year>1992</year>). <source>Telerobotics, Automation, and Human Supervisory Control</source>. <publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>MIT Press</publisher-name>.</citation></ref>
<ref id="B83">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Sierotowicz</surname> <given-names>M.</given-names></name> <name><surname>Weber</surname> <given-names>B.</given-names></name> <name><surname>Belder</surname> <given-names>R.</given-names></name> <name><surname>Bussmann</surname> <given-names>K.</given-names></name> <name><surname>Singh</surname> <given-names>H.</given-names></name> <name><surname>Panzirsch</surname> <given-names>M.</given-names></name></person-group> (<year>2020</year>). <article-title>Investigating the influence of haptic feedback in rover navigation with communication delay</article-title>, in <source>EuroHaptics</source> (<publisher-loc>Paris</publisher-loc>), <fpage>1723</fpage>&#x02013;<lpage>1730</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-58147-3_58</pub-id></citation></ref>
<ref id="B84">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>H.</given-names></name> <name><surname>Jafari</surname> <given-names>A.</given-names></name> <name><surname>Peer</surname> <given-names>A.</given-names></name> <name><surname>Ryu</surname> <given-names>J.-H.</given-names></name></person-group> (<year>2018</year>). <article-title>Enhancing the command-following bandwidth for transparent bilateral teleoperation</article-title>, in <source>IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)</source> (<publisher-loc>Madrid</publisher-loc>), <fpage>4972</fpage>&#x02013;<lpage>4979</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2018.8593866</pub-id></citation></ref>
<ref id="B85">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>H.</given-names></name> <name><surname>Jafari</surname> <given-names>A.</given-names></name> <name><surname>Ryu</surname> <given-names>J.-H.</given-names></name></person-group> (<year>2019a</year>). <article-title>Enhancing the force transparency of time domain passivity approach: observer-based gradient controller</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Montreal, QC</publisher-loc>), <fpage>1583</fpage>&#x02013;<lpage>1589</lpage>. <pub-id pub-id-type="doi">10.1109/ICRA.2019.8793902</pub-id></citation></ref>
<ref id="B86">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>H.</given-names></name> <name><surname>Janetzko</surname> <given-names>D.</given-names></name> <name><surname>Jafari</surname> <given-names>A.</given-names></name> <name><surname>Weber</surname> <given-names>B.</given-names></name> <name><surname>Lee</surname> <given-names>C.-I.</given-names></name> <name><surname>Ryu</surname> <given-names>J.-H.</given-names></name></person-group> (<year>2019b</year>). <article-title>Enhancing the rate-hardness of haptic interaction: successive force augmentation approach</article-title>. <source>IEEE Trans. Indus. Electron</source>. <volume>67</volume>, <fpage>809</fpage>&#x02013;<lpage>819</lpage>. <pub-id pub-id-type="doi">10.1109/TIE.2019.2918500</pub-id></citation></ref>
<ref id="B87">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>H.</given-names></name> <name><surname>Panzirsch</surname> <given-names>M.</given-names></name> <name><surname>Coelho</surname> <given-names>A.</given-names></name> <name><surname>Ott</surname> <given-names>C.</given-names></name></person-group> (<year>2020</year>). <article-title>Proxy-based approach for position synchronization of delayed robot coupling without sacrificing performance</article-title>. <source>IEEE Robot. Autom. Lett</source>. <volume>5</volume>, <fpage>6599</fpage>&#x02013;<lpage>6606</lpage>. <pub-id pub-id-type="doi">10.1109/LRA.2020.3013860</pub-id></citation></ref>
<ref id="B88">
<citation citation-type="web"><person-group person-group-type="author"><collab>Spaceflight</collab></person-group> (<year>2020</year>). <source>Pricing Information</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="http://spaceflight.com/schedulepricing/pricing">http://spaceflight.com/schedulepricing/pricing</ext-link> (accessed August 26, 2020).</citation></ref>
<ref id="B89">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Sundermeyer</surname> <given-names>M.</given-names></name> <name><surname>Marton</surname> <given-names>Z.-C.</given-names></name> <name><surname>Durner</surname> <given-names>M.</given-names></name> <name><surname>Brucker</surname> <given-names>M.</given-names></name> <name><surname>Triebel</surname> <given-names>R.</given-names></name></person-group> (<year>2018</year>). <article-title>Implicit 3d orientation learning for 6d object detection from RGB images</article-title>, in <source>Proceedings of the European Conference on Computer Vision (ECCV)</source> (<publisher-loc>Munich</publisher-loc>). <pub-id pub-id-type="doi">10.1007/978-3-030-01231-1_43</pub-id></citation></ref>
<ref id="B90">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Tobergte</surname> <given-names>A.</given-names></name> <name><surname>Albu-Sch&#x000E4;ffer</surname> <given-names>A.</given-names></name></person-group> (<year>2012</year>). <article-title>Direct force reflecting teleoperation with a flexible joint robot</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Saint Paul, MN</publisher-loc>), <fpage>4280</fpage>&#x02013;<lpage>4287</lpage>. <pub-id pub-id-type="doi">10.1109/ICRA.2012.6224617</pub-id></citation></ref>
<ref id="B91">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Tobergte</surname> <given-names>A.</given-names></name> <name><surname>Helmer</surname> <given-names>P.</given-names></name></person-group> (<year>2013</year>). <article-title>A disturbance observer for the sigma.7 haptic device</article-title>, in <source>2013 IEEE/RSJ International Conference on Intelligent Robots and Systems</source> (<publisher-loc>Tokyo</publisher-loc>), <fpage>4964</fpage>&#x02013;<lpage>4969</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2013.6697073</pub-id></citation></ref>
<ref id="B92">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Tobergte</surname> <given-names>A.</given-names></name> <name><surname>Helmer</surname> <given-names>P.</given-names></name> <name><surname>Hagn</surname> <given-names>U.</given-names></name> <name><surname>Rouiller</surname> <given-names>P.</given-names></name> <name><surname>Thielmann</surname> <given-names>S.</given-names></name> <name><surname>Grange</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>The sigma.7 haptic interface for MiroSurge: a new bi-manual surgical console</article-title>, in <source>2011 IEEE/RSJ International Conference on Intelligent Robots and Systems</source> (<publisher-loc>San Francisco, CA</publisher-loc>), <fpage>3023</fpage>&#x02013;<lpage>3030</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2011.6048043</pub-id></citation></ref>
<ref id="B93">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Turro</surname> <given-names>N.</given-names></name> <name><surname>Khatib</surname> <given-names>O.</given-names></name></person-group> (<year>2001</year>). <article-title>Haptically augmented teleoperation</article-title>, in <source>IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Seoul; Springer</publisher-loc>), <fpage>386</fpage>&#x02013;<lpage>392</lpage>.</citation></ref>
<ref id="B94">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vogel</surname> <given-names>J.</given-names></name> <name><surname>Leidner</surname> <given-names>D.</given-names></name> <name><surname>Hagengruber</surname> <given-names>A.</given-names></name> <name><surname>Panzirsch</surname> <given-names>M.</given-names></name> <name><surname>B&#x000E4;uml</surname> <given-names>B.</given-names></name> <name><surname>Denninger</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>An ecosystem for heterogeneous robotic assistants in caregiving</article-title>. <source>IEEE Robot. Autom. Mag</source>. <pub-id pub-id-type="doi">10.1109/MRA.2020.3032142</pub-id></citation></ref>
<ref id="B95">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Weber Martins</surname> <given-names>T.</given-names></name> <name><surname>Pereira</surname> <given-names>A.</given-names></name> <name><surname>Hulin</surname> <given-names>T.</given-names></name> <name><surname>Ruf</surname> <given-names>O.</given-names></name> <name><surname>Kugler</surname> <given-names>S.</given-names></name> <name><surname>Giordano</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Space factory 4.0-new processes for the robotic assembly of modular satellites on an in-orbit platform based on industrie 4.0 approach</article-title>, in <source>Proceedings of the International Astronautical Congress (IAC)</source> (<publisher-loc>Bremen</publisher-loc>: <publisher-name>IAC</publisher-name>).</citation></ref>
<ref id="B96">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Weber</surname> <given-names>B.</given-names></name> <name><surname>Balachandran</surname> <given-names>R.</given-names></name> <name><surname>Riecke</surname> <given-names>C.</given-names></name> <name><surname>Stulp</surname> <given-names>F.</given-names></name> <name><surname>Stelzer</surname> <given-names>M.</given-names></name></person-group> (<year>2019</year>). <article-title>Teleoperating robots from the international space station: microgravity effects on performance with force feedback</article-title>, in <source>IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)</source> (<publisher-loc>Macau</publisher-loc>), <fpage>8138</fpage>&#x02013;<lpage>8144</lpage>. <pub-id pub-id-type="doi">10.1109/IROS40897.2019.8968030</pub-id></citation></ref>
<ref id="B97">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Weber</surname> <given-names>B.</given-names></name> <name><surname>Hellings</surname> <given-names>A.</given-names></name> <name><surname>Tobergte</surname> <given-names>A.</given-names></name> <name><surname>Lohmann</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Human performance and workload evaluation of input modalities for telesurgery</article-title>, in <source>Chancen durch Produkt- und Systemgestaltung - Zukunftsf&#x000E4;higkeit f&#x000FC;r Produktions- und Dienstleistungsunternehmen</source>, ed <person-group person-group-type="editor"><collab>German Society of Ergonomics (GfA)</collab></person-group> (<publisher-loc>Dortmund</publisher-loc>: <publisher-name>GfA-Press</publisher-name>), <fpage>409</fpage>&#x02013;<lpage>412</lpage>.</citation></ref>
<ref id="B98">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Weber</surname> <given-names>B.</given-names></name> <name><surname>Panzirsch</surname> <given-names>M.</given-names></name> <name><surname>Stulp</surname> <given-names>F.</given-names></name> <name><surname>Schneider</surname> <given-names>S.</given-names></name></person-group> (<year>2020</year>). <article-title>Sensorimotor performance and haptic support in simulated weightlessness</article-title>. <source>Exp. Brain Res</source>. <volume>238</volume>, <fpage>2373</fpage>&#x02013;<lpage>2384</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-020-05898-5</pub-id><pub-id pub-id-type="pmid">32767066</pub-id></citation></ref>
<ref id="B99">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Weber</surname> <given-names>B.</given-names></name> <name><surname>Schneider</surname> <given-names>S.</given-names></name></person-group> (<year>2014</year>). <article-title>The effects of force feedback on surgical task performance: a meta-analytical integration</article-title>, in <source>International Conference on Human Haptic Sensing and Touch Enabled Computer Applications</source> (<publisher-loc>Versailles</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>150</fpage>&#x02013;<lpage>157</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-662-44196-1_19</pub-id></citation></ref>
<ref id="B100">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Wedler</surname> <given-names>A.</given-names></name> <name><surname>Rebele</surname> <given-names>B.</given-names></name> <name><surname>Reill</surname> <given-names>J.</given-names></name> <name><surname>Suppa</surname> <given-names>M.</given-names></name> <name><surname>Hirschm&#x000FC;ller</surname> <given-names>H.</given-names></name> <name><surname>Brand</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>LRU-lightweight rover unit</article-title>, in <source>Proceedings of the 13th Symposium on Advanced Space Technologies in Robotics and Automation (ASTRA)</source> (<publisher-loc>Noordwijk</publisher-loc>).</citation></ref>
<ref id="B101">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Xia</surname> <given-names>T.</given-names></name> <name><surname>L&#x000E9;onard</surname> <given-names>S.</given-names></name> <name><surname>Deguet</surname> <given-names>A.</given-names></name> <name><surname>Whitcomb</surname> <given-names>L.</given-names></name> <name><surname>Kazanzides</surname> <given-names>P.</given-names></name></person-group> (<year>2012</year>). <article-title>Augmented reality environment with virtual fixtures for robotic telemanipulation in space</article-title>, in <source>IEEE/RSJ Int. Conf. on Intelligent Robots and Systems (IROS)</source> (<publisher-loc>Vilamoura</publisher-loc>), <fpage>5059</fpage>&#x02013;<lpage>5064</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2012.6386169</pub-id></citation></ref>
<ref id="B102">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>X.</given-names></name> <name><surname>Cizmeci</surname> <given-names>B.</given-names></name> <name><surname>Schuwerk</surname> <given-names>C.</given-names></name> <name><surname>Steinbach</surname> <given-names>E.</given-names></name></person-group> (<year>2016</year>). <article-title>Model-mediated teleoperation: toward stable and transparent teleoperation systems</article-title>. <source>IEEE Access</source> <volume>4</volume>, <fpage>425</fpage>&#x02013;<lpage>449</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2016.2517926</pub-id></citation></ref>
<ref id="B103">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>X.</given-names></name> <name><surname>Schuwerk</surname> <given-names>C.</given-names></name> <name><surname>Steinbach</surname> <given-names>E.</given-names></name></person-group> (<year>2015</year>). <article-title>Passivity-based model updating for model-mediated teleoperation</article-title>, in <source>2015 IEEE International Conference on Multimedia &#x00026; Expo Workshops (ICMEW)</source> (<publisher-loc>Torino</publisher-loc>), <fpage>1</fpage>&#x02013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1109/ICMEW.2015.7169831</pub-id></citation></ref>
</ref-list>
<fn-group>
<fn id="fn0001"><p><sup>1</sup>The acronym SMiLE stands for service robotics for people in restricted living situations (in German &#x0201C;Servicerobotik f&#x000FC;r Menschen in Lebenssituationen mit Einschr&#x000E4;nkungen&#x0201D;).</p></fn>
</fn-group>
<fn-group>
<fn fn-type="financial-disclosure"><p><bold>Funding.</bold> This research work was funded by several funding sources. The work mentioned in section 2.1 was partially funded by the German Research Foundation (DFG, Deutsche Forschungsgemeinschaft) as part of Germany&#x00027;s Excellence Strategy&#x02013;EXC 2050/1&#x02013;Project ID 390696704&#x02013;Cluster of Excellence Centre for Tactile Internet with Human-in-the-Loop (CeTI) of Technische Universit&#x000E4;t Dresden. Section 4.1 contains results achieved in the project Space Factory 4.0 funded by German Aerospace Center (DLR) and the Federal Ministry for Economic Affairs and Energy (BMWi). The work of section 4.2 was partially funded by the Bavarian Ministry of Economic Affairs, Regional Development and Energy, within the projects SMiLE (LABAY97) and SMiLE2gether (LABAY102).</p></fn>
</fn-group>
</back>
</article>
