---
---

@inproceedings{yan2024unified,
bibtex_show = {true},
title = {A Unified Interaction Control Framework for Safe Robotic Ultrasound Scanning with Human-Intention-Aware Compliance},
selected = {true},
author = {Xiangjie Yan and Shaqi Luo and Yongpeng Jiang and Mingrui Yu and Chen Chen and Senqiang Zhu and Gao Huang and Shiji Song and Xiang Li},
year = {2024},
eprint = {2302.05685},
arxiv = {2302.05685},
archiveprefix = {arXiv},
primaryclass = {cs.RO},
preview = {yan2023multimodal.png},
booktitle = {2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
website = {https://yanseim.github.io/iros24ultrasound/}
}

@article{yu2023generalizable,
bibtex_show = {true},
selected = {true},
preview = {yu2023generalizable.gif},
arxiv = {2310.09899},
website = {https://mingrui-yu.github.io/DLO_planning_2/},
title = {Generalizable whole-body global manipulation of deformable linear objects by dual-arm robot in 3-D constrained environments},
author = {Yu, Mingrui and Lv, Kangchen and Wang, Changhao and Jiang, Yongpeng and Tomizuka, Masayoshi and Li, Xiang},
journal = {The International Journal of Robotics Research},
year = {2024}
}

@inproceedings{yu2024inhand,
bibtex_show = {true},
selected = {true},
preview = {yu2024inhand.gif},
arxiv = {2403.12676},
website = {https://mingrui-yu.github.io/DLO_following/},
author = {Yu, Mingrui and Liang, Boyuan and Zhang, Xiang and Zhu, Xinghao and Li, Xiang and Tomizuka, Masayoshi},
booktitle = {2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
title = {In-Hand Following of Deformable Linear Objects Using Dexterous Fingers with Tactile Sensing},
year = {2024}
}

@inproceedings{jiang2024contact,
bibtex_show = {true},
preview = {jiang2024contact.png},
website = {https://director-of-g.github.io/in_hand_manipulation/},
title = {Contact-Implicit Model Predictive Control for Dexterous In-hand Manipulation: A Long-Horizon and Robust Approach},
author = {Yongpeng Jiang and Mingrui Yu and Xinghao Zhu and Masayoshi Tomizuka and Xiang Li},
booktitle = {2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
year = {2024},
eprint = {2402.18897},
arxiv = {2402.18897},
archiveprefix = {arXiv},
primaryclass = {cs.RO},
selected = {true}
}

@inproceedings{chen2024visual,
bibtex_show = {true},
title = {Visual Attention Based Cognitive Human--Robot Collaboration for Pedicle Screw Placement in Robot-Assisted Orthopedic Surgery},
author = {Chen Chen and Qikai Zou and Yuhang Song and Shiji Song and Xiang Li},
booktitle = {2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
year = {2024},
eprint = {2405.09359},
arxiv = {2405.09359},
archiveprefix = {arXiv},
primaryclass = {cs.RO},
preview = {chen2024visual.png},
selected = {true}
}

@article{yan2024complementary,
bibtex_show = {true},
selected = {true},
preview = {yan2024complementary.png},
author = {Yan, Xiangjie and Jiang, Yongpeng and Chen, Chen and Gong, Leiliang and Ge, Ming and Zhang, Tao and Li, Xiang},
journal = {IEEE Transactions on Control Systems Technology},
title = {A Complementary Framework for Human–Robot Collaboration With a Mixed AR–Haptic Interface},
year = {2024},
volume = {32},
number = {1},
pages = {112--127},
abstract = {There is invariably a tradeoff between safety and efficiency for collaborative robots (cobots) in human–robot collaborations (HRCs). Robots that interact minimally with humans can work with high speed and accuracy but cannot adapt to new tasks or respond to unforeseen changes, whereas robots that work closely with humans can but only by becoming passive to humans, meaning that their main tasks are suspended and efficiency compromised. Accordingly, this article proposes a new complementary framework for HRC that balances the safety of humans and the efficiency of robots. In this framework, the robot carries out given tasks using a vision-based adaptive controller, and the human expert collaborates with the robot in the null space. Such a decoupling drives the robot to deal with existing issues in task space [e.g., uncalibrated camera, limited field of view (FOV)] and null space (e.g., joint limits) by itself while allowing the expert to adjust the configuration of the robot body to respond to unforeseen changes (e.g., sudden invasion, change in environment) without affecting the robot’s main task. In addition, the robot can simultaneously learn the expert’s demonstration in task space and null space beforehand with dynamic movement primitives (DMPs). Therefore, an expert’s knowledge and a robot’s capability are explored and complement each other. Human demonstration and involvement are enabled via a mixed interaction interface, i.e., augmented reality (AR) and haptic devices. The stability of the closed-loop system is rigorously proved with Lyapunov methods. Experimental results in various scenarios are presented to illustrate the performance of the proposed method.},
doi = {10.1109/TCST.2023.3301675},
issn = {1558-0865},
month = {Jan}
}

@inproceedings{jia2024efficient,
bibtex_show = {true},
selected = {true},
title = {Efficient Model Learning and Adaptive Tracking Control of Magnetic Micro-Robots for Non-Contact Manipulation},
booktitle = {2024 IEEE International Conference on Robotics and Automation (ICRA)},
preview = {jia2024efficient.png},
arxiv = {2403.14414},
author = {Jia, Yongyi and Miao, Shu and Zhou, Junjian and Jiao, Niandong and Liu, Lianqing and Li, Xiang},
abstract = {Magnetic microrobots can be navigated by an external magnetic field to autonomously move within living organisms with complex and unstructured environments. Potential applications include drug delivery, diagnostics, and therapeutic interventions. Existing techniques commonly impart magnetic properties to the target object, or drive the robot to contact and then manipulate the object, both probably inducing physical damage. This paper considers a non-contact formulation, where the robot spins to generate a repulsive field to push the object without physical contact. Under such a formulation, the main challenge is that the motion model between the input of the magnetic field and the output velocity of the target object is commonly unknown and difficult to analyze. To deal with it, this paper proposes a data-driven-based solution. A neural network is constructed to efficiently estimate the motion model. Then, an approximate model-based optimal control scheme is developed to push the object to track a time-varying trajectory, maintaining the non-contact with distance constraints. Furthermore, a straightforward planner is introduced to assess the adaptability of non-contact manipulation in a cluttered unstructured environment. Experimental results are presented to show the tracking and navigation performance of the proposed scheme.},
year = {2024}
}

@inproceedings{chen2023safe,
bibtex_show = {true},
title = {Safe and Individualized Motion Planning for Upper-limb Exoskeleton Robots Using Human Demonstration and Interactive Learning},
author = {Chen, Yu and Chen, Gong and Ye, Jing and Qiu, Xiangjun and Li, Xiang},
year = {2024},
month = {May},
booktitle = {2024 IEEE International Conference on Robotics and Automation (ICRA)},
eprint = {2309.08178},
arxiv = {2309.08178},
archiveprefix = {arXiv},
primaryclass = {cs.RO},
preview = {chensafe23.gif},
selected = {true}
}

@article{chen2023learning,
bibtex_show = {true},
title = {Learning to Assist Different Wearers in Multitasks: Efficient and Individualized Human-In-the-Loop Adaption Framework for Exoskeleton Robots},
author = {Chen, Yu and Miao, Shu and Chen, Gong and Ye, Jing and Fu, Chenglong and Liang, Bin and Li, Xiang},
journal = {IEEE Transactions on Robotics},
year = {2024},
month = {Sep},
eprint = {2309.14720},
arxiv = {2309.14720},
archiveprefix = {arXiv},
primaryclass = {cs.RO},
preview = {chen23HIL.gif},
selected = {true}
}

@inproceedings{jiang2023contact,
bibtex_show = {true},
selected = {true},
preview = {jiang2023contact.png},
arxiv = {2303.03635},
website = {https://director-of-g.github.io/push_in_clutter/},
author = {Yongpeng Jiang and Yongyi Jia and Xiang Li},
booktitle = {2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
title = {Contact-Aware Non-prehensile Robotic Manipulation for Object Retrieval in Cluttered Environments},
year = {2023},
pages = {10604--10611},
doi = {10.1109/IROS55552.2023.10341476}
}

@inproceedings{shu2023two,
title = {Two-Stage Trajectory-Tracking Control of Cable-Driven Upper-Limb Exoskeleton Robots with Series Elastic Actuators: A Simple, Accurate, and Force-Sensorless Method},
author = {Shu, Yana and Chen, Yu and Zhang, Xuan and Zhang, Shisheng and Chen, Gong and Ye, Jing and Li, Xiang},
booktitle = {2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
doi = {10.1109/IROS55552.2023.10342056},
pages = {2470--2476},
year = {2023},
preview = {shu23two.png},
selected = {true},
organization = {IEEE}
}

@article{yu2023global,
bibtex_show = {true},
selected = {true},
preview = {yu2023global.gif},
arxiv = {2205.04004},
website = {https://mingrui-yu.github.io/shape_control_DLO_2/},
code = {https://github.com/Mingrui-Yu/shape_control_DLO_2},
author = {Yu, Mingrui and Lv, Kangchen and Zhong, Hanzhong and Song, Shiji and Li, Xiang},
journal = {IEEE Transactions on Robotics},
title = {Global Model Learning for Large Deformation Control of Elastic Deformable Linear Objects: An Efficient and Adaptive Approach},
year = {2023},
volume = {39},
number = {1},
pages = {417--436},
doi = {10.1109/TRO.2022.3200546}
}

@inproceedings{yu2023acoarse,
bibtex_show = {true},
selected = {true},
preview = {yu2023acoarse.gif},
arxiv = {2209.11145},
website = {https://mingrui-yu.github.io/DLO_planning/},
author = {Yu, Mingrui and Lv, Kangchen and Wang, Changhao and Tomizuka, Masayoshi and Li, Xiang},
booktitle = {2023 IEEE International Conference on Robotics and Automation (ICRA)},
title = {A Coarse-to-Fine Framework for Dual-Arm Manipulation of Deformable Linear Objects with Whole-Body Obstacle Avoidance},
year = {2023},
pages = {10153--10159},
doi = {10.1109/ICRA48891.2023.10160264}
}

@inproceedings{lv2023learning,
bibtex_show = {true},
selected = {true},
preview = {lv2023learning.gif},
arxiv = {2210.01433},
author = {Lv, Kangchen and Yu, Mingrui and Pu, Yifan and Jiang, Xin and Huang, Gao and Li, Xiang},
booktitle = {2023 IEEE International Conference on Robotics and Automation (ICRA)},
title = {Learning to Estimate 3-D States of Deformable Linear Objects from Single-Frame Occluded Point Clouds},
year = {2023},
pages = {7119--7125},
doi = {10.1109/ICRA48891.2023.10160784}
}

@inproceedings{zhang2023multi,
title = {Multi-Modal Learning and Relaxation of Physical Conflict for an Exoskeleton Robot with Proprioceptive Perception},
author = {Zhang, Xuan and Shu, Yana and Chen, Yu and Chen, Gong and Ye, Jing and Li, Xiu and Li, Xiang},
booktitle = {2023 IEEE International Conference on Robotics and Automation (ICRA)},
doi = {10.1109/ICRA48891.2023.10161255},
pages = {10490--10496},
year = {2023},
preview = {zhan23multi.png},
selected = {true},
organization = {IEEE}
}

@inproceedings{jia2022hierarchical,
bibtex_show = {true},
selected = {true},
preview = {jia2022hierarchical.png},
title = {Hierarchical Learning and Control for In-Hand Micromanipulation Using Multiple Laser-Driven Micro-Tools},
author = {Jia, Yongyi and Chen, Yu and Liu, Hao and Li, Xiu and Li, Xiang},
abstract = {Laser-driven micro-tools are formulated by treating highly-focused laser beams as actuators, to control the tool's motion to contact and then manipulate a micro object, which allows it to manipulate opaque micro objects, or large cells without causing photodamage. However, most existing laser-driven tools are limited to relatively simple tasks, such as moving and caging, and cannot carry out in-hand dexterous tasks. This is mainly because in-hand manipulation involves continuously coordinating multiple laser beams, micro-tools, and the object itself, which has high degrees of freedom (DoF) and poses a challenge for planner and controller design. This paper presents a new hierarchical formulation for the grasping and manipulation of micro objects using multiple laser-driven micro-tools. In hardware, multiple laser-driven tools are assembled to act as a robotic hand to carry out in-hand tasks (e.g., rotating); in software, a hierarchical scheme is developed to shrink the action space and coordinate the motion of multiple tools, subject to both the parametric uncertainty in the tool and the unknown dynamic model of the object. Such a formulation provides potential for achieving robotic in-hand manipulation at a micro scale. The performance of the proposed system is validated in simulation studies under different scenarios.},
booktitle = {2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
pages = {1047--1054},
year = {2022},
organization = {IEEE}
}

@inproceedings{yan2022adaptive,
bibtex_show = {true},
author = {Yan, Xiangjie and Chen, Chen and Li, Xiang},
booktitle = {2022 International Conference on Robotics and Automation (ICRA)},
title = {Adaptive Vision-Based Control of Redundant Robots with Null-Space Interaction for Human-Robot Collaboration},
year = {2022},
pages = {2803--2809},
abstract = {Human-robot collaboration aims to extend human ability through cooperation with robots. This technology is currently helping people with physical disabilities, has transformed the manufacturing process of companies, improved surgical performance, and will likely revolutionize the daily lives of everyone in the future. Being able to enhance the performance of both sides, such that human-robot collaboration outperforms a single robot/human, remains an open issue. For safer and more effective collaboration, a new control scheme has been proposed for redundant robots in this paper, consisting of an adaptive vision-based control term in task space and an interactive control term in null space. Such a formulation allows the robot to autonomously carry out tasks in an unknown environment without prior calibration while also interacting with humans to deal with unforeseen changes (e.g., potential collision, temporary needs) under the redundant configuration. The decoupling between task space and null space helps to explore the collaboration safely and effectively without affecting the main task of the robot end-effector. The stability of the closed-loop system has been rigorously proved with Lyapunov methods, and both the convergence of the position error in task space and that of the damping model in null space are guaranteed. The experimental results of a robot manipulator guided with the technology of augmented reality (AR) are presented to illustrate the performance of the control scheme.},
doi = {10.1109/ICRA46639.2022.9812218},
code = {https://github.com/yanseim/Vision-Based-Control},
month = {May},
selected = {true},
preview = {yan2022adaptive.jpeg}
}

@inproceedings{yu2022shape,
bibtex_show = {true},
selected = {true},
preview = {yu2022shape.gif},
arxiv = {2109.11091},
website = {https://mingrui-yu.github.io/shape_control_DLO/},
code = {https://github.com/Mingrui-Yu/shape_control_DLO},
author = {Yu, Mingrui and Zhong, Hanzhong and Li, Xiang},
booktitle = {2022 International Conference on Robotics and Automation (ICRA)},
title = {Shape Control of Deformable Linear Objects with Offline and Online Learning of Local Linear Deformation Models},
year = {2022},
pages = {1337--1343},
doi = {10.1109/ICRA46639.2022.9812244}
}