P2_trajectory_tracking.py
import typing as T
import numpy as np
from numpy import linalg

# Minimum previous speed used when inverting the flatness map, to avoid dividing by zero
V_PREV_THRES = 0.0001


class TrajectoryTracker:
""" Trajectory tracking controller using differential flatness """
def __init__(self, kpx: float, kpy: float, kdx: float, kdy: float,
V_max: float = 0.5, om_max: float = 1) -> None:
self.kpx = kpx
self.kpy = kpy
self.kdx = kdx
self.kdy = kdy
self.V_max = V_max
self.om_max = om_max
self.coeffs = np.zeros(8) # Polynomial coefficients for x(t) and y(t) as
# returned by the differential flatness code

    def reset(self) -> None:
self.V_prev = 0.
self.om_prev = 0.
self.t_prev = 0.

    def load_traj(self, times: np.ndarray, traj: np.ndarray) -> None:
        """ Loads in a new trajectory to follow and resets the controller state.

        times: (N,) array of sample times
        traj:  2-D state array whose columns 0 and 1 hold x and y, and whose
               columns 3-6 hold xd, yd, xdd, ydd (as read by get_desired_state)
        """
self.reset()
self.traj_times = times
self.traj = traj

    def get_desired_state(self, t: float) -> T.Tuple[np.ndarray, np.ndarray, np.ndarray,
                                                      np.ndarray, np.ndarray, np.ndarray]:
"""
Input:
t: Current time
Output:
            x_d, xd_d, xdd_d, y_d, yd_d, ydd_d: Desired state and derivatives
                at time t, interpolated from the loaded trajectory (self.traj)
"""
        x_d = np.interp(t, self.traj_times, self.traj[:, 0])
        y_d = np.interp(t, self.traj_times, self.traj[:, 1])
        xd_d = np.interp(t, self.traj_times, self.traj[:, 3])
        yd_d = np.interp(t, self.traj_times, self.traj[:, 4])
        xdd_d = np.interp(t, self.traj_times, self.traj[:, 5])
        ydd_d = np.interp(t, self.traj_times, self.traj[:, 6])
return x_d, xd_d, xdd_d, y_d, yd_d, ydd_d

    def compute_control(self, x: float, y: float, th: float, t: float) -> T.Tuple[float, float]:
"""
Inputs:
x,y,th: Current state
t: Current time
Outputs:
V, om: Control actions
"""
dt = t - self.t_prev
x_d, xd_d, xdd_d, y_d, yd_d, ydd_d = self.get_desired_state(t)
########## Code starts here ##########
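        # NOTE: a minimal sketch of one possible tracking law, added here as an
        # assumption (not necessarily the intended solution): PD feedback on the
        # flat outputs x and y, then inversion of the flatness map to recover (V, om).

        # Current flat-output velocities, reconstructed from the previous speed command.
        xd = self.V_prev * np.cos(th)
        yd = self.V_prev * np.sin(th)

        # Virtual controls: feedforward acceleration plus PD feedback on tracking error.
        u1 = xdd_d + self.kpx * (x_d - x) + self.kdx * (xd_d - xd)
        u2 = ydd_d + self.kpy * (y_d - y) + self.kdy * (yd_d - yd)

        # Guard against dividing by zero when the previous speed is (near) zero.
        V_prev = max(self.V_prev, V_PREV_THRES)

        # Map (u1, u2) back to the unicycle inputs: integrate the tangential
        # acceleration to update V, and divide the normal component by V for omega.
        a = u1 * np.cos(th) + u2 * np.sin(th)
        V = V_prev + a * dt
        om = (u2 * np.cos(th) - u1 * np.sin(th)) / V_prev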
########## Code ends here ##########
# apply control limits
V = np.clip(V, -self.V_max, self.V_max)
om = np.clip(om, -self.om_max, self.om_max)
# save the commands that were applied and the time
self.t_prev = t
self.V_prev = V
self.om_prev = om
return V, om
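

# Example usage (a hypothetical sketch, not part of the original file): build a
# simple constant-velocity trajectory whose columns match the layout read by
# get_desired_state, load it, and query the controller once. The gains and the
# trajectory values below are made-up illustration numbers.
if __name__ == "__main__":
    times = np.linspace(0.0, 5.0, 51)
    traj = np.zeros((times.size, 7))
    traj[:, 0] = 0.2 * times   # x(t): straight line tracked at 0.2 m/s
    traj[:, 3] = 0.2           # xd(t)
    # y, th, yd, xdd, ydd stay zero for this segment

    tracker = TrajectoryTracker(kpx=1.0, kpy=1.0, kdx=0.5, kdy=0.5)
    tracker.load_traj(times, traj)
    V, om = tracker.compute_control(x=0.0, y=0.05, th=0.0, t=0.1)
    print(f"V = {V:.3f} m/s, om = {om:.3f} rad/s")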