Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Coperception - support cooperative perception based on OpenCOOD #187

Draft
wants to merge 15 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from 11 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
File renamed without changes.
9 changes: 6 additions & 3 deletions opencda/core/common/cav_world.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,22 +36,25 @@ class CavWorld(object):
The machine learning manager class.
"""

def __init__(self, apply_ml=False):
def __init__(self, apply_ml=False, apply_coperception=False, fusion_method='early'):

self.vehicle_id_set = set()
self._vehicle_manager_dict = {}
self._platooning_dict = {}
self._rsu_manager_dict = {}
self.ml_manager = None

if apply_ml:
if apply_ml and apply_coperception:
ml_manager = getattr(importlib.import_module(
"opencda.customize.ml_libs.opencood_manager"), 'OpenCOODManager')
self.ml_manager = ml_manager(fusion_method)
elif apply_ml:
# we import in this way so the user don't need to install ml
# packages unless they require to
ml_manager = getattr(importlib.import_module(
"opencda.customize.ml_libs.ml_manager"), 'MLManager')
# initialize the ml manager to load the DL/ML models into memory
self.ml_manager = ml_manager()

# this is used only when co-simulation activated.
self.sumo2carla_ids = {}

Expand Down
5 changes: 4 additions & 1 deletion opencda/core/common/rsu_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,10 @@ def __init__(
sensing_config['localization'],
self.carla_map)
# perception module
self.perception_manager = PerceptionManager(vehicle=None,
self.perception_manager = PerceptionManager(v2x_manager=None,
localization_manager=None,
behavior_agent=None,
vehicle=None,
config_yaml=sensing_config['perception'],
cav_world=cav_world,
carla_world=carla_world,
Expand Down
36 changes: 31 additions & 5 deletions opencda/core/common/v2x_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,11 @@ def __init__(self, cav_world, config_yaml, vid):
# ego position buffer. use deque so we can simulate lagging
self.ego_pos = deque(maxlen=100)
self.ego_spd = deque(maxlen=100)
# ego sensor buffer. Use deque so we can simulate lagging
self.ego_lidar = deque(maxlen=100)
self.ego_image = deque(maxlen=100)
# used to exclude the cav self during searching
self.ego_data = deque(maxlen=100)
self.vid = vid

# check if lag or noise needed to be added during communication
Expand All @@ -89,12 +93,14 @@ def __init__(self, cav_world, config_yaml, vid):
if 'lag' in config_yaml:
self.lag = config_yaml['lag']

def update_info(self, ego_pos, ego_spd):
def update_info(self, ego_pos, ego_spd, ego_lidar, ego_image):
"""
Update all communication plugins with current localization info.
"""
self.ego_pos.append(ego_pos)
self.ego_spd.append(ego_spd)
self.ego_lidar.append(ego_lidar)
self.ego_image.append(ego_image)
self.search()

# the ego pos in platooning_plugin is used for self-localization,
Expand Down Expand Up @@ -148,6 +154,20 @@ def get_ego_speed(self):

return processed_ego_speed

def get_ego_lidar(self):
    """
    Retrieve the ego lidar sensor from the communication buffer, applying
    the configured lag.

    Returns
    -------
    The lagged lidar entry, or None when nothing has been buffered yet.
    """
    if not self.ego_lidar:
        return None
    delay = int(abs(self.lag))
    # Fall back to the oldest frame while the buffer is shorter than the
    # requested lag. (Bug fix: the original compared ``len < lag`` and then
    # indexed ``-1 - lag``, which raised IndexError when len == lag.)
    if len(self.ego_lidar) <= delay:
        return self.ego_lidar[0]
    return self.ego_lidar[-1 - delay]

def get_ego_rgb_image(self):
    """
    Retrieve the ego rgb camera(s) from the communication buffer, applying
    the configured lag.

    Returns
    -------
    The lagged camera entry, or None when nothing has been buffered yet.
    """
    if not self.ego_image:
        return None
    delay = int(abs(self.lag))
    # Fall back to the oldest frame while the buffer is shorter than the
    # requested lag. (Bug fix: the original compared ``len < lag`` and then
    # indexed ``-1 - lag``, which raised IndexError when len == lag.)
    if len(self.ego_image) <= delay:
        return self.ego_image[0]
    return self.ego_image[-1 - delay]

def search(self):
"""
Search the CAVs nearby.
Expand All @@ -156,7 +176,7 @@ def search(self):

for vid, vm in vehicle_manager_dict.items():
# avoid the Nonetype error at the first simulation step
if not vm.v2x_manager.get_ego_pos():
if not vm.v2x_manager.get_ego_pos() or not vm.v2x_manager.get_ego_lidar():
continue
# avoid add itself as the cav nearby
if vid == self.vid:
Expand All @@ -166,7 +186,13 @@ def search(self):
vm.v2x_manager.get_ego_pos().location)

if distance < self.communication_range:
self.cav_nearby.update({vid: vm})
self.cav_nearby.update({vm.vehicle.id: {
'vehicle_manager': vm,
'v2x_manager': vm.v2x_manager
}})
else:
self.cav_nearby.pop(vm.vehicle.id, None)

"""
-----------------------------------------------------------
Below is platooning related
Expand Down Expand Up @@ -277,7 +303,7 @@ def get_platoon_manager(self):
The ego vehicle's in team id.
"""
return self.platooning_plugin.platooning_object, \
self.platooning_plugin.in_id
self.platooning_plugin.in_id

def get_platoon_status(self):
"""
Expand All @@ -303,4 +329,4 @@ def get_platoon_front_rear(self):
Rear vehicle of the ego vehicle in the platoon.
"""
return self.platooning_plugin.front_vehicle, \
self.platooning_plugin.rear_vechile
self.platooning_plugin.rear_vechile
19 changes: 14 additions & 5 deletions opencda/core/common/vehicle_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,10 +99,6 @@ def __init__(
# localization module
self.localizer = LocalizationManager(
vehicle, sensing_config['localization'], carla_map)
# perception module
self.perception_manager = PerceptionManager(
vehicle, sensing_config['perception'], cav_world,
data_dumping)
# map manager
self.map_manager = MapManager(vehicle,
carla_map,
Expand All @@ -125,6 +121,17 @@ def __init__(
# Control module
self.controller = ControlManager(control_config)

# perception module
# move it down here to pass in the behavior manager & localization manager
self.perception_manager = PerceptionManager(
v2x_manager=self.v2x_manager,
localization_manager=self.localizer,
behavior_agent=self.agent,
vehicle=vehicle,
config_yaml=sensing_config['perception'],
cav_world=cav_world,
data_dump=data_dumping)

if data_dumping:
self.data_dumper = DataDumper(self.perception_manager,
vehicle.id,
Expand Down Expand Up @@ -174,6 +181,8 @@ def update_info(self):

ego_pos = self.localizer.get_ego_pos()
ego_spd = self.localizer.get_ego_spd()
ego_lidar = self.perception_manager.lidar
ego_image = self.perception_manager.rgb_camera

# object detection
objects = self.perception_manager.detect(ego_pos)
Expand All @@ -183,7 +192,7 @@ def update_info(self):

# update ego position and speed to v2x manager,
# and then v2x manager will search the nearby cavs
self.v2x_manager.update_info(ego_pos, ego_spd)
self.v2x_manager.update_info(ego_pos, ego_spd, ego_lidar, ego_image)

self.agent.update_information(ego_pos, ego_spd, objects)
# pass position and speed info to controller
Expand Down
203 changes: 203 additions & 0 deletions opencda/core/sensing/perception/coperception_libs.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,203 @@
import weakref
import numpy as np

from opencda.core.common.misc import get_speed
from opencda.core.sensing.perception.obstacle_vehicle import \
ObstacleVehicle
import opencda.core.sensing.perception.sensor_transformation as st
from opencood.utils.transformation_utils import x1_to_x2


class CoperceptionLibs:
    """
    Assemble per-CAV metadata (poses, sensors, neighbors, planned trajectory)
    in the dictionary layout consumed by OpenCOOD-style cooperative
    perception pipelines.

    Parameters
    ----------
    lidar : lidar sensor wrapper exposing ``.sensor``, or None.
    rgb_camera : iterable of camera wrappers (each exposing ``.sensor``), or None.
    localization_manager : localization manager, or None (e.g. for RSUs).
    behavior_agent : behavior agent, or None (e.g. for RSUs).
    carla_world : carla.World instance.
    cav_world : CavWorld instance shared across all CAVs.
    """

    def __init__(self, lidar, rgb_camera, localization_manager,
                 behavior_agent, carla_world, cav_world):
        self.lidar = lidar
        self.rgb_camera = rgb_camera
        self.localizer = localization_manager
        self.agent = behavior_agent
        # NOTE(review): ``weakref.ref(x)()`` dereferences immediately, so
        # these are ordinary strong references. If breaking reference cycles
        # was the intent, store the ref object and call it at use sites.
        self.carla_world = weakref.ref(carla_world)()
        self.cav_world = weakref.ref(cav_world)()
        self.time_delay = self.calculate_time_delay()

    @staticmethod
    def matrix2list(matrix):
        """Convert a 2-D numpy array to a nested Python list."""
        assert len(matrix.shape) == 2
        return matrix.tolist()

    @staticmethod
    def calculate_time_delay():
        """
        Communication time delay applied to shared data.

        TODO: needs revision to reflect the actual delay; currently always 0.
        """
        return 0

    @staticmethod
    def load_vehicle_bbx(object_vehicle):
        """Serialize one obstacle vehicle's pose, bounding box and speed."""
        veh_pos = object_vehicle.get_transform()
        veh_bbx = object_vehicle.bounding_box
        veh_speed = get_speed(object_vehicle)
        return {
            "location": [veh_pos.location.x, veh_pos.location.y, veh_pos.location.z],
            "center": [veh_bbx.location.x, veh_bbx.location.y, veh_bbx.location.z],
            # order follows the original dump convention: roll, yaw, pitch
            "angle": [veh_pos.rotation.roll, veh_pos.rotation.yaw, veh_pos.rotation.pitch],
            "extent": [veh_bbx.extent.x, veh_bbx.extent.y, veh_bbx.extent.z],
            "speed": veh_speed
        }

    def load_vehicles(self, ego_id, ego_pos, lidar, max_distance=50):
        """
        Collect all CARLA vehicles within ``max_distance`` meters of the ego
        position (excluding the ego itself), keyed by carla id.

        ``max_distance`` generalizes the previously hard-coded 50 m radius
        (default unchanged for backward compatibility).
        """
        def dist_to_ego(actor):
            return actor.get_location().distance(ego_pos.location)

        world = self.carla_world
        vehicle_list = world.get_actors().filter("*vehicle*")
        vehicle_list = [v for v in vehicle_list
                        if dist_to_ego(v) < max_distance and v.id != ego_id]
        # A missing lidar (e.g. camera-only config) is passed through as None;
        # this collapses the original duplicated lidar/no-lidar loops.
        lidar_sensor = lidar.sensor if lidar else None
        vehicle_dict = {}
        for v in vehicle_list:
            object_vehicle = ObstacleVehicle(
                None, None, v, lidar_sensor, self.cav_world.sumo2carla_ids)
            vehicle_dict[object_vehicle.carla_id] = \
                self.load_vehicle_bbx(object_vehicle)
        return {'vehicles': vehicle_dict}

    @staticmethod
    def load_transformation_matrix(is_ego, data):
        """
        Build the transformation matrices between this CAV's lidar frame and
        the ego lidar frame.

        TODO: the cur/delay param dicts are currently identical; revise once
        a real communication delay is modeled.
        TODO: needs to take in the loc_error_flag and add localization noise
        to the delayed/current cav lidar poses (see OpenCOOD's loader).
        """
        cur_params = data
        delay_params = data
        cur_ego_params = data
        delay_ego_params = data

        delay_cav_lidar_pose = delay_params['lidar_pose']
        delay_ego_lidar_pose = delay_ego_params["lidar_pose"]
        cur_ego_lidar_pose = cur_ego_params['lidar_pose']
        cur_cav_lidar_pose = cur_params['lidar_pose']

        if is_ego:
            transformation_matrix = x1_to_x2(delay_cav_lidar_pose, cur_ego_lidar_pose)
            spatial_correction_matrix = np.eye(4)
        else:
            transformation_matrix = x1_to_x2(delay_cav_lidar_pose, delay_ego_lidar_pose)
            spatial_correction_matrix = x1_to_x2(delay_ego_lidar_pose, cur_ego_lidar_pose)
        gt_transformation_matrix = x1_to_x2(cur_cav_lidar_pose, cur_ego_lidar_pose)
        return {
            'transformation_matrix': transformation_matrix,
            # NOTE(review): key spelling 'gt_transformation_matrx' (missing
            # 'i') kept for compatibility; fix in lock-step with all readers.
            'gt_transformation_matrx': gt_transformation_matrix,
            'spatial_correction_matrix': spatial_correction_matrix
        }

    @staticmethod
    def load_cur_lidar_pose(lidar):
        """Return the lidar sensor's world pose as [x, y, z, roll, yaw, pitch]."""
        lidar_transformation = lidar.sensor.get_transform()
        return {
            'lidar_pose': [
                lidar_transformation.location.x,
                lidar_transformation.location.y,
                lidar_transformation.location.z,
                lidar_transformation.rotation.roll,
                lidar_transformation.rotation.yaw,
                lidar_transformation.rotation.pitch
            ]
        }

    def load_camera_data(self, lidar, rgb_camera):
        """
        Serialize each rgb camera: world coordinates, intrinsic matrix, and
        the lidar-to-camera extrinsic matrix (via the world frame).
        """
        data = {}
        for i, camera in enumerate(rgb_camera):
            camera_transformation = camera.sensor.get_transform()
            camera_param = {
                'cords': [
                    camera_transformation.location.x,
                    camera_transformation.location.y,
                    camera_transformation.location.z,
                    camera_transformation.rotation.roll,
                    camera_transformation.rotation.yaw,
                    camera_transformation.rotation.pitch
                ]
            }

            # dump intrinsic matrix
            camera_intrinsic = st.get_camera_intrinsic(camera.sensor)
            camera_param['intrinsic'] = self.matrix2list(camera_intrinsic)

            # dump extrinsic matrix lidar2camera = world2camera @ lidar2world
            lidar2world = \
                st.x_to_world_transformation(lidar.sensor.get_transform())
            camera2world = \
                st.x_to_world_transformation(camera.sensor.get_transform())
            world2camera = np.linalg.inv(camera2world)
            lidar2camera = np.dot(world2camera, lidar2world)
            camera_param['extrinsic'] = self.matrix2list(lidar2camera)

            data['camera%d' % i] = camera_param
        return data

    def load_ego_data(self):
        """
        Return the ego's predicted pose (localization output), ground-truth
        pose and speed; empty lists / 0.0 when no localizer is attached
        (e.g. road-side units).
        """
        data = {
            'predicted_ego_pos': [],
            'true_ego_pos': [],
            'ego_speed': 0.0
        }
        if not self.localizer:
            return data

        predicted_ego_pos = self.localizer.get_ego_pos()
        # Vehicle-based localizers expose the carla vehicle; others store the
        # ground-truth pose directly on ``true_ego_pos``.
        true_ego_pos = self.localizer.vehicle.get_transform() \
            if hasattr(self.localizer, 'vehicle') \
            else self.localizer.true_ego_pos
        data = {
            'predicted_ego_pos': [
                predicted_ego_pos.location.x,
                predicted_ego_pos.location.y,
                predicted_ego_pos.location.z,
                predicted_ego_pos.rotation.roll,
                predicted_ego_pos.rotation.yaw,
                predicted_ego_pos.rotation.pitch
            ],
            'true_ego_pos': [
                true_ego_pos.location.x,
                true_ego_pos.location.y,
                true_ego_pos.location.z,
                true_ego_pos.rotation.roll,
                true_ego_pos.rotation.yaw,
                true_ego_pos.rotation.pitch
            ],
            'ego_speed': float(self.localizer.get_ego_spd())
        }
        return data

    def load_plan_trajectory(self, agent):
        """
        Serialize the agent's planned trajectory as [x, y, speed] triples.

        Note: this consumes (popleft) the planner's trajectory deque.
        Returns {'RSU': True} when no agent is given (road-side units).
        """
        data = {'RSU': True}
        if agent is not None:
            # Bug fix: use the ``agent`` argument (the original read
            # ``self.agent``, silently ignoring the parameter it had just
            # None-checked).
            trajectory_deque = agent.get_local_planner().get_trajectory()
            trajectory_list = []
            while trajectory_deque:
                buffer = trajectory_deque.popleft()
                trajectory_list.append(
                    [buffer[0].location.x, buffer[0].location.y, buffer[1]])
            data = {
                'plan_trajectory': trajectory_list,
                'RSU': False
            }
        return data
Loading