From 79a123c78b4268e071c18abedf8c60b18047ccca Mon Sep 17 00:00:00 2001 From: Takahiro Date: Mon, 4 Dec 2023 14:05:13 +0100 Subject: [PATCH] Cleaned up and using channel_info for image topic. --- elevation_map_msgs/CMakeLists.txt | 2 +- elevation_map_msgs/msg/ChannelInfo.msg | 2 + elevation_map_msgs/msg/FusionInfo.msg | 3 - elevation_mapping_cupy/compile_commands.json | 1 + .../anymal/anymal_sensor_parameter.yaml | 40 ++--- .../turtle_bot/turtle_bot_semantics.yaml | 47 +++--- .../setups/turtle_bot/turtle_bot_simple.yaml | 15 +- .../elevation_mapping_ros.hpp | 36 +++-- .../elevation_mapping_wrapper.hpp | 2 +- .../elevation_mapping.py | 12 +- .../elevation_mapping_cupy/parameter.py | 29 +--- .../plugins/plugin_manager.py | 2 +- .../elevation_mapping_cupy/semantic_map.py | 102 ++++++------- .../src/elevation_mapping_ros.cpp | 137 ++++++++++-------- .../src/elevation_mapping_wrapper.cpp | 34 ++++- 15 files changed, 236 insertions(+), 228 deletions(-) create mode 100644 elevation_map_msgs/msg/ChannelInfo.msg delete mode 100644 elevation_map_msgs/msg/FusionInfo.msg create mode 120000 elevation_mapping_cupy/compile_commands.json diff --git a/elevation_map_msgs/CMakeLists.txt b/elevation_map_msgs/CMakeLists.txt index 653174ef..2d73f5e5 100644 --- a/elevation_map_msgs/CMakeLists.txt +++ b/elevation_map_msgs/CMakeLists.txt @@ -11,7 +11,7 @@ find_package(catkin REQUIRED COMPONENTS add_message_files( FILES Statistics.msg - FusionInfo.msg + ChannelInfo.msg ) ## Generate services in the 'srv' folder diff --git a/elevation_map_msgs/msg/ChannelInfo.msg b/elevation_map_msgs/msg/ChannelInfo.msg new file mode 100644 index 00000000..bccf1447 --- /dev/null +++ b/elevation_map_msgs/msg/ChannelInfo.msg @@ -0,0 +1,2 @@ +Header header +string[] channels # channel names for each layer \ No newline at end of file diff --git a/elevation_map_msgs/msg/FusionInfo.msg b/elevation_map_msgs/msg/FusionInfo.msg deleted file mode 100644 index 4cfd1718..00000000 --- a/elevation_map_msgs/msg/FusionInfo.msg +++ /dev/null @@ -1,3 +0,0 @@ -Header header -string[] channels # channel names for each layer -string[] fusion_methods # fusion methods to use for each channel \ No newline at end of file diff --git a/elevation_mapping_cupy/compile_commands.json b/elevation_mapping_cupy/compile_commands.json new file mode 120000 index 00000000..2fdee343 --- /dev/null +++ b/elevation_mapping_cupy/compile_commands.json @@ -0,0 +1 @@ +/home/takahiro/catkin_ws/build/elevation_mapping_cupy/compile_commands.json \ No newline at end of file diff --git a/elevation_mapping_cupy/config/setups/anymal/anymal_sensor_parameter.yaml b/elevation_mapping_cupy/config/setups/anymal/anymal_sensor_parameter.yaml index 90cc74fb..4ad52176 100644 --- a/elevation_mapping_cupy/config/setups/anymal/anymal_sensor_parameter.yaml +++ b/elevation_mapping_cupy/config/setups/anymal/anymal_sensor_parameter.yaml @@ -1,5 +1,15 @@ plugin_config_file: '$(rospack find elevation_mapping_cupy)/config/setups/anymal/anymal_plugin_config.yaml' +pointcloud_channel_fusions: + rgb: 'color' + default: 'average' + +image_channel_fusions: + rgb: 'color' + default: 'exponential' + feat_.*: 'exponential' + sem_.*: 'exponential' + #### Publishers ######## # topic_name: # layers: # Choose from 'elevation', 'variance', 'traversability', 'time', 'normal_x', 'normal_y', 'normal_z', 'color', plugin_layer_names @@ -8,7 +18,7 @@ plugin_config_file: '$(rospack find elevation_mapping_cupy)/config/setups/anymal publishers: elevation_map_raw: - layers: ['elevation', 'traversability', 'variance'] + layers: 
['elevation', 'traversability', 'variance', 'rgb'] basic_layers: ['elevation', 'traversability'] fps: 5.0 @@ -20,55 +30,47 @@ publishers: #### Subscribers ######## subscribers: front_upper_depth: - channels: [] - fusion: [] topic_name: /depth_camera_front_upper/point_cloud_self_filtered data_type: pointcloud front_lower_depth: - channels: [] - fusion: [] topic_name: /depth_camera_front_lower/point_cloud_self_filtered data_type: pointcloud rear_upper_depth: - channels: [] - fusion: [] topic_name: /depth_camera_rear_upper/point_cloud_self_filtered data_type: pointcloud rear_lower_depth: - channels: [] - fusion: [] topic_name: /depth_camera_rear_lower/point_cloud_self_filtered data_type: pointcloud left_depth: - channels: [] - fusion: [] topic_name: /depth_camera_left/point_cloud_self_filtered data_type: pointcloud right_depth: - channels: [] - fusion: [] topic_name: /depth_camera_right/point_cloud_self_filtered data_type: pointcloud + front_wide_angle: + topic_name: /wide_angle_camera_front/image_raw + camera_info_topic_name: /wide_angle_camera_front/camera_info + data_type: image + + rear_wide_angle: + topic_name: /wide_angle_camera_rear/image_raw + camera_info_topic_name: /wide_angle_camera_rear/camera_info + data_type: image + # velodyne: - # channels: [] - # fusion: [] # topic_name: /point_cloud_filter/lidar/point_cloud_filtered # data_type: pointcloud front_bpearl: - channels: [] - fusion: [] topic_name: /robot_self_filter/bpearl_front/point_cloud data_type: pointcloud rear_bpearl: - channels: [] - fusion: [] topic_name: /robot_self_filter/bpearl_rear/point_cloud data_type: pointcloud \ No newline at end of file diff --git a/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_semantics.yaml b/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_semantics.yaml index 9c4019bb..63d3b2b9 100644 --- a/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_semantics.yaml +++ b/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_semantics.yaml @@ -1,47 +1,40 @@ #### Plugins ######## plugin_config_file: '$(rospack find elevation_mapping_cupy)/config/setups/turtle_bot/plugin_config.yaml' -pointcloud_channel_fusion: +pointcloud_channel_fusions: + none: 'none' + # rgb: 'color' + # default: 'average' + +image_channel_fusions: rgb: 'color' - default: 'average' - sheep: 'average' - sofa: 'class_average' - person: 'class_average' + default: 'exponential' + feat_.*: 'exponential' #### Subscribers ######## subscribers: color_cam: # for color camera - # channels: ['rgb'] - # fusion: ['color'] topic_name: '/camera/rgb/image_raw' camera_info_topic_name: '/camera/depth/camera_info' data_type: image - # front_cam: - # channels: ['rgb', 'sheep','sofa',"person" ] - # fusion: [ 'color','class_average','class_average','class_average' ] - # topic_name: '/elevation_mapping/pointcloud_semantic' - # semantic_segmentation: True - # publish_segmentation_image: True - # segmentation_model: 'lraspp_mobilenet_v3_large' # detectron_coco_panoptic_fpn_R_101_3x, lraspp_mobilenet_v3_large - # show_label_legend: False - - # cam_info_topic: "/camera/rgb/camera_info" - # image_topic: "/camera/rgb/image_raw" - # cam_frame: "camera_rgb_optical_frame" - # data_type: image - # depth_topic: "/camera/depth/image_raw" - semantic_cam: # for color camera - # channels: ['rgb'] - # fusion: ['color'] + semantic_cam: # for semantic images topic_name: '/front_cam/semantic_image' camera_info_topic_name: '/front_cam/camera/depth/camera_info_resized' - fusion_info_topic_name: '/front_cam/fusion_info' + 
channel_info_topic_name: '/front_cam/channel_info' data_type: image + # fixed_semantic_cam: # for semantic images + # topic_name: '/front_cam/semantic_image' + # camera_info_topic_name: '/front_cam/camera/depth/camera_info_resized' + # channels: ["test", "test2", "test3"] + # data_type: image front_cam_pointcloud: - channels: [] - fusion: [] topic_name: '/camera/depth/points' data_type: pointcloud + feat_front: + topic_name: /elevation_mapping/feat_f + camera_info_topic_name: "/camera/depth/camera_info" + channels: [ 'feat_0','feat_1','feat_2','feat_3','feat_4','feat_5','feat_6','feat_7','feat_8','feat_9' ] + data_type: image #### Publishers ######## diff --git a/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_simple.yaml b/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_simple.yaml index cfa3b3cb..a76fcc61 100644 --- a/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_simple.yaml +++ b/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_simple.yaml @@ -1,11 +1,14 @@ +pointcloud_channel_fusions: + rgb: 'color' + default: 'average' + +image_channel_fusions: + rgb: 'color' + default: 'exponential' + feat_.*: 'exponential' + subscribers: -# sensor_name: -# channels: ['feat_0','feat_1'] -# fusion: ['average','average'] -# topic_name: '/elevation_mapping/pointcloud_semantic' front_cam: - channels: ['rgb' ] - fusion: [ 'color'] topic_name: '/camera/depth/points' data_type: pointcloud diff --git a/elevation_mapping_cupy/include/elevation_mapping_cupy/elevation_mapping_ros.hpp b/elevation_mapping_cupy/include/elevation_mapping_cupy/elevation_mapping_ros.hpp index ab873aa1..0149ffad 100644 --- a/elevation_mapping_cupy/include/elevation_mapping_cupy/elevation_mapping_ros.hpp +++ b/elevation_mapping_cupy/include/elevation_mapping_cupy/elevation_mapping_ros.hpp @@ -49,7 +49,7 @@ #include #include -#include +#include #include "elevation_mapping_cupy/elevation_mapping_wrapper.hpp" @@ -65,26 +65,37 @@ class ElevationMappingNode { using ImageSubscriber = image_transport::SubscriberFilter; using ImageSubscriberPtr = std::shared_ptr; + + // Subscriber and Synchronizer for CameraInfo messages using CameraInfoSubscriber = message_filters::Subscriber; using CameraInfoSubscriberPtr = std::shared_ptr; using CameraPolicy = message_filters::sync_policies::ApproximateTime; using CameraSync = message_filters::Synchronizer; using CameraSyncPtr = std::shared_ptr; - using FusionInfoSubscriber = message_filters::Subscriber; - using FusionInfoSubscriberPtr = std::shared_ptr; - using CameraFusionPolicy = message_filters::sync_policies::ApproximateTime; - using CameraFusionSync = message_filters::Synchronizer; - using CameraFusionSyncPtr = std::shared_ptr; + // Subscriber and Synchronizer for ChannelInfo messages + using ChannelInfoSubscriber = message_filters::Subscriber; + using ChannelInfoSubscriberPtr = std::shared_ptr; + using CameraChannelPolicy = message_filters::sync_policies::ApproximateTime; + using CameraChannelSync = message_filters::Synchronizer; + using CameraChannelSyncPtr = std::shared_ptr; + + // Subscriber and Synchronizer for Pointcloud messages + using PointCloudSubscriber = message_filters::Subscriber; + using PointCloudSubscriberPtr = std::shared_ptr; + using PointCloudPolicy = message_filters::sync_policies::ApproximateTime; + using PointCloudSync = message_filters::Synchronizer; + using PointCloudSyncPtr = std::shared_ptr; private: void readParameters(); void setupMapPublishers(); void pointcloudCallback(const sensor_msgs::PointCloud2& cloud, const std::string& 
key); - void inputImage(const sensor_msgs::ImageConstPtr& image_msg, const sensor_msgs::CameraInfoConstPtr& camera_info_msg, - const std::vector& channels, const std::vector& fusion_methods); - void imageCallback(const sensor_msgs::ImageConstPtr& image_msg, const sensor_msgs::CameraInfoConstPtr& camera_info_msg); - void imageFusionCallback(const sensor_msgs::ImageConstPtr& image_msg, const sensor_msgs::CameraInfoConstPtr& camera_info_msg, const elevation_map_msgs::FusionInfoConstPtr& fusion_info_msg); + void inputPointCloud(const sensor_msgs::PointCloud2& cloud, const std::vector& channels); + void inputImage(const sensor_msgs::ImageConstPtr& image_msg, const sensor_msgs::CameraInfoConstPtr& camera_info_msg, const std::vector& channels); + void imageCallback(const sensor_msgs::ImageConstPtr& image_msg, const sensor_msgs::CameraInfoConstPtr& camera_info_msg, const std::string& key); + void imageChannelCallback(const sensor_msgs::ImageConstPtr& image_msg, const sensor_msgs::CameraInfoConstPtr& camera_info_msg, const elevation_map_msgs::ChannelInfoConstPtr& channel_info_msg); + void pointCloudChannelCallback(const sensor_msgs::PointCloud2& cloud, const elevation_map_msgs::ChannelInfoConstPtr& channel_info_msg); // void multiLayerImageCallback(const elevation_map_msgs::MultiLayerImageConstPtr& image_msg, const sensor_msgs::CameraInfoConstPtr& camera_info_msg); void publishAsPointCloud(const grid_map::GridMap& map) const; bool getSubmap(grid_map_msgs::GetGridMap::Request& request, grid_map_msgs::GetGridMap::Response& response); @@ -110,9 +121,10 @@ class ElevationMappingNode { std::vector pointcloudSubs_; std::vector imageSubs_; std::vector cameraInfoSubs_; - std::vector fusionInfoSubs_; + std::vector channelInfoSubs_; std::vector cameraSyncs_; - std::vector cameraFusionSyncs_; + std::vector cameraChannelSyncs_; + std::vector pointCloudSyncs_; std::vector mapPubs_; tf::TransformBroadcaster tfBroadcaster_; ros::Publisher alivePub_; diff --git a/elevation_mapping_cupy/include/elevation_mapping_cupy/elevation_mapping_wrapper.hpp b/elevation_mapping_cupy/include/elevation_mapping_cupy/elevation_mapping_wrapper.hpp index b6d633da..116403e6 100644 --- a/elevation_mapping_cupy/include/elevation_mapping_cupy/elevation_mapping_wrapper.hpp +++ b/elevation_mapping_cupy/include/elevation_mapping_cupy/elevation_mapping_wrapper.hpp @@ -48,7 +48,7 @@ class ElevationMappingWrapper { void input(const RowMatrixXd& points, const std::vector& channels, const RowMatrixXd& R, const Eigen::VectorXd& t, const double positionNoise, const double orientationNoise); - void input_image(const std::vector& multichannel_image, const std::vector& channels, const std::vector& fusion_methods, const RowMatrixXd& R, + void input_image(const std::vector& multichannel_image, const std::vector& channels, const RowMatrixXd& R, const Eigen::VectorXd& t, const RowMatrixXd& cameraMatrix, int height, int width); void move_to(const Eigen::VectorXd& p, const RowMatrixXd& R); void clear(); diff --git a/elevation_mapping_cupy/script/elevation_mapping_cupy/elevation_mapping.py b/elevation_mapping_cupy/script/elevation_mapping_cupy/elevation_mapping.py index 22fde76a..e7011529 100644 --- a/elevation_mapping_cupy/script/elevation_mapping_cupy/elevation_mapping.py +++ b/elevation_mapping_cupy/script/elevation_mapping_cupy/elevation_mapping.py @@ -469,7 +469,7 @@ def input_image( self, image: List[cp._core.core.ndarray], channels: List[str], - fusion_methods: List[str], + # fusion_methods: List[str], R: cp._core.core.ndarray, t: 
cp._core.core.ndarray, K: cp._core.core.ndarray, @@ -490,9 +490,6 @@ def input_image( Returns: None: """ - print("input_image") - print("channels", channels) - print("fusion_methods", fusion_methods) image = np.stack(image, axis=0) if len(image.shape) == 2: image = image[None] @@ -517,9 +514,6 @@ def input_image( self.valid_correspondence[:, :] = False # self.distance_correspondence *= 0.0 - print("channels", channels) - print("fusion_methods", fusion_methods) - with self.map_lock: self.image_to_map_correspondence_kernel( self.elevation_map, @@ -532,15 +526,11 @@ def input_image( self.center, self.uv_correspondence, self.valid_correspondence, - # self.distance_correspondence, size=int(self.cell_n * self.cell_n), ) - # print("distance_correspondence", self.distance_correspondence) self.semantic_map.update_layers_image( - # sub_key, image, channels, - fusion_methods, self.uv_correspondence, self.valid_correspondence, image_height, diff --git a/elevation_mapping_cupy/script/elevation_mapping_cupy/parameter.py b/elevation_mapping_cupy/script/elevation_mapping_cupy/parameter.py index 40ac3dd9..88b111ed 100644 --- a/elevation_mapping_cupy/script/elevation_mapping_cupy/parameter.py +++ b/elevation_mapping_cupy/script/elevation_mapping_cupy/parameter.py @@ -16,26 +16,12 @@ class Parameter(Serializable): subscriber_cfg: dict = field( default_factory=lambda: { "front_cam": { - "cam_frame": "zed2i_right_camera_optical_frame", - "cam_info_topic": "/zed2i/zed_node/depth/camera_info", "channels": ["rgb", "person"], - "confidence": True, - "confidence_threshold": 10, - "confidence_topic": "/zed2i/zed_node/confidence/confidence_map", - "depth_topic": "/zed2i/zed_node/depth/depth_registered", - "feature_extractor": False, - "fusion": ["color", "class_average"], - "image_topic": "/zed2i/zed_node/left/image_rect_color", - "segmentation_model": "lraspp_mobilenet_v3_large", - "semantic_segmentation": True, - "show_label_legend": True, "topic_name": "/elevation_mapping/pointcloud_semantic", "data_type": "pointcloud", } } ) - # additional_layers: list = field(default_factory=lambda: ["feat_0"]) - # fusion_algorithms: list = field(default_factory=lambda: ["average"]) additional_layers: list = field(default_factory=lambda: ["color"]) fusion_algorithms: list = field(default_factory=lambda: [ "image_color", @@ -46,7 +32,8 @@ class Parameter(Serializable): "pointcloud_class_bayesian", "pointcloud_class_max", "pointcloud_color"]) - pointcloud_channel_fusion: dict = field(default_factory=lambda: {"rgb": "pointcloud_color", "default": "pointcloud_class_average"}) + pointcloud_channel_fusions: dict = field(default_factory=lambda: {"rgb": "color", "default": "class_average"}) + image_channel_fusions: dict = field(default_factory=lambda: {"rgb": "color", "default": "exponential"}) data_type: str = np.float32 average_weight: float = 0.5 @@ -138,18 +125,6 @@ def update(self): self.cell_n = int(round(self.map_length / self.resolution)) + 2 self.true_cell_n = round(self.map_length / self.resolution) self.true_map_length = self.true_cell_n * self.resolution - # semantic_layers = [] - # fusion_algorithms = [] - # for subscriber, sub_val in self.subscriber_cfg.items(): - # channels = sub_val["channels"] - # fusion = sub_val["fusion"] - # for i in range(len(channels)): - # name = channels[i] - # if name not in semantic_layers: - # semantic_layers.append(name) - # fusion_algorithms.append(fusion[i]) - # self.additional_layers = semantic_layers - # self.fusion_algorithms = fusion_algorithms if __name__ == "__main__": diff --git 
a/elevation_mapping_cupy/script/elevation_mapping_cupy/plugins/plugin_manager.py b/elevation_mapping_cupy/script/elevation_mapping_cupy/plugins/plugin_manager.py index fd1185e1..b5c2d313 100644 --- a/elevation_mapping_cupy/script/elevation_mapping_cupy/plugins/plugin_manager.py +++ b/elevation_mapping_cupy/script/elevation_mapping_cupy/plugins/plugin_manager.py @@ -146,7 +146,7 @@ def update_with_name( elements_to_shift={}, ): idx = self.get_layer_index_with_name(name) - if idx is not None: + if idx is not None and idx < len(self.plugins): n_param = len(signature(self.plugins[idx]).parameters) if n_param == 5: self.layers[idx] = self.plugins[idx](elevation_map, layer_names, self.layers, self.layer_names) diff --git a/elevation_mapping_cupy/script/elevation_mapping_cupy/semantic_map.py b/elevation_mapping_cupy/script/elevation_mapping_cupy/semantic_map.py index e33585f1..6b7291ca 100644 --- a/elevation_mapping_cupy/script/elevation_mapping_cupy/semantic_map.py +++ b/elevation_mapping_cupy/script/elevation_mapping_cupy/semantic_map.py @@ -1,7 +1,9 @@ from elevation_mapping_cupy.parameter import Parameter import cupy as cp import numpy as np -from typing import List +from typing import List, Dict +import re + from elevation_mapping_cupy.fusion.fusion_manager import FusionManager @@ -18,36 +20,13 @@ def __init__(self, param: Parameter): self.param = param - self.layer_specs = {} + self.layer_specs_points = {} + self.layer_specs_image = {} self.layer_names = [] self.unique_fusion = [] self.unique_data = [] self.elements_to_shift = {} - # for k, config in self.param.subscriber_cfg.items(): - # for f, c in zip(config["fusion"], config["channels"]): - # if c not in self.layer_names: - # self.layer_names.append(c) - # self.layer_specs[c] = f - # else: - # assert self.layer_specs[c] == f, "Error: Single layer has multiple fusion algorithms!" - # if f not in self.unique_fusion: - # dt = config["data_type"] - # self.unique_fusion.append(dt + "_" + f) - for channel, fusion in self.param.pointcloud_channel_fusion.items(): - if channel not in self.layer_names: - if channel != "default": - self.layer_names.append(channel) - self.layer_specs[channel] = fusion - else: - assert self.layer_specs[channel] == fusion, "Error: Single layer has multiple fusion algorithms!" - # for f, c in zip(config["fusion"], config["channels"]): - # else: - # assert self.layer_specs[c] == f, "Error: Single layer has multiple fusion algorithms!" - # if f not in self.unique_fusion: - # dt = config["data_type"] - # self.unique_fusion.append(dt + "_" + f) - self.layer_names += self.param.additional_layers self.unique_fusion = self.param.fusion_algorithms self.amount_layer_names = len(self.layer_names) @@ -73,10 +52,10 @@ def initialize_fusion(self): """Initialize the fusion algorithms.""" for fusion in self.unique_fusion: if "pointcloud_class_bayesian" == fusion: - pcl_ids = self.get_layer_indices("class_bayesian") + pcl_ids = self.get_layer_indices("class_bayesian", self.layer_specs_points) self.delete_new_layers[pcl_ids] = 0 if "pointcloud_class_max" == fusion: - pcl_ids = self.get_layer_indices("class_max") + pcl_ids = self.get_layer_indices("class_max", self.layer_specs_points) self.delete_new_layers[pcl_ids] = 0 layer_cnt = self.param.fusion_algorithms.count("class_max") id_max = cp.zeros( @@ -87,12 +66,15 @@ def initialize_fusion(self): self.fusion_manager.register_plugin(fusion) def update_fusion_setting(self): + """ + Update the fusion settings. 
+ """ for fusion in self.unique_fusion: if "pointcloud_class_bayesian" == fusion: - pcl_ids = self.get_layer_indices("class_bayesian") + pcl_ids = self.get_layer_indices("class_bayesian", self.layer_specs_points) self.delete_new_layers[pcl_ids] = 0 if "pointcloud_class_max" == fusion: - pcl_ids = self.get_layer_indices("class_max") + pcl_ids = self.get_layer_indices("class_max", self.layer_specs_points) self.delete_new_layers[pcl_ids] = 0 layer_cnt = self.param.fusion_algorithms.count("class_max") id_max = cp.zeros( @@ -165,7 +147,7 @@ def shift_map_xy(self, shift_value): el = cp.roll(el, shift_value, axis=(1, 2)) self.pad_value(el, shift_value, value=0.0) - def get_fusion_of_pcl(self, channels: List[str]) -> List[str]: + def get_fusion(self, channels: List[str], channel_fusions: Dict[str, str], layer_specs: Dict[str, str]) -> List[str]: """Get all fusion algorithms that need to be applied to a specific pointcloud. Args: @@ -174,24 +156,38 @@ def get_fusion_of_pcl(self, channels: List[str]) -> List[str]: fusion_list = [] process_channels = [] for channel in channels: - if channel not in self.layer_specs: + if channel not in layer_specs: # If the channel is not in the layer_specs, we use the default fusion algorithm - if "default" in self.param.pointcloud_channel_fusion: - default_fusion = self.param.pointcloud_channel_fusion["default"] - print(f"[WARNING] Layer {channel} not found in layer_specs. Using {default_fusion} algorithm as default.") - self.layer_specs[channel] = default_fusion - self.update_fusion_setting() - # If there's no default fusion algorithm, we skip this channel + matched_fusion = self.get_matching_fusion(channel, channel_fusions) + if matched_fusion is None: + if "default" in channel_fusions: + default_fusion = channel_fusions["default"] + print(f"[WARNING] Layer {channel} not found in layer_specs. Using {default_fusion} algorithm as default.") + layer_specs[channel] = default_fusion + self.update_fusion_setting() + # If there's no default fusion algorithm, we skip this channel + else: + print(f"[WARNING] Layer {channel} not found in layer_specs ({layer_specs}) and no default fusion is configured. Skipping.") + continue else: - print(f"[WARNING] Layer {channel} not found in layer_specs. Skipping.") - continue - x = self.layer_specs[channel] + layer_specs[channel] = matched_fusion + self.update_fusion_setting() + x = layer_specs[channel] if x not in fusion_list: fusion_list.append(x) process_channels.append(channel) return process_channels, fusion_list - def get_layer_indices(self, fusion_alg): + + def get_matching_fusion(self, channel: str, fusion_algs: Dict[str, str]): + """ Use regular expression to check if the fusion algorithm matches the channel name.""" + for fusion_alg, alg_value in fusion_algs.items(): + if re.match(f"^{fusion_alg}$", channel): + return alg_value + return None + + + def get_layer_indices(self, fusion_alg, layer_specs): """Get the indices of the layers that are used for a specific fusion algorithm. 
Args: @@ -201,12 +197,12 @@ def get_layer_indices(self, fusion_alg): cp.array: indices of the layers """ layer_indices = cp.array([], dtype=cp.int32) - for it, (key, val) in enumerate(self.layer_specs.items()): + for it, (key, val) in enumerate(layer_specs.items()): if key in val == fusion_alg: layer_indices = cp.append(layer_indices, it).astype(cp.int32) return layer_indices - def get_indices_fusion(self, pcl_channels: List[str], fusion_alg: str): + def get_indices_fusion(self, pcl_channels: List[str], fusion_alg: str, layer_specs: Dict[str, str]): """Computes the indices of the channels of the pointcloud and the layers of the semantic map of type fusion_alg. Args: @@ -219,7 +215,7 @@ def get_indices_fusion(self, pcl_channels: List[str], fusion_alg: str): """ # this contains exactly the fusion alg type for each channel of the pcl - pcl_val_list = [self.layer_specs[x] for x in pcl_channels] + pcl_val_list = [layer_specs[x] for x in pcl_channels] # this contains the indices of the point cloud where we have to perform a certain fusion pcl_indices = cp.array( [idp + 3 for idp, x in enumerate(pcl_val_list) if x == fusion_alg], @@ -227,7 +223,7 @@ def get_indices_fusion(self, pcl_channels: List[str], fusion_alg: str): ) # create a list of indices of the layers that will be updated by the point cloud with specific fusion alg layer_indices = cp.array([], dtype=cp.int32) - for it, (key, val) in enumerate(self.layer_specs.items()): + for it, (key, val) in enumerate(layer_specs.items()): if key in pcl_channels and val == fusion_alg: layer_idx = self.layer_names.index(key) layer_indices = cp.append(layer_indices, layer_idx).astype(cp.int32) @@ -243,7 +239,7 @@ def update_layers_pointcloud(self, points_all, channels, R, t, elevation_map): t: translation vector elevation_map: elevation map object """ - process_channels, additional_fusion = self.get_fusion_of_pcl(channels) + process_channels, additional_fusion = self.get_fusion(channels, self.param.pointcloud_channel_fusions, self.layer_specs_points) # If channels has a new layer that is not in the semantic map, add it for channel in process_channels: if channel not in self.layer_names: @@ -254,7 +250,7 @@ def update_layers_pointcloud(self, points_all, channels, R, t, elevation_map): self.new_map[self.delete_new_layers] = 0.0 for fusion in list(set(additional_fusion)): # which layers need to be updated with this fusion algorithm - pcl_ids, layer_ids = self.get_indices_fusion(process_channels, fusion) + pcl_ids, layer_ids = self.get_indices_fusion(process_channels, fusion, self.layer_specs_points) # update the layers with the fusion algorithm self.fusion_manager.execute_plugin( fusion, @@ -274,7 +270,7 @@ def update_layers_image( # sub_key: str, image: cp._core.core.ndarray, channels: List[str], - fusion_methods: List[str], + # fusion_methods: List[str], uv_correspondence: cp._core.core.ndarray, valid_correspondence: cp._core.core.ndarray, image_height: cp._core.core.ndarray, @@ -292,11 +288,12 @@ def update_layers_image( """ # additional_fusion = self.get_fusion_of_pcl(channels) + process_channels, fusion_methods = self.get_fusion(channels, self.param.image_channel_fusions, self.layer_specs_image) self.new_map[self.delete_new_layers] = 0.0 # config = self.param.subscriber_cfg[sub_key] # for j, (fusion, channel) in enumerate(zip(config["fusion"], config["channels"])): - for j, (fusion, channel) in enumerate(zip(fusion_methods, channels)): + for j, (fusion, channel) in enumerate(zip(fusion_methods, process_channels)): if channel not in self.layer_names: 
print(f"Layer {channel} not found, adding it to the semantic map") self.add_layer(channel) @@ -350,7 +347,10 @@ def get_map_with_name(self, name): cp.array: map """ # If the layer is a color layer, return the rgb map - if name in self.layer_specs and (self.layer_specs[name] == "image_color" or self.layer_specs[name] == "pointcloud_color"): + if name in self.layer_specs_points and self.layer_specs_points[name] == "color": + m = self.get_rgb(name) + return m + elif name in self.layer_specs_image and self.layer_specs_image[name] == "color": m = self.get_rgb(name) return m else: diff --git a/elevation_mapping_cupy/src/elevation_mapping_ros.cpp b/elevation_mapping_cupy/src/elevation_mapping_ros.cpp index 849f0022..21412e95 100644 --- a/elevation_mapping_cupy/src/elevation_mapping_ros.cpp +++ b/elevation_mapping_cupy/src/elevation_mapping_ros.cpp @@ -80,19 +80,14 @@ ElevationMappingNode::ElevationMappingNode(ros::NodeHandle& nh) // Initialize subscribers depending on the type if (type == "pointcloud") { std::string pointcloud_topic = subscriber.second["topic_name"]; - boost::function f = boost::bind(&ElevationMappingNode::pointcloudCallback, this, _1, key); - ros::Subscriber sub = nh_.subscribe(pointcloud_topic, 1, f); - pointcloudSubs_.push_back(sub); - const auto& channels = subscriber.second["channels"]; channels_[key].push_back("x"); channels_[key].push_back("y"); channels_[key].push_back("z"); - for (int32_t i = 0; i < channels.size(); ++i) { - auto elem = static_cast(channels[i]); - channels_[key].push_back(elem); - } - - } else if (type == "image") { + boost::function f = boost::bind(&ElevationMappingNode::pointcloudCallback, this, _1, key); + ros::Subscriber sub = nh_.subscribe(pointcloud_topic, 1, f); + pointcloudSubs_.push_back(sub); + } + else if (type == "image") { std::string camera_topic = subscriber.second["topic_name"]; std::string info_topic = subscriber.second["camera_info_topic_name"]; @@ -117,20 +112,32 @@ ElevationMappingNode::ElevationMappingNode(ros::NodeHandle& nh) cam_info_sub->subscribe(nh_, info_topic, 1); cameraInfoSubs_.push_back(cam_info_sub); - std::string fusion_info_topic; - if (subscriber.second.hasMember("fusion_info_topic_name")) { - std::string fusion_info_topic = subscriber.second["fusion_info_topic_name"]; - FusionInfoSubscriberPtr fusion_info_sub = std::make_shared(); - fusion_info_sub->subscribe(nh_, fusion_info_topic, 1); - fusionInfoSubs_.push_back(fusion_info_sub); - CameraFusionSyncPtr sync = std::make_shared(CameraFusionPolicy(10), *image_sub, *cam_info_sub, *fusion_info_sub); - sync->registerCallback(boost::bind(&ElevationMappingNode::imageFusionCallback, this, _1, _2, _3)); - cameraFusionSyncs_.push_back(sync); + std::string channel_info_topic; + // If there is channel info topic setting, we use it. + if (subscriber.second.hasMember("channel_info_topic_name")) { + std::string channel_info_topic = subscriber.second["channel_info_topic_name"]; + ChannelInfoSubscriberPtr channel_info_sub = std::make_shared(); + channel_info_sub->subscribe(nh_, channel_info_topic, 1); + channelInfoSubs_.push_back(channel_info_sub); + CameraChannelSyncPtr sync = std::make_shared(CameraChannelPolicy(10), *image_sub, *cam_info_sub, *channel_info_sub); + sync->registerCallback(boost::bind(&ElevationMappingNode::imageChannelCallback, this, _1, _2, _3)); + cameraChannelSyncs_.push_back(sync); } else { - ROS_INFO_STREAM("Fusion info topic not found"); + // If there is channels setting, we use it. Otherwise, we use rgb as default. 
+ if (subscriber.second.hasMember("channels")) { + const auto& channels = subscriber.second["channels"]; + for (int32_t i = 0; i < channels.size(); ++i) { + auto elem = static_cast(channels[i]); + channels_[key].push_back(elem); + } + } + else { + channels_[key].push_back("rgb"); + } + ROS_INFO_STREAM("Channel info topic not found for " << camera_topic << ". Using channels: " << boost::algorithm::join(channels_[key], ", ")); CameraSyncPtr sync = std::make_shared(CameraPolicy(10), *image_sub, *cam_info_sub); - sync->registerCallback(boost::bind(&ElevationMappingNode::imageCallback, this, _1, _2)); + sync->registerCallback(boost::bind(&ElevationMappingNode::imageCallback, this, _1, _2, key)); cameraSyncs_.push_back(sync); } @@ -291,39 +298,40 @@ void ElevationMappingNode::publishMapOfIndex(int index) { } void ElevationMappingNode::pointcloudCallback(const sensor_msgs::PointCloud2& cloud, const std::string& key) { - auto start = ros::Time::now(); - // transform pointcloud into matrix - auto* pcl_pc = new pcl::PCLPointCloud2; - pcl::PCLPointCloud2ConstPtr cloudPtr(pcl_pc); - pcl_conversions::toPCL(cloud, *pcl_pc); // get channels auto fields = cloud.fields; std::vector channels; - std::vector add_element; for (int it = 0; it < fields.size(); it++) { auto& field = fields[it]; - if ((std::find(channels_[key].begin(), channels_[key].end(), field.name) != channels_[key].end()) && (field.datatype == 7)) { - add_element.push_back(true); - channels.push_back(field.name); - } else - add_element.push_back(false); + channels.push_back(field.name); } + inputPointCloud(cloud, channels); + + // This is used for publishing as statistics. + pointCloudProcessCounter_++; +} + +void ElevationMappingNode::inputPointCloud(const sensor_msgs::PointCloud2& cloud, + const std::vector& channels) { + auto start = ros::Time::now(); + auto* pcl_pc = new pcl::PCLPointCloud2; + pcl::PCLPointCloud2ConstPtr cloudPtr(pcl_pc); + pcl_conversions::toPCL(cloud, *pcl_pc); + + // get channels + auto fields = cloud.fields; uint array_dim = channels.size(); RowMatrixXd points = RowMatrixXd(pcl_pc->width * pcl_pc->height, array_dim); for (unsigned int i = 0; i < pcl_pc->width * pcl_pc->height; ++i) { - int jit = 0; - for (unsigned int j = 0; j < add_element.size(); ++j) { - if (add_element[j]) { - float temp; - uint point_idx = i * pcl_pc->point_step + pcl_pc->fields[j].offset; - memcpy(&temp, &pcl_pc->data[point_idx], sizeof(float)); - points(i, jit) = static_cast(temp); - jit++; - } + for (unsigned int j = 0; j < channels.size(); ++j) { + float temp; + uint point_idx = i * pcl_pc->point_step + pcl_pc->fields[j].offset; + memcpy(&temp, &pcl_pc->data[point_idx], sizeof(float)); + points(i, j) = static_cast(temp); } } // get pose of sensor in map frame @@ -358,14 +366,12 @@ void ElevationMappingNode::pointcloudCallback(const sensor_msgs::PointCloud2& cl (ros::Time::now() - start).toSec()); ROS_DEBUG_THROTTLE(1.0, "positionError: %f ", positionError); ROS_DEBUG_THROTTLE(1.0, "orientationError: %f ", orientationError); - // This is used for publishing as statistics. 
-  pointCloudProcessCounter_++;
+
 }
 
 void ElevationMappingNode::inputImage(const sensor_msgs::ImageConstPtr& image_msg,
                                       const sensor_msgs::CameraInfoConstPtr& camera_info_msg,
-                                      const std::vector<std::string>& channels,
-                                      const std::vector<std::string>& fusion_methods) {
+                                      const std::vector<std::string>& channels) {
   // Get image
   cv::Mat image = cv_bridge::toCvShare(image_msg, image_msg->encoding)->image;
 
@@ -403,37 +409,42 @@ void ElevationMappingNode::inputImage(const sensor_msgs::ImageConstPtr& image_ms
     multichannel_image.push_back(eigen_img);
   }
 
+  // Check that the number of layers in multichannel_image matches the layers implied by channels ("rgb" counts as 3 layers).
+  int total_channels = 0;
+  for (const auto& channel : channels) {
+    if (channel == "rgb") {
+      total_channels += 3;
+    } else {
+      total_channels += 1;
+    }
+  }
+  if (total_channels != static_cast<int>(multichannel_image.size())) {
+    ROS_ERROR("Mismatch between the size of multichannel_image (%d) and the layers implied by channels (%d). Please check the input.",
+              static_cast<int>(multichannel_image.size()), total_channels);
+    ROS_ERROR_STREAM("Current Channels: " << boost::algorithm::join(channels, ", "));
+    return;
+  }
+
   // Pass image to pipeline
-  map_.input_image(multichannel_image, channels, fusion_methods, transformationMapToSensor.rotation(), transformationMapToSensor.translation(), cameraMatrix,
+  map_.input_image(multichannel_image, channels, transformationMapToSensor.rotation(), transformationMapToSensor.translation(), cameraMatrix,
                    image.rows, image.cols);
 }
 
 void ElevationMappingNode::imageCallback(const sensor_msgs::ImageConstPtr& image_msg,
-                                         const sensor_msgs::CameraInfoConstPtr& camera_info_msg) {
+                                         const sensor_msgs::CameraInfoConstPtr& camera_info_msg,
+                                         const std::string& key) {
   auto start = ros::Time::now();
-  // Default channels and fusion methods for image is rgb and image_color
-  std::vector<std::string> channels;
-  std::vector<std::string> fusion_methods;
-  channels.push_back("rgb");
-  fusion_methods.push_back("color");
-  inputImage(image_msg, camera_info_msg, channels, fusion_methods);
+  inputImage(image_msg, camera_info_msg, channels_[key]);
   ROS_DEBUG_THROTTLE(1.0, "ElevationMap processed an image in %f sec.", (ros::Time::now() - start).toSec());
 }
 
-void ElevationMappingNode::imageFusionCallback(const sensor_msgs::ImageConstPtr& image_msg,
+void ElevationMappingNode::imageChannelCallback(const sensor_msgs::ImageConstPtr& image_msg,
                                                const sensor_msgs::CameraInfoConstPtr& camera_info_msg,
-                                               const elevation_map_msgs::FusionInfoConstPtr& fusion_info_msg) {
+                                               const elevation_map_msgs::ChannelInfoConstPtr& channel_info_msg) {
   auto start = ros::Time::now();
-  // Default channels and fusion methods for image is rgb and image_color
+  // Channel names for the image come from the synchronized ChannelInfo message.
   std::vector<std::string> channels;
-  std::vector<std::string> fusion_methods;
-  fusion_methods = fusion_info_msg->fusion_methods;
-  ROS_INFO_STREAM("Channels: " << boost::algorithm::join(channels, ", "));
-  ROS_INFO_STREAM("Fusion methods: " << boost::algorithm::join(fusion_methods, ", "));
-  // channels.push_back("rgb");
-  // fusion_methods.push_back("color");
-  inputImage(image_msg, camera_info_msg, channels, fusion_methods);
+  channels = channel_info_msg->channels;
+  inputImage(image_msg, camera_info_msg, channels);
   ROS_DEBUG_THROTTLE(1.0, "ElevationMap processed an image in %f sec.", (ros::Time::now() - start).toSec());
 }
 
diff --git a/elevation_mapping_cupy/src/elevation_mapping_wrapper.cpp b/elevation_mapping_cupy/src/elevation_mapping_wrapper.cpp
index 2fd31770..f66f9a79 100644
--- a/elevation_mapping_cupy/src/elevation_mapping_wrapper.cpp
+++ b/elevation_mapping_cupy/src/elevation_mapping_wrapper.cpp
@@
-121,12 +121,13 @@ void ElevationMappingWrapper::setParameters(ros::NodeHandle& nh) { } param_.attr("subscriber_cfg") = sub_dict; - if (!nh.hasParam("pointcloud_channel_fusion")) { - ROS_WARN("No pointcloud_channel_fusion parameter found. Using default values."); + // point cloud channel fusion + if (!nh.hasParam("pointcloud_channel_fusions")) { + ROS_WARN("No pointcloud_channel_fusions parameter found. Using default values."); } else { XmlRpc::XmlRpcValue pointcloud_channel_fusion; - nh.getParam("pointcloud_channel_fusion", pointcloud_channel_fusion); + nh.getParam("pointcloud_channel_fusions", pointcloud_channel_fusion); py::dict pointcloud_channel_fusion_dict; for (auto& channel_fusion : pointcloud_channel_fusion) { @@ -136,7 +137,28 @@ void ElevationMappingWrapper::setParameters(ros::NodeHandle& nh) { pointcloud_channel_fusion_dict[name] = fusion; } } - param_.attr("pointcloud_channel_fusion") = pointcloud_channel_fusion_dict; + ROS_INFO_STREAM("pointcloud_channel_fusion_dict: " << pointcloud_channel_fusion_dict); + param_.attr("pointcloud_channel_fusions") = pointcloud_channel_fusion_dict; + } + + // image channel fusion + if (!nh.hasParam("image_channel_fusions")) { + ROS_WARN("No image_channel_fusions parameter found. Using default values."); + } + else { + XmlRpc::XmlRpcValue image_channel_fusion; + nh.getParam("image_channel_fusions", image_channel_fusion); + + py::dict image_channel_fusion_dict; + for (auto& channel_fusion : image_channel_fusion) { + const char* const name = channel_fusion.first.c_str(); + std::string fusion = static_cast(channel_fusion.second); + if (!image_channel_fusion_dict.contains(name)) { + image_channel_fusion_dict[name] = fusion; + } + } + ROS_INFO_STREAM("image_channel_fusion_dict: " << image_channel_fusion_dict); + param_.attr("image_channel_fusions") = image_channel_fusion_dict; } param_.attr("update")(); @@ -155,10 +177,10 @@ void ElevationMappingWrapper::input(const RowMatrixXd& points, const std::vector Eigen::Ref(t), positionNoise, orientationNoise); } -void ElevationMappingWrapper::input_image(const std::vector& multichannel_image, const std::vector& channels, const std::vector& fusion_methods, const RowMatrixXd& R, +void ElevationMappingWrapper::input_image(const std::vector& multichannel_image, const std::vector& channels, const RowMatrixXd& R, const Eigen::VectorXd& t, const RowMatrixXd& cameraMatrix, int height, int width) { py::gil_scoped_acquire acquire; - map_.attr("input_image")(multichannel_image, channels, fusion_methods, Eigen::Ref(R), Eigen::Ref(t), + map_.attr("input_image")(multichannel_image, channels, Eigen::Ref(R), Eigen::Ref(t), Eigen::Ref(cameraMatrix), height, width); }
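
Usage note: with this change an image subscriber gets its channel names in one of three ways: from a synchronized elevation_map_msgs/ChannelInfo message (channel_info_topic_name), from a static channels list in the subscriber config, or, if neither is given, a single default "rgb" channel. The sketch below is a minimal example (not part of this patch) of a rospy node that republishes a multi-channel image together with a matching ChannelInfo message. The output topic names follow the turtle_bot_semantics.yaml example above; the input topic and channel names are hypothetical.

#!/usr/bin/env python
import rospy
from sensor_msgs.msg import Image
from elevation_map_msgs.msg import ChannelInfo


def main():
    rospy.init_node("channel_info_publisher_example")
    image_pub = rospy.Publisher("/front_cam/semantic_image", Image, queue_size=1)
    info_pub = rospy.Publisher("/front_cam/channel_info", ChannelInfo, queue_size=1)

    def callback(image_msg):
        # Describe the layers contained in the image. The header stamp must match the
        # image so that the ApproximateTime synchronizer behind imageChannelCallback
        # can pair the two messages.
        info = ChannelInfo()
        info.header = image_msg.header
        info.channels = ["sem_person", "sem_sofa"]  # hypothetical layer names
        image_pub.publish(image_msg)
        info_pub.publish(info)

    # Hypothetical upstream topic that already carries the multi-channel image.
    rospy.Subscriber("/semantic_segmentation/image", Image, callback)
    rospy.spin()


if __name__ == "__main__":
    main()

Each received channel name is then matched against the image_channel_fusions parameter (regex keys such as feat_.* or sem_.*), falling back to the entry under default, so layers like sem_person above would be fused with the 'exponential' method in the anymal example configuration.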