@Alex-Kushleyev @wilkinsaf Thanks for your answers! Actually, I've done something similar with Docker, but I still have a few problems with the data format that mpa_to_ros publishes. To address this, I did the following:
```python
#!/usr/bin/env python
import rospy
import cv2
import numpy as np
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
import yaml


class CamImageConverter:
    def __init__(self):
        self.bridge = CvBridge()

        # Subscribers
        rospy.Subscriber("/drone/tof_depth/image_raw", Image, self.depth_callback)
        rospy.Subscriber("/drone/hires/image_raw", Image, self.rgb_callback)
        rospy.Subscriber("/drone/tracking_front/image_raw", Image, self.tracking_callback)

        # Publishers
        self.depth_pub = rospy.Publisher("/drone/tof_depth/image", Image, queue_size=1000)
        self.rgb_pub = rospy.Publisher("/drone/hires/image", Image, queue_size=1000)
        self.rgb_info_pub = rospy.Publisher("/drone/hires/camera_info", CameraInfo, queue_size=1000)
        self.tracking_pub = rospy.Publisher("/drone/tracking_front/image", Image, queue_size=1000)
        self.tracking_info_pub = rospy.Publisher("/drone/tracking_front/camera_info", CameraInfo, queue_size=1000)

        self.camera_info_msg = self.load_camera_info("/root/ros_ws/src/cam_converter/calibration/ost.yaml")

        rospy.Timer(rospy.Duration(1.0 / 30), self.publish_tracking_info)
        rospy.Timer(rospy.Duration(1.0 / 30), self.publish_rgb_info)

    def load_camera_info(self, filename):
        with open(filename, "r") as file:
            calib_data = yaml.safe_load(file)
        camera_info_msg = CameraInfo()
        camera_info_msg.height = calib_data["image_height"]
        camera_info_msg.width = calib_data["image_width"]
        camera_info_msg.distortion_model = calib_data["distortion_model"]
        camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
        camera_info_msg.K = calib_data["camera_matrix"]["data"]
        camera_info_msg.R = calib_data["rectification_matrix"]["data"]
        camera_info_msg.P = calib_data["projection_matrix"]["data"]
        return camera_info_msg

    def depth_callback(self, msg):
        try:
            # Convert the ROS message to an OpenCV image
            raw_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="mono8")
            # Stretch the image to the full 16-bit range (dtype=cv2.CV_16U is needed,
            # otherwise cv2.normalize keeps the 8-bit type and saturates at 255)
            depth_image = cv2.normalize(raw_image, None, 0, 65535, cv2.NORM_MINMAX, dtype=cv2.CV_16U)
            # Publish the converted image
            converted_msg = self.bridge.cv2_to_imgmsg(depth_image, encoding="16UC1")
            converted_msg.header.frame_id = 'tof_link'
            converted_msg.header.stamp = rospy.Time.now()
            self.depth_pub.publish(converted_msg)
        except CvBridgeError as e:
            rospy.logerr("CvBridge Error: {0}".format(e))

    def rgb_callback(self, msg):
        try:
            # Convert YUV422 or NV12 input to RGB8. The encoding is checked first,
            # because cv_bridge does not know "nv12" and that buffer has to be
            # unpacked manually from msg.data.
            if msg.encoding == "yuv422":
                raw_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
                rgb_image = cv2.cvtColor(raw_image, cv2.COLOR_YUV2RGB_Y422)
            elif msg.encoding == "nv12":
                yuv_image = np.frombuffer(msg.data, dtype=np.uint8).reshape((msg.height * 3 // 2, msg.width))
                rgb_image = cv2.cvtColor(yuv_image, cv2.COLOR_YUV2RGB_NV12)
            else:
                rospy.logerr("Unsupported encoding: {}".format(msg.encoding))
                return
            # Resize the RGB image to the same size as the ToF image (240x180)
            resized_rgb_image = cv2.resize(rgb_image, (180, 240), interpolation=cv2.INTER_LINEAR)
            # Publish the converted, resized image
            converted_msg = self.bridge.cv2_to_imgmsg(resized_rgb_image, encoding="rgb8")
            converted_msg.header.frame_id = 'hires_link'
            converted_msg.header.stamp = rospy.Time.now()
            self.rgb_pub.publish(converted_msg)
        except CvBridgeError as e:
            rospy.logerr("CvBridge Error: {0}".format(e))

    def tracking_callback(self, msg):
        try:
            # Convert the ROS message to an OpenCV image
            raw_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
            # Resize the image to the same size as the ToF image (240x180)
            resized_image = cv2.resize(raw_image, (180, 240), interpolation=cv2.INTER_LINEAR)
            # Publish the converted, resized image
            converted_msg = self.bridge.cv2_to_imgmsg(resized_image)
            converted_msg.header.frame_id = 'tracking_front_link'
            converted_msg.header.stamp = rospy.Time.now()
            self.tracking_pub.publish(converted_msg)
        except CvBridgeError as e:
            rospy.logerr("CvBridge Error: {0}".format(e))

    def publish_rgb_info(self, event):
        self.camera_info_msg.header.stamp = rospy.Time.now()
        self.camera_info_msg.header.frame_id = 'hires_link'
        self.rgb_info_pub.publish(self.camera_info_msg)

    def publish_tracking_info(self, event):
        camera_info_msg = CameraInfo()
        camera_info_msg.header.frame_id = 'tracking_front_link'
        camera_info_msg.header.stamp = rospy.Time.now()
        camera_info_msg.height = 240
        camera_info_msg.width = 180
        camera_info_msg.distortion_model = 'plumb_bob'
        camera_info_msg.D = [5.0234834483837899e-02, 3.2967878240805777e-02, -1.6468916528340826e-02, 1.8155367951008903e-03]
        camera_info_msg.K = [4.6177826259848081e+02, 0., 6.1505488982380564e+02, 0., 4.6160870283061041e+02, 4.1065345867690684e+02, 0., 0., 1.]
        camera_info_msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        camera_info_msg.P = [4.6177826259848081e+02, 0., 6.1505488982380564e+02, 0., 0., 4.6160870283061041e+02, 4.1065345867690684e+02, 0., 0., 0., 1., 0.]
        self.tracking_info_pub.publish(camera_info_msg)


def main():
    rospy.init_node('cam_image_converter', anonymous=True)
    CamImageConverter()
    rospy.spin()


if __name__ == '__main__':
    main()
```
I don't know if this is the right way to do it, but it seems to work. Do you have any suggestions for implementing this directly on the VOXL 2?
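One thing I'm particularly unsure about is the per-frame min-max normalization in `depth_callback`: as far as I understand, RTAB-Map expects metric depth (16UC1 in millimetres or 32FC1 in metres), so stretching each frame independently probably throws the scale away. If the ToF pipeline can publish real metric depth (e.g. 32FC1 in metres — I haven't verified what mpa_to_ros actually sends, hence my question about the data format), I was thinking of a drop-in replacement for that callback roughly like this sketch:

```python
    def depth_callback(self, msg):
        try:
            # Sketch only: assumes the incoming topic carries metric depth as
            # 32FC1 metres, which I have NOT verified on my setup.
            depth_m = self.bridge.imgmsg_to_cv2(msg, desired_encoding="32FC1")
            depth_m = np.nan_to_num(depth_m)  # replace NaN / inf with 0
            # Convert metres to 16UC1 millimetres, which RTAB-Map accepts directly.
            depth_mm = np.clip(depth_m * 1000.0, 0, 65535).astype(np.uint16)
            converted_msg = self.bridge.cv2_to_imgmsg(depth_mm, encoding="16UC1")
            converted_msg.header = msg.header  # keep the original stamp for sync
            converted_msg.header.frame_id = 'tof_link'
            self.depth_pub.publish(converted_msg)
        except CvBridgeError as e:
            rospy.logerr("CvBridge Error: {0}".format(e))
```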
Also, I have a few problems with camera calibrations and the drone model. Do you have any files that contain all the data for the cameras (ToF, tracking, and high-resolution) and a model of the drone that I can use in RViz and Gazebo? This could really help me.
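On the calibration side, I believe the VOXL keeps OpenCV-style YAML calibration files on the board (I think the tracking one is `/data/modalai/opencv_tracking_intrinsics.yml`, but please correct me if the path or key names are wrong). If that's right, something like the sketch below could build the CameraInfo directly from the on-board file instead of my hard-coded values — the "M"/"D" node names are a guess on my part:

```python
import cv2
import numpy as np
from sensor_msgs.msg import CameraInfo

def camera_info_from_opencv_yaml(path, width, height, frame_id):
    # Node names "M" (3x3 intrinsics) and "D" (distortion) are assumptions;
    # adjust them to whatever the on-board file actually contains.
    fs = cv2.FileStorage(path, cv2.FILE_STORAGE_READ)
    K = fs.getNode("M").mat()
    D = fs.getNode("D").mat().flatten()
    fs.release()

    info = CameraInfo()
    info.header.frame_id = frame_id
    info.width = width
    info.height = height
    info.distortion_model = "plumb_bob"
    info.D = D.tolist()
    info.K = K.flatten().tolist()
    info.R = np.eye(3).flatten().tolist()
    # No rectification: P is just K with a zero translation column.
    info.P = np.hstack((K, np.zeros((3, 1)))).flatten().tolist()
    return info
```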
For more context, I'm trying to use this setup for autonomous SLAM with ROS. Has anyone already run RTAB-Map on this drone? Do you have any advice on setting it up?
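Since RTAB-Map needs the RGB and depth images (plus camera_info) to be time-synchronized, I was also planning to pair them with `message_filters` before feeding them to rtabmap_ros, roughly like this sketch (the `/drone/rgbd/*` output topic names are just placeholders I made up):

```python
#!/usr/bin/env python
# Sketch only: republishes the already-converted RGB and depth images with a
# shared timestamp so an approximate-time sync downstream can pair them.
import rospy
import message_filters
from sensor_msgs.msg import Image

rospy.init_node('rgbd_sync_sketch')

rgb_pub = rospy.Publisher("/drone/rgbd/rgb", Image, queue_size=10)
depth_pub = rospy.Publisher("/drone/rgbd/depth", Image, queue_size=10)

def synced_cb(rgb_msg, depth_msg):
    # Stamp both messages identically before republishing.
    stamp = rospy.Time.now()
    rgb_msg.header.stamp = stamp
    depth_msg.header.stamp = stamp
    rgb_pub.publish(rgb_msg)
    depth_pub.publish(depth_msg)

rgb_sub = message_filters.Subscriber("/drone/hires/image", Image)
depth_sub = message_filters.Subscriber("/drone/tof_depth/image", Image)
sync = message_filters.ApproximateTimeSynchronizer([rgb_sub, depth_sub],
                                                   queue_size=10, slop=0.05)
sync.registerCallback(synced_cb)
rospy.spin()
```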
Thank you for your help!