I created a node for monocular-inertial ORB_SLAM3, using ROS2 Humble and C++. The code has one problem: I need to convert a cv::Mat to Sophus::SE3. Here is the function where the conversion should happen.
void ImageGrabber::SyncWithImu()
{
  while (rclcpp::ok())
  {
    cv::Mat im;
    double tIm = 0;
    if (!img0Buf.empty() && !mpImuGb->imuBuf.empty())
    {
      tIm = img0Buf.front()->header.stamp.sec + img0Buf.front()->header.stamp.nanosec * 1e-9;
      if (tIm > mpImuGb->imuBuf.back()->header.stamp.sec + mpImuGb->imuBuf.back()->header.stamp.nanosec * 1e-9)
        continue;
      {
        this->mBufMutex.lock();
        im = GetImage(img0Buf.front());
        img0Buf.pop();
        this->mBufMutex.unlock();
      }
      vector<ORB_SLAM3::IMU::Point> vImuMeas;
      mpImuGb->mBufMutex.lock();
      if (!mpImuGb->imuBuf.empty())
      {
        // Load IMU measurements from buffer
        vImuMeas.clear();
        while (!mpImuGb->imuBuf.empty() && mpImuGb->imuBuf.front()->header.stamp.sec + mpImuGb->imuBuf.front()->header.stamp.nanosec * 1e-9 <= tIm)
        {
          double t = mpImuGb->imuBuf.front()->header.stamp.sec + mpImuGb->imuBuf.front()->header.stamp.nanosec * 1e-9;
          cv::Point3f acc(mpImuGb->imuBuf.front()->linear_acceleration.x, mpImuGb->imuBuf.front()->linear_acceleration.y, mpImuGb->imuBuf.front()->linear_acceleration.z);
          cv::Point3f gyr(mpImuGb->imuBuf.front()->angular_velocity.x, mpImuGb->imuBuf.front()->angular_velocity.y, mpImuGb->imuBuf.front()->angular_velocity.z);
          vImuMeas.push_back(ORB_SLAM3::IMU::Point(acc, gyr, t));
          mpImuGb->imuBuf.pop();
        }
      }
      mpImuGb->mBufMutex.unlock();
      if (mbClahe)
        mClahe->apply(im, im);
      cv::Mat T_, R_, t_;
      //Sophus::SE3f T_;
      // stores the return value of TrackMonocular
      //cv::Mat TrackMonocular(const cv::Mat &im, const double &timestamp, const vector<IMU::Point>& vImuMeas = vector<IMU::Point>(), string filename="");
      T_ = mpSLAM->TrackMonocular(im, tIm, vImuMeas);
      if (!(T_.empty()))
      {
        RCLCPP_INFO(logger_, "Entering publisher execution block");
        cv::Size s = T_.size();
        if ((s.height >= 3) && (s.width >= 3))
        {
          R_ = T_.rowRange(0, 3).colRange(0, 3).t();
          t_ = -R_ * T_.rowRange(0, 3).col(3);
          std::vector<float> q = ORB_SLAM3::Converter::toQuaternion(R_);
          float scale_factor = 1.0;
          tf2::Transform tf_transform;
          // t_ is a 3x1 column vector, so index it as (row, 0)
          tf_transform.setOrigin(tf2::Vector3(t_.at<float>(0, 0) * scale_factor, t_.at<float>(1, 0) * scale_factor, t_.at<float>(2, 0) * scale_factor));
          tf_transform.setRotation(tf2::Quaternion(q[0], q[1], q[2], q[3]));
          //RCLCPP_INFO(this->get_logger(), "Mono_Inertial node started");
          RCLCPP_INFO(logger_, "Transformation matrix R_: %s", cv::format("%f %f %f\n%f %f %f\n%f %f %f",
              R_.at<float>(0, 0), R_.at<float>(0, 1), R_.at<float>(0, 2),
              R_.at<float>(1, 0), R_.at<float>(1, 1), R_.at<float>(1, 2),
              R_.at<float>(2, 0), R_.at<float>(2, 1), R_.at<float>(2, 2)).c_str());
        }
      }
    }
    //RCLCPP_INFO(logger_, "After publisher execution block");
    std::chrono::milliseconds tSleep(1);
    std::this_thread::sleep_for(tSleep);
  }
}
I would like to publish the pose estimated by ORB_SLAM3 from the camera and IMU. The problem is in this line:
T_ = mpSLAM->TrackMonocular(im, tIm, vImuMeas);
And I got this error:
error: no match for ‘operator=’ (operand types are ‘cv::Mat’ and ‘Sophus::SE3f’ {aka ‘Sophus::SE3<float>’})
  257 | T_ = mpSLAM->TrackMonocular(im, tIm, vImuMeas);
So what is the right way to implement a CvMatToSE3 function, i.e. this conversion? And do I need to implement the CvMatToSE3 function in the Converter class (inside Converter.h at ~ORB_SLAM3/include/Converter.h), or in a separate header/cpp file?
Thanks
So what is the right way to implement a CvMatToSE3 function, i.e. this conversion?

Here is an implementation of CvMatToSE3:
#include <stdexcept>

#include <Eigen/Core>
#include <opencv2/core/core.hpp>
#include <sophus/se3.hpp>

template <class Scalar>
Sophus::SE3<Scalar> CvMatToSE3(const cv::Mat& m) {
  // Accept a 3x4 [R|t] matrix or a full 4x4 homogeneous matrix.
  const bool is_valid = (m.rows == 3 || m.rows == 4) && (m.cols == 4);
  if (!is_valid) throw std::runtime_error("input is not a valid SE3 matrix");
  Eigen::Matrix4f x;
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 4; ++j) {
      x(i, j) = m.at<float>(i, j);
    }
  }
  if (!Sophus::isOrthogonal(x.topLeftCorner<3, 3>())) {
    throw std::runtime_error("rotation is not orthogonal");
  }
  // Fill in the last homogeneous row [0 0 0 1].
  x(3, 0) = 0.f;
  x(3, 1) = 0.f;
  x(3, 2) = 0.f;
  x(3, 3) = 1.f;
  return Sophus::SE3f(x).cast<Scalar>();
}
It raises an exception if the cv::Mat is not 4x4 or 3x4, or if the rotational part is not orthogonal. Another key point is that cv::Mat uses float, and if you convert it directly to Sophus::SE3<double> without going through Sophus::SE3f(x).cast<double>(), it might fail the orthogonality check due to numerical precision. That's why I use Eigen::Matrix4f and Sophus::SE3f as the bridge.
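For example (a hypothetical snippet; Tcw is just an illustrative name for a 4x4 CV_32F pose):

// Hypothetical usage: Tcw stands in for a 4x4 CV_32F camera pose.
cv::Mat Tcw = cv::Mat::eye(4, 4, CV_32F);
// Goes float matrix -> SE3f -> cast to double, as described above.
Sophus::SE3d pose = CvMatToSE3<double>(Tcw);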
To convert back:
template <class Scalar>
cv::Mat SE3ToCvMat(const Sophus::SE3<Scalar>& se3) {
  // get the 4x4 homogeneous matrix
  const Eigen::Matrix<Scalar, 4, 4> m = se3.matrix();
  // convert to cv::Mat, narrowing to float to match CV_32F
  cv::Mat cvMat(m.rows(), m.cols(), CV_32F);
  for (int i = 0; i < m.rows(); ++i) {
    for (int j = 0; j < m.cols(); ++j) {
      cvMat.at<float>(i, j) = static_cast<float>(m(i, j));
    }
  }
  return cvMat;
}
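With these two helpers, the failing line in SyncWithImu needs only a small change. A minimal sketch, assuming TrackMonocular returns Sophus::SE3f as your compiler error indicates (Tcw, Rwc, twc are illustrative names):

// Receive the pose in its native Sophus type.
Sophus::SE3f Tcw = mpSLAM->TrackMonocular(im, tIm, vImuMeas);
// Option 1: convert back to cv::Mat so the existing publishing code keeps working.
cv::Mat T_ = SE3ToCvMat(Tcw);
// Option 2: read rotation/translation directly and skip cv::Mat entirely.
Eigen::Matrix3f Rwc = Tcw.rotationMatrix().transpose(); // camera-to-world rotation
Eigen::Vector3f twc = -Rwc * Tcw.translation();         // camera center in world coordinates
Eigen::Quaternionf q(Rwc);                              // pass as tf2::Quaternion(q.x(), q.y(), q.z(), q.w())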
And do I need to implement the CvMatToSE3 function in the Converter class (inside Converter.h at ~ORB_SLAM3/include/Converter.h), or in a separate header/cpp file?
You can put the implementation anywhere as long as ImageGrabber::SyncWithImu can see it. A simple way is to put it in the cpp file where ImageGrabber::SyncWithImu is defined, inside an anonymous namespace; see Why should you prefer unnamed namespaces over static functions?.
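For instance, at the top of that cpp file (just a placement sketch; the function bodies are the ones shown above):

#include <opencv2/core/core.hpp>
#include <sophus/se3.hpp>

namespace { // anonymous namespace: the helpers are visible only in this translation unit

// Definitions as shown above.
template <class Scalar>
Sophus::SE3<Scalar> CvMatToSE3(const cv::Mat& m);

template <class Scalar>
cv::Mat SE3ToCvMat(const Sophus::SE3<Scalar>& se3);

} // namespace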