VINS_Fusion Code Annotations (3): Image Processing
Related posts in this series:
VINS_Fusion Code Annotations (1): rosNodeTest.cpp
VINS_Fusion Code Annotations (2): IMU Pre-integration
1. Overall processing order
rosNodeTest.cpp (sync_process) -> inputImage() -> processMeasurements(), which splits into two parts:
a. processIMU()   //annotated in the previous post (IMU pre-integration)
b. processImage() //the image-processing part covered in this post (a minimal sketch of this split follows below)
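Before going into the code, here is a minimal, self-contained sketch of that split (an illustration under assumed structure, not the actual rosNodeTest.cpp / estimator.cpp): the ROS side pushes tracked frames into a buffer, and a separate estimator thread pops them, integrates the IMU samples up to the image time, and then processes the image.

// Minimal sketch of the producer/consumer split described above (assumed structure).
#include <queue>
#include <mutex>
#include <thread>
#include <chrono>
#include <cstdio>

std::queue<double> featureBuf;   // stands in for the queue of (time, feature map) pairs
std::mutex mBuf;

void inputImage(double t)        // called from sync_process() for every (synced) image
{
    std::lock_guard<std::mutex> lk(mBuf);
    featureBuf.push(t);          // in the real code the tracked feature map is pushed as well
}

void processMeasurements()       // the estimator's processing thread
{
    int consumed = 0;
    while (consumed < 3)
    {
        double t = -1.0;
        {
            std::lock_guard<std::mutex> lk(mBuf);
            if (!featureBuf.empty()) { t = featureBuf.front(); featureBuf.pop(); }
        }
        if (t < 0.0) { std::this_thread::sleep_for(std::chrono::milliseconds(2)); continue; }
        std::printf("a. processIMU()  : integrate IMU samples up to t = %.3f\n", t);   // previous post
        std::printf("b. processImage(): handle the feature map stamped t = %.3f\n", t); // this post
        ++consumed;
    }
}

int main()
{
    std::thread consumer(processMeasurements);
    for (int k = 0; k < 3; ++k)
        inputImage(0.05 * k);    // pretend sync_process() delivered three frames
    consumer.join();
    return 0;
}

Compile with -pthread; the real processMeasurements() additionally waits until IMU data newer than the image timestamp is available before integrating.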
2. Annotated code
void Estimator::processImage(const map<int, vector<pair<int, Eigen::Matrix<double, 7, 1>>>> &image, const double header)
{
ROS_DEBUG("new image coming ------------------------------------------");
ROS_DEBUG("Adding feature points %lu", image.size());
//A frame is judged to be a keyframe when the rotation-compensated parallax is large enough and enough features are still being tracked. Keyframes are kept in the current sliding window and the oldest state is marginalized out; if the frame is not a keyframe, it is the one that gets marginalized preferentially.
//To keep the optimization tractable in real time, VINS only optimizes over a fixed number of recent frames rather than the full history; that number is the window size. Keeping the window size constant means an old frame must be removed whenever a new one is added, which is what marginalization does. Whether to drop the oldest frame (MARGIN_OLD) or the second-newest frame in the window (MARGIN_SECOND_NEW) is decided by comparing the parallax between the current frame and the previous frames: if the current frame has changed little, the second-newest frame is dropped; if it has changed a lot, the oldest frame is dropped. VINS handles feature bookkeeping and the parallax check in the same function: features are added first, then the parallax is checked.
if (f_manager.addFeatureCheckParallax(frame_count, image, td))
{
marginalization_flag = MARGIN_OLD;
//printf("keyframe\n");
}
else
{
marginalization_flag = MARGIN_SECOND_NEW;
//printf("non-keyframe\n");
}
ROS_DEBUG("%s", marginalization_flag ? "Non-keyframe" : "Keyframe");
ROS_DEBUG("Solving %d", frame_count);
ROS_DEBUG("number of feature: %d", f_manager.getFeatureCount());
Headers[frame_count] = header;//timestamp of the newest frame in the window
ImageFrame imageframe(image, header);
imageframe.pre_integration = tmp_pre_integration;//attach the IMU pre-integration accumulated since the previous image
all_image_frame.insert(make_pair(header, imageframe));
tmp_pre_integration = new IntegrationBase{acc_0, gyr_0, Bas[frame_count], Bgs[frame_count]};//start a fresh pre-integration for the next image interval
/*
When the extrinsics are completely unknown, VINS can estimate the camera-IMU rotation online: a rough estimate is computed here in processImage(), and it is later refined during optimization(). (A short note on the constraint being solved follows right after processImage() below.)
*/
//extrinsic rotation calibration
if(ESTIMATE_EXTRINSIC == 2)
{
ROS_INFO("calibrating extrinsic param, rotation movement is needed");
if (frame_count != 0)
{
vector<pair<Vector3d, Vector3d>> corres = f_manager.getCorresponding(frame_count - 1, frame_count);
Matrix3d calib_ric;
if (initial_ex_rotation.CalibrationExRotation(corres, pre_integrations[frame_count]->delta_q, calib_ric))
{
ROS_WARN("initial extrinsic rotation calib success");
ROS_WARN_STREAM("initial extrinsic rotation: " << endl << calib_ric);
//there is one ric per camera; in the monocular case ric holds a single entry
ric[0] = calib_ric;
RIC[0] = calib_ric;
ESTIMATE_EXTRINSIC = 1;//if the extrinsic rotation calibration succeeds, set the flag to 1 (refine it in optimization instead of calibrating from scratch)
}
}
}
if (solver_flag == INITIAL)
{
// monocular + IMU initialization
if (!STEREO && USE_IMU)
{
if (frame_count == WINDOW_SIZE)
{
bool result = false;
if(ESTIMATE_EXTRINSIC != 2 && (header - initial_timestamp) > 0.1)
{
result = initialStructure();
initial_timestamp = header;
}
if(result)
{
optimization();
updateLatestStates();
solver_flag = NON_LINEAR;
slideWindow();
ROS_INFO("Initialization finish!");
}
else
slideWindow();
}
}
// stereo + IMU initialization
if(STEREO && USE_IMU)
{
f_manager.initFramePoseByPnP(frame_count, Ps, Rs, tic, ric);
f_manager.triangulate(frame_count, Ps, Rs, tic, ric);
if (frame_count == WINDOW_SIZE)
{
map<double, ImageFrame>::iterator frame_it;
int i = 0;
for (frame_it = all_image_frame.begin(); frame_it != all_image_frame.end(); frame_it++)
{
frame_it->second.R = Rs[i];
frame_it->second.T = Ps[i];
i++;
}
solveGyroscopeBias(all_image_frame, Bgs);
for (int i = 0; i <= WINDOW_SIZE; i++)
{
pre_integrations[i]->repropagate(Vector3d::Zero(), Bgs[i]);
}
optimization();
updateLatestStates();
solver_flag = NON_LINEAR;
slideWindow();
ROS_INFO("Initialization finish!");
}
}
// stereo only initialization
if(STEREO && !USE_IMU)
{
f_manager.initFramePoseByPnP(frame_count, Ps, Rs, tic, ric);
f_manager.triangulate(frame_count, Ps, Rs, tic, ric);
optimization();
if(frame_count == WINDOW_SIZE)
{
optimization();
updateLatestStates();
solver_flag = NON_LINEAR;
slideWindow();
ROS_INFO("Initialization finish!");
}
}
if(frame_count < WINDOW_SIZE)
{
frame_count++;
int prev_frame = frame_count - 1;
Ps[frame_count] = Ps[prev_frame];
Vs[frame_count] = Vs[prev_frame];
Rs[frame_count] = Rs[prev_frame];
Bas[frame_count] = Bas[prev_frame];
Bgs[frame_count] = Bgs[prev_frame];
}
}
else
{
TicToc t_solve;
if(!USE_IMU)
f_manager.initFramePoseByPnP(frame_count, Ps, Rs, tic, ric);
f_manager.triangulate(frame_count, Ps, Rs, tic, ric);
optimization();
set<int> removeIndex;
outliersRejection(removeIndex);
f_manager.removeOutlier(removeIndex);
if (! MULTIPLE_THREAD)
{
featureTracker.removeOutliers(removeIndex);
predictPtsInNextFrame();
}
ROS_DEBUG("solver costs: %fms", t_solve.toc());
if (failureDetection())
{
ROS_WARN("failure detection!");
failure_occur = 1;
clearState();
setParameter();
ROS_WARN("system reboot!");
return;
}
slideWindow();
f_manager.removeFailures();
// prepare output of VINS
key_poses.clear();
for (int i = 0; i <= WINDOW_SIZE; i++)
key_poses.push_back(Ps[i]);
last_R = Rs[WINDOW_SIZE];
last_P = Ps[WINDOW_SIZE];
last_R0 = Rs[0];
last_P0 = Ps[0];
updateLatestStates();
}
}
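A short note on the extrinsic-calibration branch above (this summarizes the formulation published with VINS-Mono; it is not code from this file): for each pair of consecutive frames k and k+1, the pre-integrated IMU rotation delta_q = q^{b_k}_{b_{k+1}} and the relative camera rotation q^{c_k}_{c_{k+1}} recovered from the correspondences corres both relate the same two frames, which constrains the extrinsic rotation q^{b}_{c}:

q^{b_k}_{b_{k+1}} \otimes q^{b}_{c} = q^{b}_{c} \otimes q^{c_k}_{c_{k+1}}
\;\Longrightarrow\;
\left( \big[ q^{b_k}_{b_{k+1}} \big]_L - \big[ q^{c_k}_{c_{k+1}} \big]_R \right) q^{b}_{c} = 0

where [\cdot]_L and [\cdot]_R denote the left and right quaternion-multiplication matrices. Stacking one such 4x4 block per frame pair over the window and taking the right singular vector associated with the smallest singular value gives the estimate that CalibrationExRotation() reports, i.e. the value stored into ric[0] / RIC[0] (the implementation additionally down-weights frame pairs whose IMU and camera rotations disagree strongly).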
//------------------- feature bookkeeping and the parallax check ------------------------------
bool FeatureManager::addFeatureCheckParallax(int frame_count, const map<int, vector<pair<int, Eigen::Matrix<double, 7, 1>>>> &image, double td)
//frame_count: index of the current frame in the sliding window (it becomes start_frame when a brand-new feature is inserted)
/***** the outer int is feature_id; the inner int is camera_id; each observation carries x, y, z, u, v, vx, vy *****/
/*image.first = feature_id;
image.second[0]: image.second[0].first  camera id of the mono / stereo-left observation (0);
image.second[0].second  the feature observation from the mono / left camera
image.second[1]: image.second[1].first  camera id of the stereo-right observation (1);
image.second[1].second  the feature observation from the right camera;
(a small constructed example of this layout is given right after this function)
*/
{
ROS_DEBUG("input feature: %d", (int)image.size());
ROS_DEBUG("num of feature: %d", getFeatureCount());
//sum of the parallaxes of all features that qualify for the check
double parallax_sum = 0;
//number of features that qualify (seen by both the second-last and third-last frames)
int parallax_num = 0;
//number of features successfully tracked from earlier frames into this frame
last_track_num = 0;
last_average_parallax = 0;
//number of brand-new features in this frame
new_feature_num = 0;
//number of long-tracked features (observed in at least 4 frames)
long_track_num = 0;
for (auto &id_pts : image)
{
FeaturePerFrame f_per_fra(id_pts.second[0].second, td);//mono / left-camera observation
assert(id_pts.second[0].first == 0);
if(id_pts.second.size() == 2)//a right-camera observation exists as well (stereo)
{
f_per_fra.rightObservation(id_pts.second[1].second);
assert(id_pts.second[1].first == 1);
}
int feature_id = id_pts.first;
//std::find looks for an element equal to a given value; std::find_if looks for the first element satisfying a predicate (here: matching feature_id);
//https://blog.csdn.net/zzhongcy/article/details/87709685
auto it = find_if(feature.begin(), feature.end(), [feature_id](const FeaturePerId &it)
{
return it.feature_id == feature_id;
});
if (it == feature.end())//not found: this is a brand-new feature
{
feature.push_back(FeaturePerId(feature_id, frame_count));//register it in the manager; `feature` is a list of FeaturePerId
feature.back().feature_per_frame.push_back(f_per_fra);
new_feature_num++;//count it as a new feature
}
else if (it->feature_id == feature_id)//found: the feature is already being tracked
{
/*
append this frame's observation (x, y, z, u, v, vx, vy) to the feature's FeaturePerFrame list
*/
it->feature_per_frame.push_back(f_per_fra);
last_track_num++;//one more feature tracked into this frame
if (it->feature_per_frame.size() >= 4)//a feature observed in at least 4 frames counts as long-tracked
long_track_num++;
}
}
//if (frame_count < 2 || last_track_num < 20)
//if (frame_count < 2 || last_track_num < 20 || new_feature_num > 0.5 * last_track_num)
//treat the current frame as a keyframe if the window is still short, tracking is weak, there are too few long-tracked features, or too many of the features are brand new
if (frame_count < 2 || last_track_num < 20 || long_track_num < 40 || new_feature_num > 0.5 * last_track_num)
return true;
for (auto &it_per_id : feature)
{
/*
for every feature observed in both the second-last (frame_count - 1) and third-last (frame_count - 2) frames, accumulate the parallax between those two frames
*/
if (it_per_id.start_frame <= frame_count - 2 &&
it_per_id.start_frame + int(it_per_id.feature_per_frame.size()) - 1 >= frame_count - 1)
{
parallax_sum += compensatedParallax2(it_per_id, frame_count);
parallax_num++;
}
}
if (parallax_num == 0)
{
return true;
}
else
{
ROS_DEBUG("parallax_sum: %lf, parallax_num: %d", parallax_sum, parallax_num);
ROS_DEBUG("current parallax: %lf", parallax_sum / parallax_num * FOCAL_LENGTH);
last_average_parallax = parallax_sum / parallax_num * FOCAL_LENGTH;
return parallax_sum / parallax_num >= MIN_PARALLAX;
}
}
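To make the layout described at the top of addFeatureCheckParallax() concrete, here is a small constructed example (the numbers are made up) of the image argument, i.e. map<feature_id, vector<pair<camera_id, [x, y, z, u, v, vx, vy]>>>:

// Constructed example of the "image" feature map (values are illustrative only).
#include <map>
#include <vector>
#include <utility>
#include <eigen3/Eigen/Dense>

int main()
{
    using FeatureFrame = std::map<int, std::vector<std::pair<int, Eigen::Matrix<double, 7, 1>>>>;
    FeatureFrame image;

    Eigen::Matrix<double, 7, 1> left, right;
    //        x      y     z    u      v      vx    vy
    left  << 0.12, -0.03, 1.0, 412.5, 233.1, 0.8, -0.2;   // normalized point, pixel position, point velocity
    right << 0.10, -0.03, 1.0, 398.7, 233.0, 0.8, -0.2;

    int feature_id = 17;
    image[feature_id].emplace_back(0, left);   // camera_id 0: mono / left camera (always present)
    image[feature_id].emplace_back(1, right);  // camera_id 1: right camera (stereo only)

    // A purely monocular feature would carry only the camera_id == 0 entry,
    // which is exactly what the assert()s in addFeatureCheckParallax() check.
    return 0;
}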
/*******************************************************
* Copyright (C) 2019, Aerial Robotics Group, Hong Kong University of Science and Technology
*
* This file is part of VINS.
*
* Licensed under the GNU General Public License v3.0;
* you may not use this file except in compliance with the License.
*******************************************************/
#ifndef FEATURE_MANAGER_H
#define FEATURE_MANAGER_H
#include <list>
#include <algorithm>
#include <vector>
#include <numeric>
using namespace std;
#include <eigen3/Eigen/Dense>
using namespace Eigen;
#include <ros/console.h>
#include <ros/assert.h>
#include "parameters.h"
#include "../utility/tic_toc.h"
/*
FeatureManager holds:  list<FeaturePerId> feature;
FeaturePerId holds:    vector<FeaturePerFrame> feature_per_frame;
FeaturePerFrame:       one feature's observation in one frame.
(a small traversal sketch of this nesting follows right after the header)
*/
//one feature's observation in a single image frame
class FeaturePerFrame
{
public:
FeaturePerFrame(const Eigen::Matrix<double, 7, 1> &_point, double td)//mono / left camera
{
//point in the camera frame (normalized image plane, z = 1)
point.x() = _point(0);
point.y() = _point(1);
point.z() = _point(2);
//pixel coordinates
uv.x() = _point(3);
uv.y() = _point(4);
//velocity of the feature point along x and y (computed on the normalized plane)
velocity.x() = _point(5);
velocity.y() = _point(6);
//camera-IMU time offset (td) associated with this observation
cur_td = td;
//whether a right-camera (stereo) observation has been attached
is_stereo = false;
}
void rightObservation(const Eigen::Matrix<double, 7, 1> &_point)//right camera
{
pointRight.x() = _point(0);
pointRight.y() = _point(1);
pointRight.z() = _point(2);
uvRight.x() = _point(3);
uvRight.y() = _point(4);
velocityRight.x() = _point(5);
velocityRight.y() = _point(6);
is_stereo = true;
}
double cur_td;
Vector3d point, pointRight;
Vector2d uv, uvRight;
Vector2d velocity, velocityRight;
bool is_stereo;
};
//per-feature bookkeeping: all observations of one feature across the frames that see it
class FeaturePerId
{
public:
const int feature_id;
int start_frame;
vector<FeaturePerFrame> feature_per_frame;
int used_num;
double estimated_depth;
int solve_flag; // 0 haven't solve yet; 1 solve succ; 2 solve fail;
FeaturePerId(int _feature_id, int _start_frame)
: feature_id(_feature_id), start_frame(_start_frame),
used_num(0), estimated_depth(-1.0), solve_flag(0)
{
}
int endFrame();
};
//feature manager
class FeatureManager
{
public:
FeatureManager(Matrix3d _Rs[]);
void setRic(Matrix3d _ric[]);
void clearState();
int getFeatureCount();
bool addFeatureCheckParallax(int frame_count, const map<int, vector<pair<int, Eigen::Matrix<double, 7, 1>>>> &image, double td);
vector<pair<Vector3d, Vector3d>> getCorresponding(int frame_count_l, int frame_count_r);
//void updateDepth(const VectorXd &x);
void setDepth(const VectorXd &x);
void removeFailures();
void clearDepth();
VectorXd getDepthVector();
void triangulate(int frameCnt, Vector3d Ps[], Matrix3d Rs[], Vector3d tic[], Matrix3d ric[]);
void triangulatePoint(Eigen::Matrix<double, 3, 4> &Pose0, Eigen::Matrix<double, 3, 4> &Pose1,
Eigen::Vector2d &point0, Eigen::Vector2d &point1, Eigen::Vector3d &point_3d);
void initFramePoseByPnP(int frameCnt, Vector3d Ps[], Matrix3d Rs[], Vector3d tic[], Matrix3d ric[]);
bool solvePoseByPnP(Eigen::Matrix3d &R_initial, Eigen::Vector3d &P_initial,
vector<cv::Point2f> &pts2D, vector<cv::Point3f> &pts3D);
void removeBackShiftDepth(Eigen::Matrix3d marg_R, Eigen::Vector3d marg_P, Eigen::Matrix3d new_R, Eigen::Vector3d new_P);
void removeBack();
void removeFront(int frame_count);
void removeOutlier(set<int> &outlierIndex);
list<FeaturePerId> feature;
int last_track_num;
double last_average_parallax;
int new_feature_num;
int long_track_num;
private:
double compensatedParallax2(const FeaturePerId &it_per_id, int frame_count);
const Matrix3d *Rs;
Matrix3d ric[2];
};
#endif
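To visualize the nesting described in the comment at the top of this header, here is a standalone sketch that mirrors list<FeaturePerId> -> vector<FeaturePerFrame>; stand-in structs are used instead of the real classes so that it compiles on its own, and it reproduces the long_track_num counting pattern from addFeatureCheckParallax():

// Stand-in sketch of the container nesting used by FeatureManager (illustrative only).
#include <cstdio>
#include <list>
#include <vector>

struct FrameObs { double x, y, z; };          // stand-in for FeaturePerFrame

struct TrackedFeature {                       // stand-in for FeaturePerId
    int feature_id;
    int start_frame;
    std::vector<FrameObs> feature_per_frame;  // one observation per frame that saw the feature
};

int main()
{
    // stand-in for FeatureManager::feature
    std::list<TrackedFeature> feature = {
        {7, 0, {{0.1, 0.2, 1.0}, {0.1, 0.2, 1.0}, {0.1, 0.2, 1.0}, {0.1, 0.2, 1.0}}}, // seen in 4 frames
        {9, 2, {{0.3, 0.1, 1.0}, {0.3, 0.1, 1.0}}}                                    // seen in 2 frames
    };

    // Same traversal pattern addFeatureCheckParallax() uses for long_track_num:
    int long_track_num = 0;
    for (const auto &f : feature)
        if (f.feature_per_frame.size() >= 4)
            ++long_track_num;

    std::printf("long_track_num = %d\n", long_track_num);  // prints 1
    return 0;
}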
Compute the (rotation-compensated) parallax of a single feature:
double FeatureManager::compensatedParallax2(const FeaturePerId &it_per_id, int frame_count)
{
//check whether the second-last frame is a keyframe:
//parallax between the second-last frame and the third-last frame
const FeaturePerFrame &frame_i = it_per_id.feature_per_frame[frame_count - 2 - it_per_id.start_frame];
const FeaturePerFrame &frame_j = it_per_id.feature_per_frame[frame_count - 1 - it_per_id.start_frame];
//This is not a reprojection error: it is the parallax of the same feature, on the normalized image plane, between the third-last frame (i) and the second-last frame (j). The rotation-compensated branch below is commented out, so p_i_comp is simply p_i.
double ans = 0;
Vector3d p_j = frame_j.point;
double u_j = p_j(0);
double v_j = p_j(1);
Vector3d p_i = frame_i.point;
Vector3d p_i_comp;
//int r_i = frame_count - 2;
//int r_j = frame_count - 1;
//p_i_comp = ric[camera_id_j].transpose() * Rs[r_j].transpose() * Rs[r_i] * ric[camera_id_i] * p_i;
p_i_comp = p_i;
double dep_i = p_i(2);
double u_i = p_i(0) / dep_i;
double v_i = p_i(1) / dep_i;
double du = u_i - u_j, dv = v_i - v_j;
double dep_i_comp = p_i_comp(2);
double u_i_comp = p_i_comp(0) / dep_i_comp;
double v_i_comp = p_i_comp(1) / dep_i_comp;
double du_comp = u_i_comp - u_j, dv_comp = v_i_comp - v_j;
ans = max(ans, sqrt(min(du * du + dv * dv, du_comp * du_comp + dv_comp * dv_comp)));
return ans;
}
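As a quick numerical check of the value this function returns: the parallax is measured on the normalized image plane, and the keyframe threshold from the config file (given in pixels) is divided by the virtual focal length FOCAL_LENGTH when the parameters are loaded, so the comparison in addFeatureCheckParallax() happens in normalized units. The feature coordinates and the keyframe_parallax value below are assumptions chosen for illustration, and the actual decision averages over all qualifying features rather than using a single one.

// Worked example of the per-feature parallax and the keyframe threshold (illustrative numbers).
#include <cmath>
#include <cstdio>

int main()
{
    const double FOCAL_LENGTH = 460.0;          // virtual focal length used by VINS
    const double keyframe_parallax_px = 10.0;   // assumed config value, in pixels
    const double MIN_PARALLAX = keyframe_parallax_px / FOCAL_LENGTH;  // threshold in normalized units

    // The same feature projected onto the normalized plane (z = 1)
    // in the third-last frame (i) and the second-last frame (j):
    double u_i = 0.120, v_i = -0.030;
    double u_j = 0.095, v_j = -0.028;

    double du = u_i - u_j, dv = v_i - v_j;
    double parallax = std::sqrt(du * du + dv * dv);   // what compensatedParallax2() returns

    std::printf("parallax = %.4f (%.1f px), threshold = %.4f -> %s\n",
                parallax, parallax * FOCAL_LENGTH, MIN_PARALLAX,
                parallax >= MIN_PARALLAX ? "keyframe, MARGIN_OLD"
                                         : "non-keyframe, MARGIN_SECOND_NEW");
    return 0;
}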
Source: CSDN
Author: 纷繁中淡定
Link: https://blog.csdn.net/qq_36170626/article/details/103835570