Picking up where we left off: the previous post analyzed the interface logic inside ROS2Visualizer. Now we get to the main topic and analyze VioManager.


Contents

  • Main member variables
  • feed_measurement_imu()
  • feed_measurement_camera()
  • do_feature_propagate_update()



Main member variables

  • The master system state
/// Our master state object :D
std::shared_ptr<State> state;
  • State propagation
/// Propagator of our state
std::shared_ptr<Propagator> propagator;
  • Sparse feature tracking (ArUco tags can also be tracked)
/// Our sparse feature tracker (klt or descriptor)
std::shared_ptr<ov_core::TrackBase> trackFEATS;
/// Our aruco tracker
std::shared_ptr<ov_core::TrackBase> trackARUCO;
  • System initialization
/// State initializer
std::shared_ptr<ov_init::InertialInitializer> initializer;
  • State update modules: the MSCKF updater, the SLAM updater, and the zero-velocity (ZUPT) updater
/// Our MSCKF feature updater
std::shared_ptr<UpdaterMSCKF> updaterMSCKF;
/// Our SLAM/ARUCO feature updater
std::shared_ptr<UpdaterSLAM> updaterSLAM;
/// Our zero velocity tracker
std::shared_ptr<UpdaterZeroVelocity> updaterZUPT;

Next, let's look at what feed_measurement_imu() and feed_measurement_camera(), the two entry points called from ROS2Visualizer, actually do.
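
Before diving into them, here is a minimal sketch of how a caller (for example ROS2Visualizer, after converting the ROS messages) could hand data to these two entry points. The include paths, the helper name feed_example, and the all-zero mask are my own assumptions for illustration; the ov_core::ImuData / ov_core::CameraData fields and the two feed calls are the actual OpenVINS interface.

#include <memory>
#include <Eigen/Core>
#include <opencv2/opencv.hpp>
#include "core/VioManager.h"   // ov_msckf (path as laid out in the OpenVINS repo)
#include "utils/sensor_data.h" // ov_core::ImuData / ov_core::CameraData

// Hand one IMU sample and one monocular frame to the VIO system.
void feed_example(std::shared_ptr<ov_msckf::VioManager> sys, double t,
                  const Eigen::Vector3d &gyro, const Eigen::Vector3d &accel,
                  const cv::Mat &img0) {
  // IMU packet: timestamp, angular velocity (wm), linear acceleration (am)
  ov_core::ImuData imu;
  imu.timestamp = t;
  imu.wm = gyro;
  imu.am = accel;
  sys->feed_measurement_imu(imu);

  // Camera packet: sensor id 0, grayscale image, all-zero mask (nothing masked out)
  ov_core::CameraData cam;
  cam.timestamp = t;
  cam.sensor_ids.push_back(0);
  cam.images.push_back(img0.clone());
  cam.masks.push_back(cv::Mat::zeros(img0.rows, img0.cols, CV_8UC1));
  sys->feed_measurement_camera(cam);
}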

feed_measurement_imu()

First, check whether VIO has been initialized and compute oldest_time, the timestamp before which buffered IMU measurements are no longer needed:

double oldest_time = state->margtimestep();
  if (oldest_time > state->_timestamp) {
    oldest_time = -1;
  }
  if (!is_initialized_vio) {
    oldest_time = message.timestamp - params.init_options.init_window_time +
                  state->_calib_dt_CAMtoIMU->value()(0) - 0.10;
  }

Feed the IMU measurement to the propagator for the subsequent state propagation:

propagator->feed_imu(message, oldest_time);

If the system has not been initialized yet, also feed the measurement to the initializer:

// Push back to our initializer
  if (!is_initialized_vio) {
    initializer->feed_imu(message, oldest_time);
  }

The ZUPT updater also needs the IMU data, since it decides from the IMU whether the platform is currently stationary:

// Push back to the zero velocity updater if it is enabled
  // No need to push back if we are just doing the zv-update at the beginning and
  // we have moved
  if (is_initialized_vio && updaterZUPT != nullptr &&
      (!params.zupt_only_at_beginning || !has_moved_since_zupt)) {
    updaterZUPT->feed_imu(message, oldest_time);
  }

feed_measurement_camera()

This function simply forwards to track_image_and_update(), so that is the function we analyze next.
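
In the version of the source I am reading this appears to be a one-line forwarding wrapper, roughly:

void feed_measurement_camera(const ov_core::CameraData &message) { track_image_and_update(message); }
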
First, if camera downsampling is enabled, each input image and its mask are downsampled by one pyramid level with cv::pyrDown (a Gaussian blur followed by halving the resolution):

// Downsample if we are downsampling
  ov_core::CameraData message = message_const;
  for (size_t i = 0; i < message.sensor_ids.size() && params.downsample_cameras;
       i++) {
    cv::Mat img = message.images.at(i);
    cv::Mat mask = message.masks.at(i);
    cv::Mat img_temp, mask_temp;
    cv::pyrDown(img, img_temp, cv::Size(img.cols / 2.0, img.rows / 2.0));
    message.images.at(i) = img_temp;
    cv::pyrDown(mask, mask_temp, cv::Size(mask.cols / 2.0, mask.rows / 2.0));
    message.masks.at(i) = mask_temp;
  }

Next, feature tracking is run on the (possibly downsampled) images:

trackFEATS->feed_new_camera(message);

Then, if zero-velocity updates are enabled, a ZUPT update is attempted. If it succeeds, the old IMU measurements are cleaned up and the function returns early:

if (is_initialized_vio && updaterZUPT != nullptr &&
      (!params.zupt_only_at_beginning || !has_moved_since_zupt)) {
    // If the same state time, use the previous timestep decision
    if (state->_timestamp != message.timestamp) {
      did_zupt_update = updaterZUPT->try_update(state, message.timestamp);
    }
    if (did_zupt_update) {
      assert(state->_timestamp == message.timestamp);
      propagator->clean_old_imu_measurements(
          message.timestamp + state->_calib_dt_CAMtoIMU->value()(0) - 0.10);
      updaterZUPT->clean_old_imu_measurements(
          message.timestamp + state->_calib_dt_CAMtoIMU->value()(0) - 0.10);
      return;
    }
  }

If the system has not been initialized yet, try to initialize from this image; if initialization fails, just log the tracking time and return:

if (!is_initialized_vio) {
    is_initialized_vio = try_to_initialize(message);
    if (!is_initialized_vio) {
      double time_track = (rT2 - rT1).total_microseconds() * 1e-6;
      PRINT_DEBUG(BLUE "[TIME]: %.4f seconds for tracking\n" RESET, time_track);
      return;
    }
  }

Finally, perform propagation and the update for the input image:

// Call on our propagate and update function
  do_feature_propagate_update(message);

Two functions carry the main work here: try_to_initialize() and do_feature_propagate_update(). Initialization will be covered in detail later; for now we focus on do_feature_propagate_update().

do_feature_propagate_update()

When a new image arrives, the state is first propagated forward with the IMU data to the current camera timestamp (and a new clone is inserted):

if (state->_timestamp != message.timestamp) {
    propagator->propagate_and_clone(state, message.timestamp);
  }

Two early-return checks follow: if there are not yet enough clone states we return and wait for more, and if the propagator was unable to bring the state up to the current timestamp (for example because of insufficient IMU data) we also return:

if ((int)state->_clones_IMU.size() <
      std::min(state->_options.max_clone_size, 5)) {
    PRINT_DEBUG("waiting for enough clone states (%d of %d)....\n",
                (int)state->_clones_IMU.size(),
                std::min(state->_options.max_clone_size, 5));
    return;
  }
  // Return if we where unable to propagate
  if (state->_timestamp != message.timestamp) {
    PRINT_WARNING(RED
                  "[PROP]: Propagator unable to propagate the state forward in "
                  "time!\n" RESET);
    PRINT_WARNING(
        RED "[PROP]: It has been %.3f since last time we propagated\n" RESET,
        message.timestamp - state->_timestamp);
    return;
  }

Next, features are pulled from trackFEATS to perform measurement updates on the state. The feature-selection strategy works with three containers:


std::vector<std::shared_ptr<Feature>> feats_lost, feats_marg, feats_slam;

feats_lost: features that are no longer being tracked, i.e. they have no measurement newer than state->_timestamp.

feats_lost =
      trackFEATS->get_feature_database()->features_not_containing_newer(
          state->_timestamp, false, true);
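
Conceptually, "not containing newer" means that no camera has observed the feature at or after the query time. The following is a simplified sketch of that rule (my own illustration, not the actual ov_core::FeatureDatabase code, which additionally handles deleted features and optional removal from the database):

#include <memory>
#include "feat/Feature.h" // ov_core::Feature

// A feature counts as "lost" if none of its cameras observed it at or after `timestamp`.
bool not_containing_newer(const std::shared_ptr<ov_core::Feature> &feat, double timestamp) {
  for (const auto &cam_times : feat->timestamps) // camera id -> observation times
    for (double t : cam_times.second)
      if (t >= timestamp) return false;          // still has a recent observation
  return true;                                   // no recent observation -> track lost
}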

feats_marg: initialized with the features that have a measurement at the clone about to be marginalized (state->margtimestep()). These are later used for an EKF update but are never added to the state vector.
feats_slam: initialized with the ArUco features observed at that same timestep; these are added to the state vector and updated as SLAM features.

if ((int)state->_clones_IMU.size() > state->_options.max_clone_size ||
      (int)state->_clones_IMU.size() > 5) {
    feats_marg = trackFEATS->get_feature_database()->features_containing(
        state->margtimestep(), false, true);
    if (trackARUCO != nullptr &&
        message.timestamp - startup_time >= params.dt_slam_delay) {
      feats_slam = trackARUCO->get_feature_database()->features_containing(
          state->margtimestep(), false, true);
    }
  }

Next, the features in feats_marg are split by track length: features whose track has reached the maximum length (more observations than max_clone_size in some camera) are moved into feats_maxtracks, while the shorter tracks stay in feats_marg:

// Find tracks that have reached max length, these can be made into SLAM
  // features
  std::vector<std::shared_ptr<Feature>> feats_maxtracks;
  auto it2 = feats_marg.begin();
  while (it2 != feats_marg.end()) {
    // See if any of our camera's reached max track
    bool reached_max = false;
    for (const auto &cams : (*it2)->timestamps) {
      if ((int)cams.second.size() > state->_options.max_clone_size) {
        reached_max = true;
        break;
      }
    }
    // If max track, then add it to our possible slam feature list
    if (reached_max) {
      feats_maxtracks.push_back(*it2);
      it2 = feats_marg.erase(it2);
    } else {
      it2++;
    }
  }

Next, some of the long tracks in feats_maxtracks are promoted into feats_slam. Since feats_slam features get added to the state vector, their number must be strictly limited (by max_slam_features) to keep the state size from exploding:

if (state->_options.max_slam_features > 0 &&
      message.timestamp - startup_time >= params.dt_slam_delay &&
      (int)state->_features_SLAM.size() <
          state->_options.max_slam_features + curr_aruco_tags) {
    // Get the total amount to add, then the max amount that we can add given
    // our marginalize feature array
    int amount_to_add = (state->_options.max_slam_features + curr_aruco_tags) -
                        (int)state->_features_SLAM.size();
    int valid_amount = (amount_to_add > (int)feats_maxtracks.size())
                           ? (int)feats_maxtracks.size()
                           : amount_to_add;
    // If we have at least 1 that we can add, lets add it!
    // Note: we remove them from the feat_marg array since we don't want to
    // reuse information...
    if (valid_amount > 0) {
      feats_slam.insert(feats_slam.end(), feats_maxtracks.end() - valid_amount,
                        feats_maxtracks.end());
      feats_maxtracks.erase(feats_maxtracks.end() - valid_amount,
                            feats_maxtracks.end());
    }
  }

We then iterate over the SLAM features already in the state. If a landmark is still found in the current trackFEATS (or trackARUCO) database, its feature is appended to feats_slam; if it was not found even though its camera contributed to the current image, or if its update has failed too many times, the landmark is flagged for marginalization:

for (std::pair<const size_t, std::shared_ptr<Landmark>> &landmark :
       state->_features_SLAM) {
    if (trackARUCO != nullptr) {
      std::shared_ptr<Feature> feat1 =
          trackARUCO->get_feature_database()->get_feature(
              landmark.second->_featid);
      if (feat1 != nullptr) feats_slam.push_back(feat1);
    }
    std::shared_ptr<Feature> feat2 =
        trackFEATS->get_feature_database()->get_feature(
            landmark.second->_featid);
    if (feat2 != nullptr) feats_slam.push_back(feat2);
    assert(landmark.second->_unique_camera_id != -1);
    bool current_unique_cam =
        std::find(message.sensor_ids.begin(), message.sensor_ids.end(),
                  landmark.second->_unique_camera_id) !=
        message.sensor_ids.end();
    if (feat2 == nullptr && current_unique_cam)
      landmark.second->should_marg = true;
    if (landmark.second->update_fail_count > 1)
      landmark.second->should_marg = true;
  }

Next, the flagged SLAM features are marginalized out of state->_features_SLAM:

StateHelper::marginalize_slam(state);

The collected feats_slam are then split into feats_slam_UPDATE (already in the state vector) and feats_slam_DELAYED (not yet in the state, to be initialized later):

std::vector<std::shared_ptr<Feature>> feats_slam_DELAYED, feats_slam_UPDATE;
  for (size_t i = 0; i < feats_slam.size(); i++) {
    if (state->_features_SLAM.find(feats_slam.at(i)->featid) !=
        state->_features_SLAM.end()) {
      feats_slam_UPDATE.push_back(feats_slam.at(i));
      // PRINT_DEBUG("[UPDATE-SLAM]: found old feature %d (%d
      // measurements)\n",(int)feats_slam.at(i)->featid,(int)feats_slam.at(i)->timestamps_left.size());
    } else {
      feats_slam_DELAYED.push_back(feats_slam.at(i));
      // PRINT_DEBUG("[UPDATE-SLAM]: new feature ready %d (%d
      // measurements)\n",(int)feats_slam.at(i)->featid,(int)feats_slam.at(i)->timestamps_left.size());
    }
  }

Next we assemble featsup_MSCKF, the features used for the MSCKF update: it starts from feats_lost, then the remaining feats_marg (short tracks) and the leftover feats_maxtracks (long tracks that were not promoted to SLAM features) are appended:

std::vector<std::shared_ptr<Feature>> featsup_MSCKF = feats_lost;
  featsup_MSCKF.insert(featsup_MSCKF.end(), feats_marg.begin(),
                       feats_marg.end());
  featsup_MSCKF.insert(featsup_MSCKF.end(), feats_maxtracks.begin(),
                       feats_maxtracks.end());

With this, featsup_MSCKF and feats_slam are ready to drive the MSCKF update and the SLAM update, respectively.

Before the MSCKF update, featsup_MSCKF is pruned: the features are sorted by their total number of measurements, and if there are more than max_msckf_in_update, only the best (longest) tracks are kept for the EKF update:

auto compare_feat = [](const std::shared_ptr<Feature> &a,
                         const std::shared_ptr<Feature> &b) -> bool {
    size_t asize = 0;
    size_t bsize = 0;
    for (const auto &pair : a->timestamps) asize += pair.second.size();
    for (const auto &pair : b->timestamps) bsize += pair.second.size();
    return asize < bsize;
  };
  std::sort(featsup_MSCKF.begin(), featsup_MSCKF.end(), compare_feat);

  // Pass them to our MSCKF updater
  // NOTE: if we have more then the max, we select the "best" ones (i.e. max
  // tracks) for this update NOTE: this should only really be used if you want
  // to track a lot of features, or have limited computational resources
  if ((int)featsup_MSCKF.size() > state->_options.max_msckf_in_update)
    featsup_MSCKF.erase(
        featsup_MSCKF.begin(),
        featsup_MSCKF.end() - state->_options.max_msckf_in_update);
  updaterMSCKF->update(state, featsup_MSCKF);

For the SLAM update, the features already in the state vector (feats_slam_UPDATE) are updated in chunks of at most max_slam_in_update features, and afterwards the new features (feats_slam_DELAYED) are initialized into the state via delayed initialization:

std::vector<std::shared_ptr<Feature>> feats_slam_UPDATE_TEMP;
  while (!feats_slam_UPDATE.empty()) {
    // Get sub vector of the features we will update with
    std::vector<std::shared_ptr<Feature>> featsup_TEMP;
    featsup_TEMP.insert(
        featsup_TEMP.begin(), feats_slam_UPDATE.begin(),
        feats_slam_UPDATE.begin() + std::min(state->_options.max_slam_in_update,
                                             (int)feats_slam_UPDATE.size()));
    feats_slam_UPDATE.erase(
        feats_slam_UPDATE.begin(),
        feats_slam_UPDATE.begin() + std::min(state->_options.max_slam_in_update,
                                             (int)feats_slam_UPDATE.size()));
    // Do the update
    updaterSLAM->update(state, featsup_TEMP);
    feats_slam_UPDATE_TEMP.insert(feats_slam_UPDATE_TEMP.end(),
                                  featsup_TEMP.begin(), featsup_TEMP.end());
  }
  feats_slam_UPDATE = feats_slam_UPDATE_TEMP;
  rT5 = boost::posix_time::microsec_clock::local_time();
  updaterSLAM->delayed_init(state, feats_slam_DELAYED);
  rT6 = boost::posix_time::microsec_clock::local_time();