当前位置: 首页 > news >正文

openvslam (1) 运行和增大跟踪效果 - MKT

openvslam (1) 运行和增大跟踪效果

 

 

执行命令

0 程序编译
mkdir build && cd build
cmake -DBUILD_WITH_MARCH_NATIVE=ON -DUSE_PANGOLIN_VIEWER=ON -DUSE_SOCKET_PUBLISHER=OFF -DUSE_STACK_TRACE_LOGGER=ON -DBOW_FRAMEWORK=DBoW2 -DBUILD_EXAMPLES=ON ..
make -j4

1 运行SLAM

查看帮助(接收图像开启):
./run_image_slam -h
输出==============================================
Allowed options:-h, --help             produce help message-v, --vocab arg        vocabulary file path-i, --img-dir arg      directory path which contains images-c, --config arg       config file path--mask arg             mask image path--frame-skip arg (=1)  interval of frame skip--no-sleep             not wait for next frame in real time--auto-term            automatically terminate the viewer--debug                debug mode--eval-log             store trajectory and tracking times for evaluation-p, --map-db arg       store a map database at this path after SLAM==================================================#
./run_image_slam_mydata \
-c  /media/dongdong/Elements1/项目/ehang_/2TestData/2TestData/ehang4/openvslam_config/GNSS_config.yaml

图像文件夹建立地图
./run_image_slam_resize \
-v /media/dongdong/新加卷/0ubuntu20/1slam/数据/2RTK/hf1_youtian/hunhe/slam_config/orb_vocab.dbow2 \
-i /media/dongdong/新加卷/0ubuntu20/1slam/数据/2RTK/hf1_youtian/hunhe/images \
-c /media/dongdong/新加卷/0ubuntu20/1slam/数据/2RTK/hf1_youtian/hunhe/slam_config/GNSS_config.yaml  \
--map-db /media/dongdong/新加卷/0ubuntu20/1slam/数据/2RTK/hf1_youtian/hunhe/slam_config/MapResult.msg

图像文件夹在线定位
./run_image_localization \
-v /home/nuc/v2_Project/openvslam/v2_Data/data/orb_vocab.dbow2 \
-i /home/nuc/v2_Project/openvslam/v2_Data/data/img \
-c /home/nuc/v2_Project/openvslam/v2_Data/data/camera.yaml  \
--map-db /home/nuc/v2_Project/openvslam/v2_Data/data/MapResult.msg

测试视频建图
./run_video_slam_resize \
-v ../data/orb_vocab.dbow2 \
-m /home/nuc/桌面/data/角度数据/采集数据/rtk学院楼门前畸变80米/DJI_0014.MOV \
-c ../data/camera_RTK_JIBIAN.yaml  \
--frame-skip 1 \
--map-db ../data/DJI_00014_270.msg

测试视频定位
./run_video_localization_resize \
-v ../data/orb_vocab.dbow2 \
-m /home/nuc/桌面/data/角度数据/采集数据/rtk学院楼门前畸变80米/DJI_0007.MOV \
-c ../data/camera_RTK_JIBIAN.yaml  \
--frame-skip 1 \
--map-db ../data/DJI_0000_00.msg \
--mapping

  

内参文件

 

 

关键点 提高跟踪

# Feature.max_num_keypoints: 3000
Feature.num_levels: 8
Feature.scale_factor: 1.2

# ORB Parameters
Feature.max_num_keypoints: 3000    # 大幅增加!默认1000不够用
# Feature.scale_factor: 1.4          # 增大金字塔尺度因子,适应大尺度变化 1.2
# Feature.num_levels: 10              # 增加金字塔层数,适应远景 8
# Feature.ini_fast_threshold: 10      # 降低初始阈值,在弱纹理区也能提点
# Feature.min_fast_threshold: 4       # 降低最小阈值,保证特征数量
Tracking.num_tracked_points: 15     # 降低跟踪点阈值(默认20),允许在特征少时继续跟踪
Tracking.min_num_tracked_points: 8  # 重定位/初始化最小点数,适当降低
Tracking.relocalization_threshold: 0.7  # 降低重定位相似度阈值,更容易触发重定位
Tracking.enable_temporal_keyframe_only: false # 确保插入正常关键帧,而非仅临时帧
# # Initialization (部分参数可能在代码中,需在初始化器里设置)
# Initializer.max_num_ransac_iters: 200  # 增加RANSAC迭代次数
# Initializer.min_num_triangulated: 40   # 降低成功初始化所需的最小三角化点数
# Initializer.parallax_deg_thr: 0.5      # 降低视差阈值(度),适应平移小的俯拍
# Keyframe Insertion
Keyframe.max_num_frms: 30    # 最大关键帧间隔(按帧数),可适当增大
Keyframe.min_num_frms: 1     # 最小间隔,防止插入过快
Keyframe.num_reliable_keypoints: 100  # 关键帧最小可靠点数,可适当降低

  

完整

image

 

Camera.color_order: RGB
Camera.cols: 1805
Camera.cx: 910.8785687265895
Camera.cy: 602.174293145834
Camera.fps: 10
Camera.fx: 1193.7076128098686
Camera.fy: 1193.1735265967602
Camera.k1: 0
Camera.k2: 0
Camera.k3: 0
Camera.k4: 0
Camera.model: perspective
Camera.name: NWPU monocular
Camera.p1: 0
Camera.p2: 0
Camera.rows: 1203
Camera.setup: monocular
Config_PATH: /home/dongdong/2project/0data/RTK/data_1_nwpuUp/data3_1130_13pm//300_location_14pm/
# Feature.max_num_keypoints: 3000
Feature.num_levels: 8
Feature.scale_factor: 1.2

# ORB Parameters
Feature.max_num_keypoints: 3000    # 大幅增加!默认1000不够用
# Feature.scale_factor: 1.4          # 增大金字塔尺度因子,适应大尺度变化 1.2
# Feature.num_levels: 10              # 增加金字塔层数,适应远景 8
# Feature.ini_fast_threshold: 10      # 降低初始阈值,在弱纹理区也能提点
# Feature.min_fast_threshold: 4       # 降低最小阈值,保证特征数量
Tracking.num_tracked_points: 15     # 降低跟踪点阈值(默认20),允许在特征少时继续跟踪
Tracking.min_num_tracked_points: 8  # 重定位/初始化最小点数,适当降低
Tracking.relocalization_threshold: 0.7  # 降低重定位相似度阈值,更容易触发重定位
Tracking.enable_temporal_keyframe_only: false # 确保插入正常关键帧,而非仅临时帧
# # Initialization (部分参数可能在代码中,需在初始化器里设置)
# Initializer.max_num_ransac_iters: 200  # 增加RANSAC迭代次数
# Initializer.min_num_triangulated: 40   # 降低成功初始化所需的最小三角化点数
# Initializer.parallax_deg_thr: 0.5      # 降低视差阈值(度),适应平移小的俯拍
# Keyframe Insertion
Keyframe.max_num_frms: 30    # 最大关键帧间隔(按帧数),可适当增大
Keyframe.min_num_frms: 1     # 最小间隔,防止插入过快
Keyframe.num_reliable_keypoints: 100  # 关键帧最小可靠点数,可适当降低

Fixed.altitude: 400.0
Fixed.altitude_flag: 0
GNSS_Have: 1
GNSS_PSTH: slam_config/gnss.txt
GNSS_USE: 1
Initial.alt: 415
Initial.lat: 34.033727638888884
Initial.lon: 108.76406197222222
Loop_Th: 80.0
Map.Ininterval: 10
Map.InsertFps: 20
Map.alt: 715
Marker.Num: 0
PangolinViewer.camera_line_width: 3
PangolinViewer.camera_size: 0.08
PangolinViewer.graph_line_width: 1
PangolinViewer.keyframe_line_width: 2
PangolinViewer.keyframe_size: 0.07
PangolinViewer.point_size: 2
PangolinViewer.viewpoint_f: 400
PangolinViewer.viewpoint_x: 0
PangolinViewer.viewpoint_y: -0.65
PangolinViewer.viewpoint_z: -1.9
Relocalize_KF_Th: 3.0
Relocalize_Th: 80.0
Save.data: 1
Save.newmap: 1
Tracking_CF_Th: 10.0
V_Instant_Th: 200.0
WorkMode: map_onlynewData
WorkMode_1: map_all_keep_allKfms
WorkMode_2: map_all
WorkMode_3: location_only
WorkMode_4: map_onlynewData
hAcc: 1.0
image_dir_path: images
image_dir_type: JPG
map_db_path: slam_out/Map_GNSS.msg
mask_img_path: none.jpg
op.Global_Optimize_Th: 1.0
op.Remove_Kayframe_Th: 6.0
op.Second_Optimize_Th: 0
op.is_Second_Optimize: 0
sRt_colmap2gnss: sparse/srt_colmap2gnss.yaml
sRt_slam2gnss: slam_out/srt_slam2gnss.yaml
vAcc: 1.0
vocab_file_path: slam_config/orb_vocab.dbow2

  

主文件

image

 

image

 

#include "util/image_util.h"

#ifdef USE_PANGOLIN_VIEWER
#include "pangolin_viewer/viewer.h"
#elif USE_SOCKET_PUBLISHER
#include "socket_publisher/publisher.h"
#endif

#include "openvslam/system.h"
#include "openvslam/config.h"

#include <algorithm>
#include <chrono>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <numeric>
#include <stdexcept>
#include <thread>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/opencv.hpp>
#include <spdlog/spdlog.h>
#include <popl.hpp>

using namespace std;
using namespace cv;

#ifdef USE_STACK_TRACE_LOGGER
#include <glog/logging.h>
#endif

#ifdef USE_GOOGLE_PERFTOOLS
#include <gperftools/profiler.h>
#endif

//! Run monocular SLAM over a directory of images, resizing every input frame
//! to the resolution declared in the config (Camera.cols x Camera.rows) before
//! it is fed to the tracker.
//! @param cfg             loaded openvslam configuration
//! @param vocab_file_path DBoW vocabulary file
//! @param image_dir_path  directory containing the input images
//! @param mask_img_path   optional mask image ("" disables masking)
//! @param frame_skip      process every N-th frame (0 is treated as 1)
//! @param no_sleep        do not pace frames to their timestamps
//! @param auto_term       close the viewer automatically when tracking ends
//! @param eval_log        dump trajectories and per-frame tracking times
//! @param map_db_path     if non-empty, save the map database here on shutdown
void mono_tracking(const std::shared_ptr<openvslam::config>& cfg,
                   const std::string& vocab_file_path, const std::string& image_dir_path,
                   const std::string& mask_img_path,
                   const unsigned int frame_skip, const bool no_sleep, const bool auto_term,
                   const bool eval_log, const std::string& map_db_path) {
    // load the mask image (an empty Mat disables masking)
    const cv::Mat mask = mask_img_path.empty() ? cv::Mat{} : cv::imread(mask_img_path, cv::IMREAD_GRAYSCALE);

    const image_sequence sequence(image_dir_path, cfg->camera_->fps_);
    const auto frames = sequence.get_frames();

    // guard: --frame-skip 0 would cause a modulo-by-zero below
    const unsigned int skip = (frame_skip == 0) ? 1 : frame_skip;

    // build a SLAM system and start it up
    openvslam::system SLAM(cfg, vocab_file_path);
    SLAM.startup();

#ifdef USE_PANGOLIN_VIEWER
    pangolin_viewer::viewer viewer(cfg, &SLAM, SLAM.get_frame_publisher(), SLAM.get_map_publisher());
#elif USE_SOCKET_PUBLISHER
    socket_publisher::publisher publisher(cfg, &SLAM, SLAM.get_frame_publisher(), SLAM.get_map_publisher());
#endif

    std::vector<double> track_times;
    track_times.reserve(frames.size());

    // target resolution taken from the config instead of the previous
    // hard-coded 1805x1203 (identical for the config shown, but now the
    // example follows whatever camera resolution is configured)
    const cv::Size dsize(static_cast<int>(cfg->camera_->cols_), static_cast<int>(cfg->camera_->rows_));

    // run the SLAM in another thread
    std::thread thread([&]() {
        for (unsigned int i = 0; i < frames.size(); ++i) {
            const auto& frame = frames.at(i);
            const auto img = cv::imread(frame.img_path_, cv::IMREAD_UNCHANGED);

            const auto tp_1 = std::chrono::steady_clock::now();

            if (!img.empty() && (i % skip == 0)) {
                // INTER_AREA gives the best quality when shrinking an image;
                // for enlarging, INTER_CUBIC (slow) or INTER_LINEAR (fast) is preferred
                cv::Mat imgResize;
                cv::resize(img, imgResize, dsize, 0, 0, cv::INTER_AREA);
                // input the current frame and estimate the camera pose
                SLAM.feed_monocular_frame(imgResize, frame.timestamp_, mask);
            }

            const auto tp_2 = std::chrono::steady_clock::now();
            const auto track_time = std::chrono::duration_cast<std::chrono::duration<double>>(tp_2 - tp_1).count();
            if (i % skip == 0) {
                track_times.push_back(track_time);
            }

            // wait until the timestamp of the next frame
            if (!no_sleep && i < frames.size() - 1) {
                const auto wait_time = frames.at(i + 1).timestamp_ - (frame.timestamp_ + track_time);
                if (0.0 < wait_time) {
                    std::this_thread::sleep_for(std::chrono::microseconds(static_cast<unsigned int>(wait_time * 1e6)));
                }
            }

            // check if the termination of SLAM system is requested or not
            if (SLAM.terminate_is_requested()) {
                break;
            }
        }

        // wait until the loop BA is finished
        while (SLAM.loop_BA_is_running()) {
            std::this_thread::sleep_for(std::chrono::microseconds(5000));
        }

        // automatically close the viewer
#ifdef USE_PANGOLIN_VIEWER
        if (auto_term) {
            viewer.request_terminate();
        }
#elif USE_SOCKET_PUBLISHER
        if (auto_term) {
            publisher.request_terminate();
        }
#endif
    });

    // run the viewer in the current thread
#ifdef USE_PANGOLIN_VIEWER
    viewer.run();
#elif USE_SOCKET_PUBLISHER
    publisher.run();
#endif

    thread.join();

    // shutdown the SLAM process
    SLAM.shutdown();

    if (eval_log) {
        // output the trajectories for evaluation
        SLAM.save_frame_trajectory("frame_trajectory.txt", "TUM");
        SLAM.save_keyframe_trajectory("keyframe_trajectory.txt", "TUM");
        // output the tracking times for evaluation
        std::ofstream ofs("track_times.txt", std::ios::out);
        if (ofs.is_open()) {
            for (const auto track_time : track_times) {
                ofs << track_time << std::endl;
            }
            ofs.close();
        }
    }

    if (!map_db_path.empty()) {
        // output the map database
        SLAM.save_map_database(map_db_path);
    }

    // report timing statistics; guard against an empty vector (e.g. no image
    // could be read), which previously crashed on .at() / divided by zero
    if (!track_times.empty()) {
        std::sort(track_times.begin(), track_times.end());
        const auto total_track_time = std::accumulate(track_times.begin(), track_times.end(), 0.0);
        std::cout << "median tracking time: " << track_times.at(track_times.size() / 2) << "[s]" << std::endl;
        std::cout << "mean tracking time: " << total_track_time / track_times.size() << "[s]" << std::endl;
    }
}

//! Entry point: parse CLI options, load the config, and run monocular tracking.
int main(int argc, char* argv[]) {
#ifdef USE_STACK_TRACE_LOGGER
    google::InitGoogleLogging(argv[0]);
    google::InstallFailureSignalHandler();
#endif

    // create options
    popl::OptionParser op("Allowed options");
    auto help = op.add<popl::Switch>("h", "help", "produce help message");
    auto vocab_file_path = op.add<popl::Value<std::string>>("v", "vocab", "vocabulary file path");
    auto img_dir_path = op.add<popl::Value<std::string>>("i", "img-dir", "directory path which contains images");
    auto config_file_path = op.add<popl::Value<std::string>>("c", "config", "config file path");
    auto mask_img_path = op.add<popl::Value<std::string>>("", "mask", "mask image path", "");
    auto frame_skip = op.add<popl::Value<unsigned int>>("", "frame-skip", "interval of frame skip", 1);
    auto no_sleep = op.add<popl::Switch>("", "no-sleep", "not wait for next frame in real time");
    auto auto_term = op.add<popl::Switch>("", "auto-term", "automatically terminate the viewer");
    auto debug_mode = op.add<popl::Switch>("", "debug", "debug mode");
    auto eval_log = op.add<popl::Switch>("", "eval-log", "store trajectory and tracking times for evaluation");
    auto map_db_path = op.add<popl::Value<std::string>>("p", "map-db", "store a map database at this path after SLAM", "");
    try {
        op.parse(argc, argv);
    }
    catch (const std::exception& e) {
        std::cerr << e.what() << std::endl;
        std::cerr << std::endl;
        std::cerr << op << std::endl;
        return EXIT_FAILURE;
    }

    // check validness of options
    if (help->is_set()) {
        std::cerr << op << std::endl;
        return EXIT_FAILURE;
    }
    if (!vocab_file_path->is_set() || !img_dir_path->is_set() || !config_file_path->is_set()) {
        std::cerr << "invalid arguments" << std::endl;
        std::cerr << std::endl;
        std::cerr << op << std::endl;
        return EXIT_FAILURE;
    }

    // setup logger
    spdlog::set_pattern("[%Y-%m-%d %H:%M:%S.%e] %^[%L] %v%$");
    if (debug_mode->is_set()) {
        spdlog::set_level(spdlog::level::debug);
    }
    else {
        spdlog::set_level(spdlog::level::info);
    }

    // load configuration
    std::shared_ptr<openvslam::config> cfg;
    try {
        cfg = std::make_shared<openvslam::config>(config_file_path->value());
    }
    catch (const std::exception& e) {
        std::cerr << e.what() << std::endl;
        return EXIT_FAILURE;
    }

// NOTE: the original had "#ifdef" split from "USE_GOOGLE_PERFTOOLS" across
// two lines, which does not compile; the directive is restored here
#ifdef USE_GOOGLE_PERFTOOLS
    ProfilerStart("slam.prof");
#endif

    // run tracking
    if (cfg->camera_->setup_type_ == openvslam::camera::setup_type_t::Monocular) {
        mono_tracking(cfg, vocab_file_path->value(), img_dir_path->value(), mask_img_path->value(),
                      frame_skip->value(), no_sleep->is_set(), auto_term->is_set(),
                      eval_log->is_set(), map_db_path->value());
    }
    else {
        throw std::runtime_error("Invalid setup type: " + cfg->camera_->get_setup_type_string());
    }

#ifdef USE_GOOGLE_PERFTOOLS
    ProfilerStop();
#endif

    return EXIT_SUCCESS;
}

  

image

 

image

 

# Custom addition: example that builds a map from an image folder, resizing each frame
add_executable(run_image_slam_resize run_image_slam_resize.cc util/image_util.cc)
list(APPEND EXECUTABLE_TARGETS run_image_slam_resize)

# Custom addition: example that localizes against a map from an image folder, resizing each frame
add_executable(run_image_localization_resize run_image_localization_resize.cc util/image_util.cc)
list(APPEND EXECUTABLE_TARGETS run_image_localization_resize)

  

http://www.jsqmd.com/news/669368/

相关文章:

  • Matlab R2023a绘图避坑:xlabel设置后不显示?教你排查字体、坐标区与对象句柄问题
  • AI赋能供应链:从SCM、SRM到MDM,智能技术如何重塑核心概念与协同
  • 宝塔面板日志文件过大_配置日志轮转与定时清理
  • 保姆级教程:用Abaqus搞定气动软体抓手的仿真建模(从材料设置到结果提取)
  • 法规标准-UN R157:自动驾驶L3级认证的“安全基石”与测试挑战
  • 从‘MOVED’错误到丝滑重定向:深入理解Redis集群客户端如何与16384个Slot打交道
  • 别再为通信失败头疼!手把手调试FR336 RFID读写器与三菱PLC的Modbus RTU连接
  • JumpServer自动化运维避坑手册:Ansible作业调度那些容易踩的5个雷(含容器权限隔离最佳实践)
  • 工业肌肉:08 伺服最容易坏在哪里?工程师最怕的 10 个坑
  • STM32实战 | 基于AD7606并行接口的高效多通道数据采集方案
  • 别再只测本地了!手把手教你配置Mosquitto MQTT代理,让外网设备也能连上
  • 轨道角动量OAM超表面设计:自旋到轨道角动量转换与几何相位调控的FDTD仿真研究
  • 从理论到实践:拆解TFT模型在业务时序预测中的核心优势与落地指南
  • 从Attention U-Net到UCTransNet:深入拆解通道Transformer(CCT/CCA)如何革新医学影像分割的‘特征融合’逻辑
  • python tilt
  • 【AGI自主学习底层逻辑】:20年AI架构师首度公开7大探索策略与3个致命误区
  • 硕飞SP328烧录器联机vs脱机模式选择指南:1G/2G/4G Flash实测对比
  • 教授专栏205| 胡文琪:开发全球首个仿生人工纤毛系统,为未来医疗及工程微型机械人应用开创新方向
  • Mac上播放H264直播流的终极方案:从VideoToolbox硬解到AVSampleBufferDisplayLayer的保姆级踩坑实录
  • 从面试官视角看CV:那些年我们踩过的OCR面试坑,附CRNN/DB/CTPN高频考点解析
  • 新国标下的电子产品认证换版指南:聚焦GB 4943.1-2022与GB/T 9254.1-2021核心变化与应对策略
  • 别再到处找脚本了!Windows 11家庭版一键解锁组策略(gpedit.msc)的保姆级教程
  • VerilogA实战:构建8位十进制转二进制转换器的核心逻辑与仿真验证
  • 入职两年,我以为和同事关系很好。离职那天,没有一个人来送我,连微信都没人发。才明白,那叫同事,不叫朋友
  • 代码复现: 《含多微网租赁共享储能的配电网博弈优化调度》 首先利用NSGA-II算法求解三个微...
  • 告别KVM切换器!用微软官方免费神器Mouse without Borders,一套键鼠搞定四台Windows电脑
  • LeetCode刷题必备:用单调栈5分钟搞定‘直方图最大矩形’和‘子数组最值差’两道经典题
  • 第33篇:AI+教育新玩法——个性化学习助手与智能课件生成(项目实战)
  • Hyper-V SR-IOV实战:从硬件检测到虚拟机网络性能飞跃
  • 别再只用CBC了!AES加密的ECB、CTR、XTS模式到底该怎么选?附场景对比表