add color line detection and supplementary files

This commit is contained in:
lxm 2023-06-28 18:46:47 +08:00
parent e21327dd71
commit f9eb5c3205
14 changed files with 644 additions and 27 deletions

View File

@ -69,6 +69,7 @@ include_directories(
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/cuda
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/landing_det/cuda
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/tracking/ocv470
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/color_line
${CMAKE_CURRENT_SOURCE_DIR}/video_io
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/ellipse_det
${CMAKE_CURRENT_SOURCE_DIR}/utils
@ -108,6 +109,7 @@ set(
include/sv_common_det.h
include/sv_landing_det.h
include/sv_tracking.h
include/sv_color_line.h
include/sv_video_input.h
include/sv_video_output.h
include/sv_world.h
@ -148,10 +150,13 @@ set(spirecv_SRCS
algorithm/common_det/sv_common_det.cpp
algorithm/landing_det/sv_landing_det.cpp
algorithm/tracking/sv_tracking.cpp
algorithm/color_line/sv_color_line.cpp
)
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/tracking/ocv470/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/color_line/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/utils/*.cpp)
@ -237,7 +242,7 @@ elseif(PLATFORM STREQUAL "X86_CPU")
endif()
endif()
#demo
add_executable(ArucoDetection samples/demo/aruco_detection.cpp)
target_link_libraries(ArucoDetection sv_world)
add_executable(CameraReading samples/demo/camera_reading.cpp)
@ -252,6 +257,8 @@ add_executable(LandingMarkerDetection samples/demo/landing_marker_detection.cpp)
target_link_libraries(LandingMarkerDetection sv_world)
add_executable(SingleObjectTracking samples/demo/single_object_tracking.cpp)
target_link_libraries(SingleObjectTracking sv_world)
add_executable(ColorLineDetection samples/demo/color_line_detect.cpp)
target_link_libraries(ColorLineDetection sv_world)
add_executable(UdpDetectionInfoReceiver samples/demo/udp_detection_info_receiver.cpp)
target_link_libraries(UdpDetectionInfoReceiver sv_world)
add_executable(UdpDetectionInfoSender samples/demo/udp_detection_info_sender.cpp)
@ -260,12 +267,17 @@ add_executable(VideoSaving samples/demo/video_saving.cpp)
target_link_libraries(VideoSaving sv_world)
add_executable(VideoStreaming samples/demo/video_streaming.cpp)
target_link_libraries(VideoStreaming sv_world)
add_executable(GimbalClickedTracking samples/demo/gimbal_detection_with_clicked_tracking.cpp)
target_link_libraries(GimbalClickedTracking sv_world)
add_executable(GimbalLandingMarkerDetection samples/demo/gimbal_landing_marker_detection.cpp)
target_link_libraries(GimbalLandingMarkerDetection sv_world)
add_executable(GimbalUdpDetectionInfoSender samples/demo/gimbal_udp_detection_info_sender.cpp)
target_link_libraries(GimbalUdpDetectionInfoSender sv_world)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/samples/calib)
add_executable(CameraCalibrarion samples/calib/calibrate_camera_charuco.cpp)
target_link_libraries(CameraCalibrarion ${OpenCV_LIBS})
message(STATUS "CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}")
if (NOT DEFINED SV_INSTALL_PREFIX)
set(SV_INSTALL_PREFIX ${CMAKE_INSTALL_PREFIX})

View File

@ -10,7 +10,7 @@ SpireCV是一个专为**智能无人系统**打造的**边缘实时感知SDK**
## 快速入门
- 安装及使用:[SpireCV使用手册](https://wiki.amovlab.com/public/spirecv-wiki/)
- 安装及使用:[SpireCV使用手册](https://www.wolai.com/4qWFM6aZmtpQE6jj7hnNMW)
- 需掌握C++语言基础、CMake编译工具基础。
- 需要掌握OpenCV视觉库基础了解CUDA、OpenVINO、RKNN和CANN等计算库。
- 需要了解ROS基本概念及基本操作。

View File

@ -0,0 +1,252 @@
#include "sv_color_line.h"
#include "gason.h"
#include "sv_util.h"
#include <cmath>
#include <fstream>
namespace sv {
// Construct a color-line detector.  Algorithm parameters are loaded lazily
// from the JSON config file on the first call to detect().
ColorLineDetector::ColorLineDetector()
    : is_load_parameter(false)
{
}
// No resources to release explicitly; members clean up via their own destructors.
ColorLineDetector::~ColorLineDetector() = default;
void ColorLineDetector::_load()
{
JsonValue all_value;
JsonAllocator allocator;
_load_all_json(this->alg_params_fn, all_value, allocator);
JsonValue colorliner_params_value;
_parser_algorithm_params("ColorLineDetector", all_value, colorliner_params_value);
for (auto i : colorliner_params_value) {
if ("line_color" == std::string(i->key)) {
this->line_color = i->value.toString();
std::cout << "line_color: " << this->line_color << std::endl;
}
else if ("line_location" == std::string(i->key)) {
this->line_location = i->value.toNumber();
}
else if ("line_location_a1" == std::string(i->key)) {
this->line_location_a1 = i->value.toNumber();
}
else if ("line_location_a2" == std::string(i->key)) {
this->line_location_a2 = i->value.toNumber();
}
}
}
// Crop three horizontal strips from the input frame, each 5% of the frame
// height tall.  Strip positions are fractions of the height measured from the
// bottom of the frame:
//   line_area    -> main strip at line_location
//   line_area_a1 -> auxiliary strip at line_location_a1 (top row cached in cy_a1)
//   line_area_a2 -> auxiliary strip at line_location_a2 (top row cached in cy_a2)
// Also caches half_h / half_w, used later to express auxiliary centers
// relative to the frame center.
// Row bounds are clamped to [0, rows] so an out-of-range line_location* in
// the config cannot make cv::Mat::operator()(cv::Range, ...) throw.
void ColorLineDetector::get_line_area(cv::Mat &frame, cv::Mat &line_area, cv::Mat &line_area_a1, cv::Mat &line_area_a2) {
    int h = frame.rows;
    half_h = h / 2.0;
    half_w = frame.cols / 2.0;

    // Keep a row index inside the valid range of the frame.
    auto clamp_row = [h](int r) { return std::max(0, std::min(r, h)); };

    int l1 = clamp_row(int(h * (1 - line_location - 0.05)));
    int l2 = clamp_row(int(h * (1 - line_location)));
    line_area = frame(cv::Range(l1, l2), cv::Range::all());

    l1 = clamp_row(int(h * (1 - line_location_a1 - 0.05)));
    l2 = clamp_row(int(h * (1 - line_location_a1)));
    line_area_a1 = frame(cv::Range(l1, l2), cv::Range::all());
    cy_a1 = l1;

    l1 = clamp_row(int(h * (1 - line_location_a2 - 0.05)));
    l2 = clamp_row(int(h * (1 - line_location_a2)));
    line_area_a2 = frame(cv::Range(l1, l2), cv::Range::all());
    cy_a2 = l1;
}
// Thin wrapper around cv::contourArea, kept for readability at call sites.
float ColorLineDetector::cnt_area(std::vector<cv::Point> cnt) {
    return static_cast<float>(cv::contourArea(cnt));
}
void ColorLineDetector::seg(cv::Mat line_area, cv::Mat line_area_a1, cv::Mat line_area_a2, std::string _line_color, cv::Point &center, int &area, cv::Point &center_a1, cv::Point &center_a2) {
int hmin, smin, vmin, hmax, smax, vmax;
if (_line_color == "black") {
hmin = 0;
smin = 0;
vmin = 0;
hmax = 180;
smax = 255;
vmax = 46;
}
else if (_line_color == "red") {
hmin = 0;
smin = 43;
vmin = 46;
hmax = 10;
smax = 255;
vmax = 255;
}
else if (_line_color == "yellow") {
hmin = 26;
smin = 43;
vmin = 46;
hmax = 34;
smax = 255;
vmax = 255;
}
else if (_line_color == "green") {
hmin = 35;
smin = 43;
vmin = 46;
hmax = 77;
smax = 255;
vmax = 255;
}
else if (_line_color == "blue") {
hmin = 100;
smin = 43;
vmin = 46;
hmax = 124;
smax = 255;
vmax = 255;
}
else {
hmin = 0;
smin = 0;
vmin = 0;
hmax = 180;
smax = 255;
vmax = 46;
}
cv::cvtColor(line_area, line_area, cv::COLOR_BGR2HSV);
cv::inRange(line_area, cv::Scalar(hmin, smin, vmin), cv::Scalar(hmax, smax, vmax), line_area);
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
cv::morphologyEx(line_area, line_area, cv::MORPH_OPEN, kernel);
std::vector<std::vector<cv::Point>> contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours(line_area, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
if (contours.size() > 0) {
cv::Rect rect = cv::boundingRect(contours[0]);
int cx = rect.x + rect.width / 2;
int cy = rect.y + rect.height / 2;
std::sort(contours.begin(), contours.end(),[](const std::vector<cv::Point>& a, const std::vector<cv::Point>& b) {return cv::contourArea(a) > cv::contourArea(b);});
area = cnt_area(contours[0]);
center = cv::Point(cx, cy);
}
cv::cvtColor(line_area_a1, line_area_a1, cv::COLOR_BGR2HSV);
cv::inRange(line_area_a1, cv::Scalar(hmin, smin, vmin), cv::Scalar(hmax, smax, vmax), line_area_a1);
//cv2.MORPH_CLOSE 先进行膨胀,再进行腐蚀操作
kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
cv::morphologyEx(line_area_a1, line_area_a1, cv::MORPH_CLOSE, kernel);
std::vector<std::vector<cv::Point>> contours_a1;
cv::findContours(line_area_a1, contours_a1, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
if (contours_a1.size() > 0) {
cv::Rect rect = cv::boundingRect(contours_a1[0]);
int cx = rect.x + rect.width / 2;
int cy = rect.y + rect.height / 2 + cy_a1;
center_a1 = cv::Point(cx - half_w, cy - half_h);
}
cv::cvtColor(line_area_a2, line_area_a2, cv::COLOR_BGR2HSV);
cv::inRange(line_area_a2, cv::Scalar(hmin, smin, vmin), cv::Scalar(hmax, smax, vmax), line_area_a2);
kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
cv::morphologyEx(line_area_a2, line_area_a2, cv::MORPH_CLOSE, kernel);
std::vector<std::vector<cv::Point>> contours_a2;
cv::findContours(line_area_a2, contours_a2, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
if (contours_a2.size() > 0) {
cv::Rect rect = cv::boundingRect(contours_a2[0]);
int cx = rect.x + rect.width / 2;
int cy = rect.y + rect.height / 2 + cy_a2;
center_a2 = cv::Point(cx - half_w, cy - half_h);
}
}
// Detect the colored line in one frame and append the result to tgts_.
// pose encodes the outcome: x = angular offset of the line from the image
// center, y = detection flag (1.0 found / -1.0 not found), z unused.
void ColorLineDetector::detect(cv::Mat img, sv::TargetsInFrame& tgts_)
{
    // Lazy one-time load of the JSON algorithm parameters.
    if (!this->is_load_parameter)
    {
        _load();
        this->is_load_parameter = true;
    }

    int area_n = -1;
    cv::Mat area_base, area_base_a1, area_base_a2;
    cv::Point cxcy_n(0, 0), center_a1_n(0, 0), center_a2_n(0, 0);

    get_line_area(img, area_base, area_base_a1, area_base_a2);
    seg(area_base, area_base_a1, area_base_a2, line_color, cxcy_n, area_n, center_a1_n, center_a2_n);

    pose.x = 0.0;
    pose.y = -1.0;
    pose.z = 0.0;

    if (area_n <= 0)
    {
        // Retry once.  BUGFIX: seg() converts its strip mats to binary masks
        // in place, so re-running it on the same mats fed single-channel
        // images to cv::cvtColor(BGR2HSV) and aborted -- re-crop fresh strips
        // from the original frame instead.  BUGFIX: the original retry also
        // read its results from the first attempt's zeroed outputs (cxcy_n
        // instead of cxcy__n); writing into the same variables fixes that.
        area_n = 0;
        get_line_area(img, area_base, area_base_a1, area_base_a2);
        seg(area_base, area_base_a1, area_base_a2, line_color, cxcy_n, area_n, center_a1_n, center_a2_n);
    }

    if (area_n > 0)
    {
        cv::circle(area_base, cv::Point(cxcy_n.x, cxcy_n.y), 4, cv::Scalar(0, 0, 255), -1);
        // Horizontal offset from the principal point, normalized by the
        // principal point and scaled into an angle via the field of view.
        double angle = (cxcy_n.x - this->camera_matrix.at<double>(0, 2)) / this->camera_matrix.at<double>(0, 2) * atan((double)(area_base.rows / 2) / this->fov_x);
        pose.x = angle;
        pose.y = 1.0;
    }

    tgts_.setSize(img.cols, img.rows);
    tgts_.setFOV(this->fov_x, this->fov_y);

    // Frame rate from the elapsed time since the previous detect() call;
    // guard against a zero-millisecond interval.
    auto t1 = std::chrono::system_clock::now();
    auto elapsed_ms = std::chrono::duration_cast<std::chrono::milliseconds>(t1 - this->_t0).count();
    tgts_.setFPS(elapsed_ms > 0 ? 1000.0 / elapsed_ms : 0.0);
    this->_t0 = std::chrono::system_clock::now();
    tgts_.setTimeNow();

    if (area_n > 0)
    {
        Target tgt;
        tgt.los_ax = pose.x;
        // Prefer the main-strip center; fall back to the auxiliary strips.
        if (cxcy_n.x != 0 || cxcy_n.y != 0)
        {
            tgt.cx = cxcy_n.x;
            tgt.cy = cxcy_n.y;
        }
        else if (center_a1_n.x != 0 || center_a1_n.y != 0)
        {
            tgt.cx = center_a1_n.x;
            tgt.cy = center_a1_n.y;
        }
        else if (center_a2_n.x != 0 || center_a2_n.y != 0)
        {
            tgt.cx = center_a2_n.x;
            tgt.cy = center_a2_n.y;
        }
        tgts_.targets.push_back(tgt);
    }
}
}

48
include/sv_color_line.h Normal file
View File

@ -0,0 +1,48 @@
#ifndef __SV_COLOR_LINE__
#define __SV_COLOR_LINE__
#include "sv_core.h"
#include <opencv2/opencv.hpp>
#include <string>
#include <chrono>
namespace sv {
// Detects a colored guidance line in camera frames by HSV thresholding of
// three horizontal strips of the image (one main strip, two auxiliary strips).
// Parameters (line color and strip positions) come from the SpireCV
// algorithm-parameters JSON file, loaded lazily on the first detect() call.
class ColorLineDetector : public CameraAlgorithm
{
public:
ColorLineDetector();
~ColorLineDetector();
// Run detection on one frame; results are appended to tgts_.
void detect(cv::Mat img_, TargetsInFrame& tgts_);
protected:
// Top row (full-frame coordinates) of auxiliary strip a1 / a2; used to
// translate strip-local contour rows back into frame rows.
float cy_a1;
float cy_a2;
// Half frame height/width; auxiliary centers are reported relative to the
// frame center using these.
float half_h;
float half_w;
// NOTE(review): appears unused by sv_color_line.cpp -- confirm before relying on it.
cv::Mat img;
// x: angular offset of the line; y: detection flag (1 found / -1 not); z: unused.
cv::Point3d pose;
// Strip positions as fractions of the frame height, measured from the bottom.
double line_location;
double line_location_a1;
double line_location_a2;
// True once the JSON parameters have been loaded.
bool is_load_parameter;
// One of "black"/"red"/"yellow"/"green"/"blue"; other values fall back to black.
std::string line_color;
// Load line_color / line_location* from the algorithm-parameters JSON file.
void _load();
// Contour area helper (wraps cv::contourArea).
float cnt_area(std::vector<cv::Point> cnt);
// Crop the three detection strips from the frame and cache cy_a1/cy_a2/half_h/half_w.
void get_line_area(cv::Mat &frame, cv::Mat &line_area, cv::Mat &line_area_a1, cv::Mat &line_area_a2);
// Threshold the strips by color and output the line centers (and main-strip area).
void seg(cv::Mat line_area, cv::Mat line_area_a1, cv::Mat line_area_a2, std::string _line_color, cv::Point &center, int &area, cv::Point &center_a1, cv::Point &center_a2);
};
}
#endif

View File

@ -5,6 +5,7 @@
#include "sv_common_det.h"
#include "sv_landing_det.h"
#include "sv_tracking.h"
#include "sv_color_line.h"
#include "sv_video_input.h"
#include "sv_video_output.h"

View File

@ -0,0 +1,185 @@
{
"CommonObjectDetector": {
"dataset": "COCO",
"inputSize": 640,
"nmsThrs": 0.6,
"scoreThrs": 0.4,
"useWidthOrHeight": 1,
"withSegmentation": true,
"datasetPersonVehicle": {
"person": [0.5, 1.8],
"car": [4.1, 1.5],
"bus": [10, 3],
"truck": [-1, -1],
"bike": [-1, -1],
"train": [-1, -1],
"boat": [-1, -1],
"aeroplane": [-1, -1]
},
"datasetDrone": {
"drone": [0.4, 0.2]
},
"datasetCOCO": {
"person": [-1, -1],
"bicycle": [-1, -1],
"car": [-1, -1],
"motorcycle": [-1, -1],
"airplane": [-1, -1],
"bus": [-1, -1],
"train": [-1, -1],
"truck": [-1, -1],
"boat": [-1, -1],
"traffic light": [-1, -1],
"fire hydrant": [-1, -1],
"stop sign": [-1, -1],
"parking meter": [-1, -1],
"bench": [-1, -1],
"bird": [-1, -1],
"cat": [-1, -1],
"dog": [-1, -1],
"horse": [-1, -1],
"sheep": [-1, -1],
"cow": [-1, -1],
"elephant": [-1, -1],
"bear": [-1, -1],
"zebra": [-1, -1],
"giraffe": [-1, -1],
"backpack": [-1, -1],
"umbrella": [-1, -1],
"handbag": [-1, -1],
"tie": [-1, -1],
"suitcase": [-1, -1],
"frisbee": [-1, -1],
"skis": [-1, -1],
"snowboard": [-1, -1],
"sports ball": [-1, -1],
"kite": [-1, -1],
"baseball bat": [-1, -1],
"baseball glove": [-1, -1],
"skateboard": [-1, -1],
"surfboard": [-1, -1],
"tennis racket": [-1, -1],
"bottle": [-1, -1],
"wine glass": [-1, -1],
"cup": [-1, -1],
"fork": [-1, -1],
"knife": [-1, -1],
"spoon": [-1, -1],
"bowl": [-1, -1],
"banana": [-1, -1],
"apple": [-1, -1],
"sandwich": [-1, -1],
"orange": [-1, -1],
"broccoli": [-1, -1],
"carrot": [-1, -1],
"hot dog": [-1, -1],
"pizza": [-1, -1],
"donut": [-1, -1],
"cake": [-1, -1],
"chair": [-1, -1],
"couch": [-1, -1],
"potted plant": [-1, -1],
"bed": [-1, -1],
"dining table": [-1, -1],
"toilet": [-1, -1],
"tv": [-1, -1],
"laptop": [-1, -1],
"mouse": [-1, -1],
"remote": [-1, -1],
"keyboard": [-1, -1],
"cell phone": [-1, -1],
"microwave": [-1, -1],
"oven": [-1, -1],
"toaster": [-1, -1],
"sink": [-1, -1],
"refrigerator": [-1, -1],
"book": [-1, -1],
"clock": [-1, -1],
"vase": [-1, -1],
"scissors": [-1, -1],
"teddy bear": [-1, -1],
"hair drier": [-1, -1],
"toothbrush": [-1, -1]
}
},
"AutoFocusObjectDetector": {
"lock_thres": 5,
"unlock_thres": 5,
"lock_scale_init": 12.0,
"lock_scale": 8.0,
"categories_filter": [],
"keep_unlocked": false,
"use_square_region": false
},
"SingleObjectTracker": {
"algorithm": "nano",
"backend": 0,
"target": 0
},
"LandingMarkerDetector": {
"labels": ["h"],
"maxCandidates": 5
},
"EllipseDetector": {
"radiusInMeter": 0.5,
"preProcessingGaussKernel": 5,
"preProcessingGaussSigma": 1.306,
"thPosition": 1.0,
"maxCenterDistance": 0.05,
"minEdgeLength": 9,
"minOrientedRectSide": 2.984,
"distanceToEllipseContour": 0.111,
"minScore": 0.511,
"minReliability": 0.470,
"ns": 22,
"percentNe": 0.946,
"T_CNC": 0.121,
"T_TCN_L": 0.468,
"T_TCN_P": 0.560,
"thRadius": 0.202
},
"ArucoDetector": {
"dictionaryId": 10,
"markerIds": [-1],
"markerLengths": [0.2],
"adaptiveThreshConstant": 7,
"adaptiveThreshWinSizeMax": 23,
"adaptiveThreshWinSizeMin": 3,
"adaptiveThreshWinSizeStep": 10,
"aprilTagCriticalRad": 0.17453292519,
"aprilTagDeglitch": 0,
"aprilTagMaxLineFitMse": 10.0,
"aprilTagMaxNmaxima": 10,
"aprilTagMinClusterPixels": 5,
"aprilTagMinWhiteBlackDiff": 5,
"aprilTagQuadDecimate": 0.0,
"aprilTagQuadSigma": 0.0,
"cornerRefinementMaxIterations": 30,
"cornerRefinementMethod": 0,
"cornerRefinementMinAccuracy": 0.1,
"cornerRefinementWinSize": 5,
"detectInvertedMarker": false,
"errorCorrectionRate": 0.6,
"markerBorderBits": 1,
"maxErroneousBitsInBorderRate": 0.35,
"maxMarkerPerimeterRate": 4.0,
"minCornerDistanceRate": 0.05,
"minDistanceToBorder": 3,
"minMarkerDistanceRate": 0.05,
"minMarkerLengthRatioOriginalImg": 0,
"minMarkerPerimeterRate": 0.03,
"minOtsuStdDev": 5.0,
"minSideLengthCanonicalImg": 32,
"perspectiveRemoveIgnoredMarginPerCell": 0.13,
"perspectiveRemovePixelPerCell": 4,
"polygonalApproxAccuracyRate": 0.03,
"useAruco3Detection": false
},
"ColorLineDetector": {
"line_color": "black",
"line_location": 0.5,
"line_location_a1": 0.3,
"line_location_a2": 0.7,
}
}

View File

@ -0,0 +1,20 @@
%YAML:1.0
---
calibration_time: "2021年01月12日 星期二 18时08分01秒"
image_width: 1280
image_height: 720
flags: 0
camera_matrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 7.9379415710551370e+02, 0., 2.9783879354295328e+02, 0.,
7.9491985564466654e+02, 3.0942416136837386e+02, 0., 0., 1. ]
distortion_coefficients: !!opencv-matrix
rows: 1
cols: 5
dt: d
data: [ 2.0950200339181715e-01, -1.1587468096518483e+00,
5.5342063671841328e-03, 2.2214393775334758e-04,
1.7127431916651392e+00 ]
avg_reprojection_error: 2.8342964851391211e-01

View File

@ -0,0 +1,20 @@
%YAML:1.0
---
calibration_time: "2021年01月12日 星期二 18时08分01秒"
image_width: 1920
image_height: 1080
flags: 0
camera_matrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 7.9379415710551370e+02, 0., 2.9783879354295328e+02, 0.,
7.9491985564466654e+02, 3.0942416136837386e+02, 0., 0., 1. ]
distortion_coefficients: !!opencv-matrix
rows: 1
cols: 5
dt: d
data: [ 2.0950200339181715e-01, -1.1587468096518483e+00,
5.5342063671841328e-03, 2.2214393775334758e-04,
1.7127431916651392e+00 ]
avg_reprojection_error: 2.8342964851391211e-01

View File

@ -0,0 +1,20 @@
%YAML:1.0
---
calibration_time: "2021年01月12日 星期二 18时08分01秒"
image_width: 640
image_height: 480
flags: 0
camera_matrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 7.9379415710551370e+02, 0., 2.9783879354295328e+02, 0.,
7.9491985564466654e+02, 3.0942416136837386e+02, 0., 0., 1. ]
distortion_coefficients: !!opencv-matrix
rows: 1
cols: 5
dt: d
data: [ 2.0950200339181715e-01, -1.1587468096518483e+00,
5.5342063671841328e-03, 2.2214393775334758e-04,
1.7127431916651392e+00 ]
avg_reprojection_error: 2.8342964851391211e-01

View File

@ -0,0 +1,69 @@
#include <iostream>
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
using namespace std;
using namespace sv;
// Demo: color-line detection on a local webcam stream.
int main(int argc, char *argv[]) {
    // Instantiate the color-line detector.
    sv::ColorLineDetector cld;
    // Load camera parameters manually.  With an Amov gimbal/camera (e.g. G1)
    // this step can be skipped; the parameter file is downloaded automatically.
    cld.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");

    // Open the webcam at 640x480.
    sv::Camera cap;
    cap.setWH(640, 480);
    //cap.setFps(30);
    cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0

    // Single frame buffer.
    cv::Mat img;
    int frame_id = 0;
    while (1)
    {
        // Per-frame detection results.
        sv::TargetsInFrame tgts(frame_id++);
        // Grab a frame and resize it to the detector's working resolution.
        cap.read(img);
        cv::resize(img, img, cv::Size(cld.image_width, cld.image_height));

        // Run color-line detection.
        cld.detect(img, tgts);
        // Overlay the detections on img.
        sv::drawTargetsInFrame(img, tgts);

        // Console report: frame index, FPS, FOV (degrees), frame size.
        printf("Frame-[%d]\n", frame_id);
        printf("  FPS = %.2f\n", tgts.fps);
        printf("  FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
        printf("  Frame Size (width, height) = (%d, %d)\n", tgts.width, tgts.height);
        for (size_t i = 0; i < tgts.targets.size(); i++)
        {
            // BUGFIX: the original format string had four conversion
            // specifiers but only two arguments (undefined behavior); print
            // exactly the two center values that are supplied.
            printf("  Color Line detect Center (cx, cy) = (%.3f, %.3f)\n",
                tgts.targets[i].cx, tgts.targets[i].cy);
            // Line-of-sight angles of the detected line (camera-FOV related).
            printf("  Color Line detect Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[i].los_ax, tgts.targets[i].los_ay);
        }
        // Show the annotated frame.
        cv::imshow("img", img);
        cv::waitKey(10);
    }
    return 0;
}

View File

@ -92,7 +92,7 @@ int main(int argc, char *argv[])
// 实例化SpireCV的 单帧检测结果 接口类 TargetsInFrame
sv::TargetsInFrame tgts(frame_id++);
// 读取一帧图像到img
gimbal.cap.read(img);
cap.read(img);
cv::resize(img, img, cv::Size(cod.image_width, cod.image_height));
// 执行通用目标检测
@ -159,7 +159,7 @@ int main(int argc, char *argv[])
// 实例化SpireCV的 单帧检测结果 接口类 TargetsInFrame
sv::TargetsInFrame tgts(frame_id++);
// 读取一帧图像到img
gimbal.cap.read(img);
cap.read(img);
cv::resize(img, img, cv::Size(sot.image_width, sot.image_height));
// 开始 单目标跟踪 逻辑
// 是否有新的目标被手动框选

View File

@ -68,7 +68,7 @@ int main(int argc, char *argv[])
// 实例化SpireCV的 单帧检测结果 接口类 TargetsInFrame
sv::TargetsInFrame tgts(frame_id++);
// 读取一帧图像到img
gimbal.cap.read(img);
cap.read(img);
cv::resize(img, img, cv::Size(lmd.image_width, lmd.image_height));
// 执行 降落标志 检测
@ -117,12 +117,12 @@ void onMouse(int event, int x, int y, int, void *)
{
if (event == cv::EVENT_LBUTTONDOWN)
{
gimbal.gimbal->setAngleRateEuler(0, 0.02 * (y - 720 / 2), 0.02 * (x - 1280 / 2));
gimbal->setAngleRateEuler(0, 0.02 * (y - 720 / 2), 0.02 * (x - 1280 / 2));
}
if (event == cv::EVENT_RBUTTONDOWN)
{
gimbalNoTrack();
gimbal.gimbal->setHome();
gimbal->setHome();
}
}

View File

@ -47,8 +47,8 @@ int main(int argc, char *argv[])
gimbal = new sv::Gimbal(sv::GimbalType::G1, sv::GimbalLink::SERIAL);
// 使用 /dev/ttyUSB0 作为控制串口
gimbal->setSerialPort("/dev/ttyUSB0");
// 以GimableCallback作为状态回调函数启用吊舱控制
gimbal->open(GimableCallback);
// 以gimableCallback作为状态回调函数启用吊舱控制
gimbal->open(gimableCallback);
// 定义相机
sv::Camera cap;
// 设置相机流媒体地址为 192.168.2.64
@ -67,7 +67,7 @@ int main(int argc, char *argv[])
// 实例化Aruco检测器类
sv::ArucoDetector ad;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
ad.loadCameraParams("/home/amov/SpireCV/calib_webcam_640x480.yaml");
ad.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
sv::UDPServer udp;
// 实例化OpenCV的Mat类用于内存单帧图像
@ -78,7 +78,7 @@ int main(int argc, char *argv[])
// 实例化SpireCV的 单帧检测结果 接口类 TargetsInFrame
sv::TargetsInFrame tgts(frame_id++);
// 读取一帧图像到img
gimbal->cap.read(img);
cap.read(img);
// 执行Aruco二维码检测
ad.detect(img, tgts);
@ -88,18 +88,9 @@ int main(int argc, char *argv[])
tgts.pod_roll = gimbalEulerAngle[1];
tgts.pod_yaw = gimbalEulerAngle[0];
tgts.has_uav_pos = true;
tgts.longitude = 1.1234567;
tgts.latitude = 2.2345678;
tgts.altitude = 3.3456789;
tgts.has_uav_vel = true;
tgts.uav_vx = 4;
tgts.uav_vy = 5;
tgts.uav_vz = 6;
tgts.has_ill = true;
tgts.illumination = 7;
tgts.has_uav_pos = false;
tgts.has_uav_vel = false;
tgts.has_ill = false;
// www.write(img, tgts);
udp.send(tgts);
@ -118,10 +109,10 @@ void onMouse(int event, int x, int y, int, void *)
{
if (event == cv::EVENT_LBUTTONDOWN)
{
gimbal->gimbal->setAngleRateEuler(0, 0.02 * (y - 720 / 2), 0.02 * (x - 1280 / 2));
gimbal->setAngleRateEuler(0, 0.02 * (y - 720 / 2), 0.02 * (x - 1280 / 2));
}
if (event == cv::EVENT_RBUTTONDOWN)
{
gimbal->gimbal->setHome();
gimbal->setHome();
}
}

View File

@ -12,7 +12,6 @@ if [ ! -d ${root_dir} ]; then
fi
cd ${root_dir}
git clone https://gitee.com/jario-jin/nv-codec-headers.git
cd nv-codec-headers
git checkout n11.1.5.0