!7 add color line detection and MIPI camera interface.

Merge pull request !7 from Daniel/lx
This commit is contained in:
jario-jin 2023-07-05 07:55:14 +00:00 committed by Gitee
commit d14db1140d
8 changed files with 401 additions and 1 deletions

View File

@ -69,6 +69,7 @@ include_directories(
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/cuda
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/landing_det/cuda
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/tracking/ocv470
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/color_line
${CMAKE_CURRENT_SOURCE_DIR}/video_io
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/ellipse_det
${CMAKE_CURRENT_SOURCE_DIR}/utils
@ -108,6 +109,7 @@ set(
include/sv_common_det.h
include/sv_landing_det.h
include/sv_tracking.h
include/sv_color_line.h
include/sv_video_input.h
include/sv_video_output.h
include/sv_world.h
@ -148,10 +150,13 @@ set(spirecv_SRCS
algorithm/common_det/sv_common_det.cpp
algorithm/landing_det/sv_landing_det.cpp
algorithm/tracking/sv_tracking.cpp
algorithm/color_line/sv_color_line.cpp
)
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/tracking/ocv470/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/color_line/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/utils/*.cpp)
@ -252,6 +257,8 @@ add_executable(LandingMarkerDetection samples/demo/landing_marker_detection.cpp)
target_link_libraries(LandingMarkerDetection sv_world)
add_executable(SingleObjectTracking samples/demo/single_object_tracking.cpp)
target_link_libraries(SingleObjectTracking sv_world)
add_executable(ColorLineDetection samples/demo/color_line_detect.cpp)
target_link_libraries(ColorLineDetection sv_world)
add_executable(UdpDetectionInfoReceiver samples/demo/udp_detection_info_receiver.cpp)
target_link_libraries(UdpDetectionInfoReceiver sv_world)
add_executable(UdpDetectionInfoSender samples/demo/udp_detection_info_sender.cpp)

View File

@ -0,0 +1,263 @@
#include "sv_color_line.h"
#include "gason.h"
#include "sv_util.h"
#include <cmath>
#include <fstream>
namespace sv
{
ColorLineDetector::ColorLineDetector()
{
this->is_load_parameter = false;
}
ColorLineDetector::~ColorLineDetector()
{
}
void ColorLineDetector::_load()
{
JsonValue all_value;
JsonAllocator allocator;
_load_all_json(this->alg_params_fn, all_value, allocator);
JsonValue colorliner_params_value;
_parser_algorithm_params("ColorLineDetector", all_value, colorliner_params_value);
for (auto i : colorliner_params_value)
{
if ("line_color" == std::string(i->key))
{
this->line_color = i->value.toString();
std::cout << "line_color: " << this->line_color << std::endl;
}
else if ("line_location" == std::string(i->key))
{
this->line_location = i->value.toNumber();
}
else if ("line_location_a1" == std::string(i->key))
{
this->line_location_a1 = i->value.toNumber();
}
else if ("line_location_a2" == std::string(i->key))
{
this->line_location_a2 = i->value.toNumber();
}
}
}
void ColorLineDetector::get_line_area(cv::Mat &frame_, cv::Mat &line_area_, cv::Mat &line_area_a1_, cv::Mat &line_area_a2_)
{
int h = frame_.rows;
_half_h = h / 2.0;
_half_w = frame_.cols / 2.0;
// Each detection area is a horizontal strip 5% of the frame height tall,
// positioned line_location (resp. _a1, _a2) of the way up from the bottom of the frame.
int l1 = int(h * (1 - line_location - 0.05));
int l2 = int(h * (1 - line_location));
line_area_ = frame_(cv::Range(l1, l2), cv::Range::all());
l1 = int(h * (1 - line_location_a1 - 0.05));
l2 = int(h * (1 - line_location_a1));
line_area_a1_ = frame_(cv::Range(l1, l2), cv::Range::all());
_cy_a1 = l1;
l1 = int(h * (1 - line_location_a2 - 0.05));
l2 = int(h * (1 - line_location_a2));
_cy_a2 = l1;
line_area_a2_ = frame_(cv::Range(l1, l2), cv::Range::all());
}
float ColorLineDetector::cnt_area(std::vector<cv::Point> cnt_)
{
float area = cv::contourArea(cnt_);
return area;
}
void ColorLineDetector::seg(cv::Mat line_area_, cv::Mat line_area_a1_, cv::Mat line_area_a2_, std::string line_color_, cv::Point &center_, int &area_, cv::Point &center_a1_, cv::Point &center_a2_)
{
// Default HSV thresholds per supported line color (OpenCV hue range 0-180).
// Note that only the low-hue red band (0-10) is covered; the 156-180 band is not.
int hmin, smin, vmin, hmax, smax, vmax;
if (line_color_ == "black")
{
hmin = 0;
smin = 0;
vmin = 0;
hmax = 180;
smax = 255;
vmax = 46;
}
else if (line_color_ == "red")
{
hmin = 0;
smin = 43;
vmin = 46;
hmax = 10;
smax = 255;
vmax = 255;
}
else if (line_color_ == "yellow")
{
hmin = 26;
smin = 43;
vmin = 46;
hmax = 34;
smax = 255;
vmax = 255;
}
else if (line_color_ == "green")
{
hmin = 35;
smin = 43;
vmin = 46;
hmax = 77;
smax = 255;
vmax = 255;
}
else if (line_color_ == "blue")
{
hmin = 100;
smin = 43;
vmin = 46;
hmax = 124;
smax = 255;
vmax = 255;
}
else
{
hmin = 0;
smin = 0;
vmin = 0;
hmax = 180;
smax = 255;
vmax = 46;
}
// Convert to HSV via a temporary so the caller's strip (a view into the input frame) is left untouched.
cv::Mat hsv;
cv::cvtColor(line_area_, hsv, cv::COLOR_BGR2HSV);
cv::inRange(hsv, cv::Scalar(hmin, smin, vmin), cv::Scalar(hmax, smax, vmax), line_area_);
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
cv::morphologyEx(line_area_, line_area_, cv::MORPH_OPEN, kernel);
std::vector<std::vector<cv::Point>> contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours(line_area_, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
if (contours.size() > 0)
{
// Sort by area (largest first) so the centre and area both refer to the largest contour.
std::sort(contours.begin(), contours.end(), [](const std::vector<cv::Point> &a, const std::vector<cv::Point> &b)
{ return cv::contourArea(a) > cv::contourArea(b); });
cv::Rect rect = cv::boundingRect(contours[0]);
int cx = rect.x + rect.width / 2;
int cy = rect.y + rect.height / 2;
area_ = cnt_area(contours[0]);
center_ = cv::Point(cx, cy);
}
cv::cvtColor(line_area_a1_, hsv, cv::COLOR_BGR2HSV);
cv::inRange(hsv, cv::Scalar(hmin, smin, vmin), cv::Scalar(hmax, smax, vmax), line_area_a1_);
// cv::MORPH_CLOSE: dilate first, then erode, to close small gaps in the mask.
kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
cv::morphologyEx(line_area_a1_, line_area_a1_, cv::MORPH_CLOSE, kernel);
std::vector<std::vector<cv::Point>> contours_a1;
cv::findContours(line_area_a1_, contours_a1, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
if (contours_a1.size() > 0){
cv::Rect rect = cv::boundingRect(contours_a1[0]);
int cx = rect.x + rect.width / 2;
int cy = rect.y + rect.height / 2 + _cy_a1;
center_a1_ = cv::Point(cx - _half_w, cy - _half_h);
}
cv::cvtColor(line_area_a2_, hsv, cv::COLOR_BGR2HSV);
cv::inRange(hsv, cv::Scalar(hmin, smin, vmin), cv::Scalar(hmax, smax, vmax), line_area_a2_);
kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
cv::morphologyEx(line_area_a2_, line_area_a2_, cv::MORPH_CLOSE, kernel);
std::vector<std::vector<cv::Point>> contours_a2;
cv::findContours(line_area_a2_, contours_a2, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
if (contours_a2.size() > 0)
{
cv::Rect rect = cv::boundingRect(contours_a2[0]);
int cx = rect.x + rect.width / 2;
int cy = rect.y + rect.height / 2 + _cy_a2;
center_a2_ = cv::Point(cx - _half_w, cy - _half_h);
}
}
void ColorLineDetector::detect(cv::Mat img_, sv::TargetsInFrame &tgts_)
{
if (!this->is_load_parameter)
{
_load();
this->is_load_parameter = true;
}
int area_n = -1;
cv::Mat area_base, area_base_a1, area_base_a2;
cv::Point cxcy_n(0, 0), center_a1_n(0, 0), center_a2_n(0, 0);
get_line_area(img_, area_base, area_base_a1, area_base_a2);
seg(area_base, area_base_a1, area_base_a2, line_color, cxcy_n, area_n, center_a1_n, center_a2_n);
pose.x = 0.0;
pose.y = -1.0;
pose.z = 0.0;
if (area_n > 0)
{
circle(area_base, cv::Point(cxcy_n.x, cxcy_n.y), 4, cv::Scalar(0, 0, 255), -1);
double angle = (cxcy_n.x - this->camera_matrix.at<double>(0, 2)) / this->camera_matrix.at<double>(0, 2) * atan((double)(area_base.rows / 2) / this->fov_x);
pose.x = angle;
pose.y = 1.0;
}
else
{
// Retry the segmentation once with the area counter reset before giving up on this frame.
area_n = 0;
seg(area_base, area_base_a1, area_base_a2, line_color, cxcy_n, area_n, center_a1_n, center_a2_n);
if (area_n > 0)
{
circle(area_base, cv::Point(cxcy_n.x, cxcy_n.y), 4, cv::Scalar(0, 0, 255), -1);
double angle = (cxcy_n.x - this->camera_matrix.at<double>(0, 2)) / this->camera_matrix.at<double>(0, 2) * atan((double)(area_base.rows / 2) / this->fov_x);
pose.x = angle;
pose.y = 1.0;
pose.z = 0.0;
}
}
tgts_.setSize(img_.cols, img_.rows);
tgts_.setFOV(this->fov_x, this->fov_y);
auto t1 = std::chrono::system_clock::now();
tgts_.setFPS(1000.0 / std::chrono::duration_cast<std::chrono::milliseconds>(t1 - this->_t0).count());
this->_t0 = std::chrono::system_clock::now();
tgts_.setTimeNow();
if (area_n > 0)
{
Target tgt;
tgt.los_ax = pose.x;
if (cxcy_n.x != 0 || cxcy_n.y != 0)
{
tgt.cx = cxcy_n.x;
tgt.cy = cxcy_n.y;
}
else if (center_a1_n.x != 0 || center_a1_n.y != 0)
{
tgt.cx = center_a1_n.x;
tgt.cy = center_a1_n.y;
}
else if (center_a2_n.x != 0 || center_a2_n.y != 0)
{
tgt.cx = center_a2_n.x;
tgt.cy = center_a2_n.y;
}
tgts_.targets.push_back(tgt);
}
}
}
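How detect() reports its result, as read from the code above: pose.y is set to 1.0 when a line is found and -1.0 otherwise, and pose.x carries the signed angular offset of the line from the image centre. A minimal consumer sketch under those assumptions (the surrounding setup mirrors the demo further down; it is illustrative, not part of this patch):

#include <sv_world.h>

// Sketch only: consuming ColorLineDetector::pose directly, per the semantics read from detect() above.
// Assumes cld already has its algorithm parameters and camera parameters loaded, as in the demo further down.
void steerFromLine(sv::ColorLineDetector &cld, cv::Mat &img)
{
    sv::TargetsInFrame tgts(0);
    cld.detect(img, tgts);
    if (cld.pose.y > 0)              // detect() sets pose.y to 1.0 when a line was found, -1.0 otherwise
    {
        double steer = cld.pose.x;   // signed angular offset; negative when the line lies left of the principal point
        // hypothetical downstream use: feed 'steer' into a line-following controller
        (void)steer;
    }
}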

include/sv_color_line.h Normal file
View File

@ -0,0 +1,43 @@
#ifndef __SV_COLOR_LINE__
#define __SV_COLOR_LINE__
#include "sv_core.h"
#include <opencv2/opencv.hpp>
#include <string>
#include <chrono>
namespace sv
{
class ColorLineDetector : public CameraAlgorithm
{
public:
ColorLineDetector();
~ColorLineDetector();
void detect(cv::Mat img_, TargetsInFrame &tgts_);
cv::Point3d pose;        // x: angular offset of the detected line; y: 1 if a line was found, -1 otherwise; z: unused
double line_location;    // main detection strip, as a fraction of frame height from the bottom
double line_location_a1; // auxiliary detection strips
double line_location_a2;
bool is_load_parameter;
std::string line_color;  // "black", "red", "yellow", "green" or "blue"
protected:
float _cy_a1;
float _cy_a2;
float _half_h;
float _half_w;
void _load();
float cnt_area(std::vector<cv::Point> cnt_);
void get_line_area(cv::Mat &frame_, cv::Mat &line_area_, cv::Mat &line_area_a1_, cv::Mat &line_area_a2_);
void seg(cv::Mat line_area_, cv::Mat line_area_a1_, cv::Mat line_area_a2_, std::string line_color_, cv::Point &center_, int &area_, cv::Point &center_a1_, cv::Point &center_a2_);
};
}
#endif

View File

@ -323,7 +323,7 @@ protected:
};
enum class CameraType {NONE, WEBCAM, G1, Q10};
enum class CameraType {NONE, WEBCAM, G1, Q10, MIPI};
class CameraBase {
public:

View File

@ -5,6 +5,7 @@
#include "sv_common_det.h"
#include "sv_landing_det.h"
#include "sv_tracking.h"
#include "sv_color_line.h"
#include "sv_video_input.h"
#include "sv_video_output.h"

View File

@ -174,5 +174,12 @@
"perspectiveRemovePixelPerCell": 4,
"polygonalApproxAccuracyRate": 0.03,
"useAruco3Detection": false
},
"ColorLineDetector": {
"line_color": "black",
"line_location": 0.5,
"line_location_a1": 0.3,
"line_location_a2": 0.7
}
}
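For reference, line_location, line_location_a1 and line_location_a2 are fractions of the frame height measured from the bottom; get_line_area() in sv_color_line.cpp turns each into a strip about 5% of the frame height tall (roughly 24 rows at 480p). A small sketch of the same arithmetic for the defaults above (illustrative only, not part of this patch):

#include <cstdio>

int main()
{
    // Same formulas as ColorLineDetector::get_line_area(), applied to the default locations above.
    int h = 480;                               // frame height in rows
    double locs[3] = {0.5, 0.3, 0.7};          // line_location, line_location_a1, line_location_a2
    for (double loc : locs)
    {
        int top = int(h * (1 - loc - 0.05));   // first row of the strip
        int bot = int(h * (1 - loc));          // one past the last row of the strip
        std::printf("location %.2f -> rows [%d, %d)\n", loc, top, bot);
    }
    return 0;
}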

View File

@ -0,0 +1,62 @@
#include <iostream>
#include <string>
// Include the SpireCV SDK header
#include <sv_world.h>
using namespace std;
using namespace sv;
int main(int argc, char *argv[])
{
// Instantiate the color line detector
sv::ColorLineDetector cld;
// Load camera parameters manually; with an Amov gimbal or camera such as the G1, this step can be skipped and the camera parameter file is downloaded automatically
cld.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// Open the camera
sv::Camera cap;
cap.setWH(640, 480);
// cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
// Instantiate an OpenCV Mat to hold a single frame
cv::Mat img;
int frame_id = 0;
while (1)
{
// SpireCV container for the detection results of a single frame
sv::TargetsInFrame tgts(frame_id++);
// Read one frame into img
cap.read(img);
cv::resize(img, img, cv::Size(cld.image_width, cld.image_height));
// Run color line detection
cld.detect(img, tgts);
// Overlay the detection results on img
sv::drawTargetsInFrame(img, tgts);
// Print the color line detection results to the console
printf("Frame-[%d]\n", frame_id);
// Print the current detection FPS
printf(" FPS = %.2f\n", tgts.fps);
// Print the camera field of view (in degrees)
printf(" FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
// Print the pixel width and height of the input image
printf(" Frame Size (width, height) = (%d, %d)\n", tgts.width, tgts.height);
for (int i = 0; i < tgts.targets.size(); i++)
{
// Print the center position (cx, cy) of each detected color line
printf(" Color Line detect Center (cx, cy) = (%.3f, %.3f)\n",
tgts.targets[i].cx, tgts.targets[i].cy);
// Print the x-direction line-of-sight angle of each color line (related to the camera field of view)
printf(" Color Line detect Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[i].los_ax, tgts.targets[i].los_ay);
}
// Show the annotated image
cv::imshow("img", img);
cv::waitKey(10);
}
return 0;
}

View File

@ -70,6 +70,23 @@ void Camera::openImpl()
sprintf(pipe, "rtspsrc location=rtsp://%s:%d/H264?W=%d&H=%d&FPS=%d&BR=4000000 latency=100 ! application/x-rtp,media=video ! rtph264depay ! parsebin ! nvv4l2decoder enable-max-performance=1 ! nvvidconv ! video/x-raw,format=(string)BGRx ! videoconvert ! appsink sync=false", this->_ip.c_str(), this->_port, this->_width, this->_height, this->_fps);
this->_cap.open(pipe, cv::CAP_GSTREAMER);
}
else if (this->_type == CameraType::MIPI)
{
char pipe[512];
// Fall back to sensible defaults for MIPI CSI cameras when no resolution or frame rate was set.
if (this->_width <= 0 || this->_height <= 0)
{
this->_width = 1280;
this->_height = 720;
}
if (this->_fps <= 0)
{
this->_fps = 30;
}
// Build the nvarguscamerasrc pipeline and open it directly through the GStreamer backend.
sprintf(pipe, "nvarguscamerasrc framerate=(fraction)%d/1 ! nvvidconv flip-method=0 ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink", this->_fps, this->_width, this->_height);
this->_cap.open(pipe, cv::CAP_GSTREAMER);
}
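This patch does not include a sample that selects the new MIPI type from user code; presumably it follows the same pattern as the webcam demo above. A hedged sketch under that assumption (CameraType::MIPI comes from the sv_core change above; setWH and open are the calls used in the demo, setFps appears there commented out):

#include <sv_world.h>

int main()
{
    // Sketch only: open the MIPI CSI camera by analogy with the webcam demo above.
    // The camera id passed to open() is not used by the nvarguscamerasrc pipeline built in openImpl().
    sv::Camera cap;
    cap.setWH(1280, 720);   // optional: openImpl() falls back to 1280x720 if unset
    cap.setFps(30);         // optional: defaults to 30 if unset
    cap.open(sv::CameraType::MIPI, 0);

    cv::Mat img;
    while (1)
    {
        cap.read(img);
        if (img.empty()) continue;   // skip frames that failed to arrive
        cv::imshow("mipi", img);
        cv::waitKey(10);
    }
    return 0;
}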
}