Modified some variables and abbreviations, and fixed some file path bugs.

This commit is contained in:
lxm 2023-07-03 12:19:38 +08:00
parent e358c825d3
commit 0e6fcf98ca
16 changed files with 185 additions and 187 deletions

View File

@ -4,8 +4,11 @@
#include <cmath> #include <cmath>
#include <fstream> #include <fstream>
namespace sv
{
namespace sv {
ColorLineDetector::ColorLineDetector() ColorLineDetector::ColorLineDetector()
{ {
this->is_load_parameter = false; this->is_load_parameter = false;
@ -17,60 +20,67 @@ ColorLineDetector::~ColorLineDetector()
void ColorLineDetector::_load() void ColorLineDetector::_load()
{ {
JsonValue all_value; JsonValue all_value;
JsonAllocator allocator; JsonAllocator allocator;
_load_all_json(this->alg_params_fn, all_value, allocator); _load_all_json(this->alg_params_fn, all_value, allocator);
JsonValue colorliner_params_value; JsonValue colorliner_params_value;
_parser_algorithm_params("ColorLineDetector", all_value, colorliner_params_value); _parser_algorithm_params("ColorLineDetector", all_value, colorliner_params_value);
for (auto i : colorliner_params_value) {
if ("line_color" == std::string(i->key)) {
this->line_color = i->value.toString();
std::cout << "line_color: " << this->line_color << std::endl;
}
else if ("line_location" == std::string(i->key)) {
this->line_location = i->value.toNumber();
}
else if ("line_location_a1" == std::string(i->key)) {
this->line_location_a1 = i->value.toNumber();
}
else if ("line_location_a2" == std::string(i->key)) {
this->line_location_a2 = i->value.toNumber();
}
}
for (auto i : colorliner_params_value)
{
if ("line_color" == std::string(i->key))
{
this->line_color = i->value.toString();
std::cout << "line_color: " << this->line_color << std::endl;
}
else if ("line_location" == std::string(i->key))
{
this->line_location = i->value.toNumber();
}
else if ("line_location_a1" == std::string(i->key))
{
this->line_location_a1 = i->value.toNumber();
}
else if ("line_location_a2" == std::string(i->key))
{
this->line_location_a2 = i->value.toNumber();
}
}
} }
void ColorLineDetector::get_line_area(cv::Mat &frame, cv::Mat &line_area, cv::Mat &line_area_a1, cv::Mat &line_area_a2) { void ColorLineDetector::get_line_area(cv::Mat &frame_, cv::Mat &line_area_, cv::Mat &line_area_a1_, cv::Mat &line_area_a2_)
{
int h = frame.rows;
half_h = h / 2.0; int h = frame_.rows;
half_w = frame.cols / 2.0; _half_h = h / 2.0;
_half_w = frame_.cols / 2.0;
int l1 = int(h * (1 - line_location - 0.05)); int l1 = int(h * (1 - line_location - 0.05));
int l2 = int(h * (1 - line_location)); int l2 = int(h * (1 - line_location));
line_area = frame(cv::Range(l1, l2), cv::Range::all()); line_area_ = frame_(cv::Range(l1, l2), cv::Range::all());
l1 = int(h * (1 - line_location_a1 - 0.05)); l1 = int(h * (1 - line_location_a1 - 0.05));
l2 = int(h * (1 - line_location_a1)); l2 = int(h * (1 - line_location_a1));
line_area_a1 = frame(cv::Range(l1, l2), cv::Range::all()); line_area_a1_ = frame_(cv::Range(l1, l2), cv::Range::all());
cy_a1 = l1; _cy_a1 = l1;
l1 = int(h * (1 - line_location_a2 - 0.05)); l1 = int(h * (1 - line_location_a2 - 0.05));
l2 = int(h * (1 - line_location_a2)); l2 = int(h * (1 - line_location_a2));
cy_a2 = l1; _cy_a2 = l1;
line_area_a2 = frame(cv::Range(l1, l2), cv::Range::all()); line_area_a2_ = frame_(cv::Range(l1, l2), cv::Range::all());
} }
float ColorLineDetector::cnt_area(std::vector<cv::Point> cnt) { float ColorLineDetector::cnt_area(std::vector<cv::Point> cnt_)
float area = cv::contourArea(cnt); {
float area = cv::contourArea(cnt_);
return area; return area;
} }
void ColorLineDetector::seg(cv::Mat line_area, cv::Mat line_area_a1, cv::Mat line_area_a2, std::string _line_color, cv::Point &center, int &area, cv::Point &center_a1, cv::Point &center_a2) { void ColorLineDetector::seg(cv::Mat line_area_, cv::Mat line_area_a1_, cv::Mat line_area_a2_, std::string line_color_, cv::Point &center_, int &area_, cv::Point &center_a1_, cv::Point &center_a2_)
{
int hmin, smin, vmin, hmax, smax, vmax; int hmin, smin, vmin, hmax, smax, vmax;
if (_line_color == "black") { if (line_color_ == "black")
{
hmin = 0; hmin = 0;
smin = 0; smin = 0;
vmin = 0; vmin = 0;
@ -78,7 +88,8 @@ void ColorLineDetector::seg(cv::Mat line_area, cv::Mat line_area_a1, cv::Mat lin
smax = 255; smax = 255;
vmax = 46; vmax = 46;
} }
else if (_line_color == "red") { else if (line_color_ == "red")
{
hmin = 0; hmin = 0;
smin = 43; smin = 43;
vmin = 46; vmin = 46;
@ -86,7 +97,8 @@ void ColorLineDetector::seg(cv::Mat line_area, cv::Mat line_area_a1, cv::Mat lin
smax = 255; smax = 255;
vmax = 255; vmax = 255;
} }
else if (_line_color == "yellow") { else if (line_color_ == "yellow")
{
hmin = 26; hmin = 26;
smin = 43; smin = 43;
vmin = 46; vmin = 46;
@ -94,7 +106,8 @@ void ColorLineDetector::seg(cv::Mat line_area, cv::Mat line_area_a1, cv::Mat lin
smax = 255; smax = 255;
vmax = 255; vmax = 255;
} }
else if (_line_color == "green") { else if (line_color_ == "green")
{
hmin = 35; hmin = 35;
smin = 43; smin = 43;
vmin = 46; vmin = 46;
@ -102,7 +115,8 @@ void ColorLineDetector::seg(cv::Mat line_area, cv::Mat line_area_a1, cv::Mat lin
smax = 255; smax = 255;
vmax = 255; vmax = 255;
} }
else if (_line_color == "blue") { else if (line_color_ == "blue")
{
hmin = 100; hmin = 100;
smin = 43; smin = 43;
vmin = 46; vmin = 46;
@ -110,7 +124,8 @@ void ColorLineDetector::seg(cv::Mat line_area, cv::Mat line_area_a1, cv::Mat lin
smax = 255; smax = 255;
vmax = 255; vmax = 255;
} }
else { else
{
hmin = 0; hmin = 0;
smin = 0; smin = 0;
vmin = 0; vmin = 0;
@ -119,134 +134,130 @@ void ColorLineDetector::seg(cv::Mat line_area, cv::Mat line_area_a1, cv::Mat lin
vmax = 46; vmax = 46;
} }
cv::cvtColor(line_area, line_area, cv::COLOR_BGR2HSV); cv::cvtColor(line_area_, line_area_, cv::COLOR_BGR2HSV);
cv::inRange(line_area, cv::Scalar(hmin, smin, vmin), cv::Scalar(hmax, smax, vmax), line_area); cv::inRange(line_area_, cv::Scalar(hmin, smin, vmin), cv::Scalar(hmax, smax, vmax), line_area_);
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5)); cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
cv::morphologyEx(line_area, line_area, cv::MORPH_OPEN, kernel); cv::morphologyEx(line_area_, line_area_, cv::MORPH_OPEN, kernel);
std::vector<std::vector<cv::Point>> contours; std::vector<std::vector<cv::Point>> contours;
std::vector<cv::Vec4i> hierarchy; std::vector<cv::Vec4i> hierarchy;
cv::findContours(line_area, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE); cv::findContours(line_area_, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
if (contours.size() > 0) { if (contours.size() > 0)
{
cv::Rect rect = cv::boundingRect(contours[0]); cv::Rect rect = cv::boundingRect(contours[0]);
int cx = rect.x + rect.width / 2; int cx = rect.x + rect.width / 2;
int cy = rect.y + rect.height / 2; int cy = rect.y + rect.height / 2;
std::sort(contours.begin(), contours.end(),[](const std::vector<cv::Point>& a, const std::vector<cv::Point>& b) {return cv::contourArea(a) > cv::contourArea(b);}); std::sort(contours.begin(), contours.end(),[](const std::vector<cv::Point> &a, const std::vector<cv::Point> &b)
area = cnt_area(contours[0]); {return cv::contourArea(a) > cv::contourArea(b);});
center = cv::Point(cx, cy); area_ = cnt_area(contours[0]);
center_ = cv::Point(cx, cy);
} }
cv::cvtColor(line_area_a1, line_area_a1, cv::COLOR_BGR2HSV); cv::cvtColor(line_area_a1_, line_area_a1_, cv::COLOR_BGR2HSV);
cv::inRange(line_area_a1, cv::Scalar(hmin, smin, vmin), cv::Scalar(hmax, smax, vmax), line_area_a1); cv::inRange(line_area_a1_, cv::Scalar(hmin, smin, vmin), cv::Scalar(hmax, smax, vmax), line_area_a1_);
//cv2.MORPH_CLOSE 先进行膨胀,再进行腐蚀操作 //cv2.MORPH_CLOSE 先进行膨胀,再进行腐蚀操作
kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5)); kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
cv::morphologyEx(line_area_a1, line_area_a1, cv::MORPH_CLOSE, kernel); cv::morphologyEx(line_area_a1_, line_area_a1_, cv::MORPH_CLOSE, kernel);
std::vector<std::vector<cv::Point>> contours_a1; std::vector<std::vector<cv::Point>> contours_a1;
cv::findContours(line_area_a1, contours_a1, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE); cv::findContours(line_area_a1_, contours_a1, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
if (contours_a1.size() > 0) { if (contours_a1.size() > 0){
cv::Rect rect = cv::boundingRect(contours_a1[0]); cv::Rect rect = cv::boundingRect(contours_a1[0]);
int cx = rect.x + rect.width / 2; int cx = rect.x + rect.width / 2;
int cy = rect.y + rect.height / 2 + cy_a1; int cy = rect.y + rect.height / 2 + _cy_a1;
center_a1 = cv::Point(cx - half_w, cy - half_h); center_a1_ = cv::Point(cx - _half_w, cy - _half_h);
} }
cv::cvtColor(line_area_a2, line_area_a2, cv::COLOR_BGR2HSV); cv::cvtColor(line_area_a2_, line_area_a2_, cv::COLOR_BGR2HSV);
cv::inRange(line_area_a2, cv::Scalar(hmin, smin, vmin), cv::Scalar(hmax, smax, vmax), line_area_a2); cv::inRange(line_area_a2_, cv::Scalar(hmin, smin, vmin), cv::Scalar(hmax, smax, vmax), line_area_a2_);
kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5)); kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
cv::morphologyEx(line_area_a2, line_area_a2, cv::MORPH_CLOSE, kernel); cv::morphologyEx(line_area_a2_, line_area_a2_, cv::MORPH_CLOSE, kernel);
std::vector<std::vector<cv::Point>> contours_a2; std::vector<std::vector<cv::Point>> contours_a2;
cv::findContours(line_area_a2, contours_a2, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE); cv::findContours(line_area_a2_, contours_a2, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
if (contours_a2.size() > 0) { if (contours_a2.size() > 0)
{
cv::Rect rect = cv::boundingRect(contours_a2[0]); cv::Rect rect = cv::boundingRect(contours_a2[0]);
int cx = rect.x + rect.width / 2; int cx = rect.x + rect.width / 2;
int cy = rect.y + rect.height / 2 + cy_a2; int cy = rect.y + rect.height / 2 + _cy_a2;
center_a2 = cv::Point(cx - half_w, cy - half_h); center_a2_ = cv::Point(cx - _half_w, cy - _half_h);
} }
} }
void ColorLineDetector::detect(cv::Mat img, sv::TargetsInFrame& tgts_) void ColorLineDetector::detect(cv::Mat img_, sv::TargetsInFrame &tgts_)
{ {
if(!this->is_load_parameter) if (!this->is_load_parameter)
{ {
_load(); _load();
this->is_load_parameter = true; this->is_load_parameter = true;
} }
int area_n = -1; int area_n = -1;
cv::Mat area_base, area_base_a1, area_base_a2; cv::Mat area_base, area_base_a1, area_base_a2;
cv::Point cxcy_n(0, 0), center_a1_n(0,0), center_a2_n(0, 0); cv::Point cxcy_n(0, 0), center_a1_n(0, 0), center_a2_n(0, 0);
get_line_area(img, area_base, area_base_a1, area_base_a2); get_line_area(img_, area_base, area_base_a1, area_base_a2);
seg(area_base, area_base_a1, area_base_a2, line_color, cxcy_n, area_n, center_a1_n, center_a2_n); seg(area_base, area_base_a1, area_base_a2, line_color, cxcy_n, area_n, center_a1_n, center_a2_n);
pose.x = 0.0; pose.x = 0.0;
pose.y = -1.0; pose.y = -1.0;
pose.z = 0.0; pose.z = 0.0;
if (area_n > 0)
if (area_n > 0) {
{
circle(area_base, cv::Point(cxcy_n.x, cxcy_n.y), 4, cv::Scalar(0, 0, 255), -1);
double angle = (cxcy_n.x - this->camera_matrix.at<double>(0, 2)) / this->camera_matrix.at<double>(0, 2) * atan((double)(area_base.rows / 2) / this->fov_x);
pose.x = angle;
pose.y = 1.0;
}
else
{
cv::Point cxcy__n(0, 0), center_a1__n(0, 0), center_a2__n(0, 0);
seg(area_base, area_base_a1, area_base_a2, line_color, cxcy__n, area_n = 0, center_a1__n, center_a2__n);
if (area_n > 0)
{
circle(area_base, cv::Point(cxcy_n.x, cxcy_n.y), 4, cv::Scalar(0, 0, 255), -1); circle(area_base, cv::Point(cxcy_n.x, cxcy_n.y), 4, cv::Scalar(0, 0, 255), -1);
double angle = (cxcy_n.x - this->camera_matrix.at<double>(0, 2)) / this->camera_matrix.at<double>(0, 2) * atan((double)(area_base.rows / 2) / this->fov_x); double angle = (cxcy_n.x - this->camera_matrix.at<double>(0, 2)) / this->camera_matrix.at<double>(0, 2) * atan((double)(area_base.rows / 2) / this->fov_x);
pose.x = angle; pose.x = angle;
pose.y = 1.0; pose.y = 1.0;
pose.z = 0.0;
}
}
tgts_.setSize(img.cols, img.rows);
tgts_.setFOV(this->fov_x, this->fov_y);
auto t1 = std::chrono::system_clock::now();
tgts_.setFPS(1000.0/std::chrono::duration_cast<std::chrono::milliseconds>(t1- this->_t0).count());
this->_t0 = std::chrono::system_clock::now();
tgts_.setTimeNow();
if(area_n > 0)
{
Target tgt;
tgt.los_ax = pose.x;
if(cxcy_n.x !=0 || cxcy_n.y !=0)
{
tgt.cx = cxcy_n.x;
tgt.cy = cxcy_n.y;
} }
else if(center_a1_n.x != 0 || center_a1_n.y != 0) else
{ {
tgt.cx = center_a1_n.x; cv::Point cxcy__n(0, 0), center_a1__n(0, 0), center_a2__n(0, 0);
tgt.cy = center_a1_n.y; seg(area_base, area_base_a1, area_base_a2, line_color, cxcy__n, area_n = 0, center_a1__n, center_a2__n);
} if (area_n > 0)
else if(center_a2_n.x != 0 || center_a2_n.y != 0) {
circle(area_base, cv::Point(cxcy_n.x, cxcy_n.y), 4, cv::Scalar(0, 0, 255), -1);
double angle = (cxcy_n.x - this->camera_matrix.at<double>(0, 2)) / this->camera_matrix.at<double>(0, 2) * atan((double)(area_base.rows / 2) / this->fov_x);
pose.x = angle;
pose.y = 1.0;
pose.z = 0.0;
}
}
tgts_.setSize(img_.cols, img_.rows);
tgts_.setFOV(this->fov_x, this->fov_y);
auto t1 = std::chrono::system_clock::now();
tgts_.setFPS(1000.0 / std::chrono::duration_cast<std::chrono::milliseconds>(t1 - this->_t0).count());
this->_t0 = std::chrono::system_clock::now();
tgts_.setTimeNow();
if (area_n > 0)
{ {
tgt.cx = center_a2_n.x; Target tgt;
tgt.cy = center_a2_n.y; tgt.los_ax = pose.x;
} if (cxcy_n.x != 0 || cxcy_n.y != 0)
{
tgts_.targets.push_back(tgt); tgt.cx = cxcy_n.x;
} tgt.cy = cxcy_n.y;
}
else if (center_a1_n.x != 0 || center_a1_n.y != 0)
{
tgt.cx = center_a1_n.x;
tgt.cy = center_a1_n.y;
}
else if (center_a2_n.x != 0 || center_a2_n.y != 0)
{
tgt.cx = center_a2_n.x;
tgt.cy = center_a2_n.y;
}
tgts_.targets.push_back(tgt);
}
} }
} }

View File

@ -9,7 +9,7 @@
#include "ellipse_detector.h" #include "ellipse_detector.h"
#define SV_MODEL_DIR "/SpireCV/models/" #define SV_MODEL_DIR "/SpireCV/models/"
#define SV_ROOT_DIR "/SpireCV/" #define SV_ROOT_DIR "/SpireCV/params/a-params/"
namespace sv { namespace sv {

View File

@ -6,26 +6,18 @@
#include <string> #include <string>
#include <chrono> #include <chrono>
namespace sv
{
namespace sv {
class ColorLineDetector : public CameraAlgorithm class ColorLineDetector : public CameraAlgorithm
{ {
public: public:
ColorLineDetector(); ColorLineDetector();
~ColorLineDetector(); ~ColorLineDetector();
void detect(cv::Mat img_, TargetsInFrame& tgts_); void detect(cv::Mat img_, TargetsInFrame &tgts_);
protected:
float cy_a1;
float cy_a2;
float half_h;
float half_w;
cv::Mat img;
cv::Point3d pose; cv::Point3d pose;
double line_location; double line_location;
@ -36,13 +28,16 @@ protected:
std::string line_color; std::string line_color;
protected:
float _cy_a1;
float _cy_a2;
float _half_h;
float _half_w;
void _load(); void _load();
float cnt_area(std::vector<cv::Point> cnt); float cnt_area(std::vector<cv::Point> cnt_);
void get_line_area(cv::Mat &frame, cv::Mat &line_area, cv::Mat &line_area_a1, cv::Mat &line_area_a2); void get_line_area(cv::Mat &frame_, cv::Mat &line_area_, cv::Mat &line_area_a1_, cv::Mat &line_area_a2_);
void seg(cv::Mat line_area, cv::Mat line_area_a1, cv::Mat line_area_a2, std::string _line_color, cv::Point &center, int &area, cv::Point &center_a1, cv::Point &center_a2); void seg(cv::Mat line_area_, cv::Mat line_area_a1_, cv::Mat line_area_a2_, std::string line_color_, cv::Point &center_, int &area_, cv::Point &center_a1_, cv::Point &center_a2_);
}; };
} }
#endif #endif

View File

@ -180,8 +180,7 @@
"line_color": "black", "line_color": "black",
"line_location": 0.5, "line_location": 0.5,
"line_location_a1": 0.3, "line_location_a1": 0.3,
"line_location_a2": 0.7, "line_location_a2": 0.7
} }
} }

View File

@ -9,7 +9,7 @@ int main(int argc, char *argv[]) {
// 实例化Aruco检测器类 // 实例化Aruco检测器类
sv::ArucoDetector ad; sv::ArucoDetector ad;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件 // 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
ad.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml"); ad.loadCameraParams(sv::get_home() + "/SpireCV/params/c-params/calib_webcam_640x480.yaml");
// 打开摄像头 // 打开摄像头
sv::Camera cap; sv::Camera cap;

View File

@ -6,19 +6,18 @@
using namespace std; using namespace std;
using namespace sv; using namespace sv;
int main(int argc, char *argv[])
{
int main(int argc, char *argv[]) {
// 实例化 color line detection 检测器类 // 实例化 color line detection 检测器类
sv::ColorLineDetector cld; sv::ColorLineDetector cld;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件 // 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
cld.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml"); cld.loadCameraParams(sv::get_home() + "/SpireCV/params/c-params/calib_webcam_640x480.yaml");
// 打开摄像头 // 打开摄像头
sv::Camera cap; sv::Camera cap;
cap.setWH(640, 480); cap.setWH(640, 480);
//cap.setFps(30); // cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0 cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像 // 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img; cv::Mat img;
int frame_id = 0; int frame_id = 0;
@ -43,27 +42,21 @@ int main(int argc, char *argv[]) {
printf(" FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y); printf(" FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
// 打印当前输入图像的像素宽度和高度 // 打印当前输入图像的像素宽度和高度
printf(" Frame Size (width, height) = (%d, %d)\n", tgts.width, tgts.height); printf(" Frame Size (width, height) = (%d, %d)\n", tgts.width, tgts.height);
for (int i=0; i<tgts.targets.size(); i++) for (int i = 0; i < tgts.targets.size(); i++)
{ {
// 打印每个 color_line 的中心位置cxcy的值域为[0, 1]以及cxcy的像素值 // 打印每个 color_line 的中心位置cxcy的值域为[0, 1]以及cxcy的像素值
printf(" Color Line detect Center (cx, cy) = (%.3f, %.3f), in Pixels = ((%d, %d))\n", printf(" Color Line detect Center (cx, cy) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
tgts.targets[i].cx, tgts.targets[i].cy); tgts.targets[i].cx, tgts.targets[i].cy);
// 打印每个color_line的x_方向反正切值跟相机视场相关 // 打印每个color_line的x_方向反正切值跟相机视场相关
printf(" Color Line detect Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[i].los_ax, tgts.targets[i].los_ay); printf(" Color Line detect Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[i].los_ax, tgts.targets[i].los_ay);
} }
// 显示检测结果img // 显示检测结果img
cv::imshow("img", img); cv::imshow("img", img);
cv::waitKey(10); cv::waitKey(10);
}
return 0;
} }
return 0;
}

View File

@ -9,7 +9,7 @@ int main(int argc, char *argv[]) {
// 实例化 通用目标 检测器类 // 实例化 通用目标 检测器类
sv::CommonObjectDetector cod; sv::CommonObjectDetector cod;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件 // 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml"); cod.loadCameraParams(sv::get_home() + "/SpireCV/params/c-params/calib_webcam_640x480.yaml");
// 打开摄像头 // 打开摄像头
sv::Camera cap; sv::Camera cap;

View File

@ -40,11 +40,11 @@ int main(int argc, char *argv[]) {
// 实例化 框选目标跟踪类 // 实例化 框选目标跟踪类
sv::SingleObjectTracker sot; sv::SingleObjectTracker sot;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件 // 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml"); sot.loadCameraParams(sv::get_home() + "/SpireCV/params/c-params/calib_webcam_640x480.yaml");
sv::CommonObjectDetector cod; sv::CommonObjectDetector cod;
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml"); cod.loadCameraParams(sv::get_home() + "/SpireCV/params/c-params/calib_webcam_640x480.yaml");
// 打开摄像头 // 打开摄像头

View File

@ -9,7 +9,7 @@ int main(int argc, char *argv[]) {
// 实例化 椭圆 检测器类 // 实例化 椭圆 检测器类
sv::EllipseDetector ed; sv::EllipseDetector ed;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件 // 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
ed.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml"); ed.loadCameraParams(sv::get_home() + "/SpireCV/params/c-params/calib_webcam_640x480.yaml");
// 打开摄像头 // 打开摄像头
sv::Camera cap; sv::Camera cap;

View File

@ -77,10 +77,10 @@ int main(int argc, char *argv[])
// 实例化 框选目标跟踪类 // 实例化 框选目标跟踪类
sv::SingleObjectTracker sot; sv::SingleObjectTracker sot;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件 // 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml"); sot.loadCameraParams(sv::get_home() + "/SpireCV/params/c-params/calib_webcam_640x480.yaml");
sv::CommonObjectDetector cod; sv::CommonObjectDetector cod;
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml"); cod.loadCameraParams(sv::get_home() + "/SpireCV/params/c-params/calib_webcam_640x480.yaml");
// 实例化OpenCV的Mat类用于内存单帧图像 // 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img; cv::Mat img;

View File

@ -58,7 +58,7 @@ int main(int argc, char *argv[])
// 实例化 圆形降落标志 检测器类 // 实例化 圆形降落标志 检测器类
sv::LandingMarkerDetector lmd; sv::LandingMarkerDetector lmd;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件 // 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
lmd.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml"); lmd.loadCameraParams(sv::get_home() + "/SpireCV/params/c-params/calib_webcam_1280x720.yaml");
// 实例化OpenCV的Mat类用于内存单帧图像 // 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img; cv::Mat img;

View File

@ -67,7 +67,7 @@ int main(int argc, char *argv[])
// 实例化Aruco检测器类 // 实例化Aruco检测器类
sv::ArucoDetector ad; sv::ArucoDetector ad;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件 // 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
ad.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml"); ad.loadCameraParams(sv::get_home() + "/SpireCV/params/c-params/calib_webcam_640x480.yaml");
sv::UDPServer udp; sv::UDPServer udp;
// 实例化OpenCV的Mat类用于内存单帧图像 // 实例化OpenCV的Mat类用于内存单帧图像

View File

@ -9,7 +9,7 @@ int main(int argc, char *argv[]) {
// 实例化 圆形降落标志 检测器类 // 实例化 圆形降落标志 检测器类
sv::LandingMarkerDetector lmd; sv::LandingMarkerDetector lmd;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件 // 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
lmd.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml"); lmd.loadCameraParams(sv::get_home() + "/SpireCV/params/c-params/calib_webcam_640x480.yaml");
// 打开摄像头 // 打开摄像头
sv::Camera cap; sv::Camera cap;

View File

@ -28,16 +28,16 @@ int main(int argc, char *argv[]) {
// 实例化 框选目标跟踪类 // 实例化 框选目标跟踪类
sv::SingleObjectTracker sot; sv::SingleObjectTracker sot;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件 // 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml"); sot.loadCameraParams(sv::get_home() + "/SpireCV/params/c-params/calib_webcam_640x480.yaml");
// sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml"); // sot.loadCameraParams(sv::get_home() + "/SpireCV/params/c-params/calib_webcam_1280x720.yaml");
// sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1920x1080.yaml"); // sot.loadCameraParams(sv::get_home() + "/SpireCV/params/c-params/calib_webcam_1920x1080.yaml");
// 打开摄像头 // 打开摄像头
sv::Camera cap; sv::Camera cap;
// cap.setWH(640, 480); // cap.setWH(640, 480);
// cap.setFps(30); // cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0 cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
// cv::VideoCapture cap("/home/amov/SpireCV/test/tracking_1280x720.mp4");
// 实例化OpenCV的Mat类用于内存单帧图像 // 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img; cv::Mat img;
int frame_id = 0; int frame_id = 0;

View File

@ -9,7 +9,7 @@ int main(int argc, char *argv[]) {
// 实例化Aruco检测器类 // 实例化Aruco检测器类
sv::ArucoDetector ad; sv::ArucoDetector ad;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件 // 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
ad.loadCameraParams("/home/amov/SpireCV/calib_webcam_640x480.yaml"); ad.loadCameraParams("/home/amov/SpireCV/params/c-params/calib_webcam_640x480.yaml");
// 打开摄像头 // 打开摄像头
sv::Camera cap; sv::Camera cap;

View File

@ -9,7 +9,7 @@ int main(int argc, char *argv[]) {
// 实例化 通用目标 检测器类 // 实例化 通用目标 检测器类
sv::CommonObjectDetector cod; sv::CommonObjectDetector cod;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件 // 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
cod.loadCameraParams("/home/amov/SpireCV/calib_webcam_640x480.yaml"); cod.loadCameraParams("/home/amov/SpireCV/params/c-params/calib_webcam_640x480.yaml");
// 打开摄像头 // 打开摄像头
sv::Camera cap; sv::Camera cap;