SpireCV p230 branch for spirecv_ros p230 branch

This commit is contained in:
eason 2023-09-11 14:15:15 +08:00
parent b93d5c7381
commit 1a19b6c7f3
15 changed files with 837 additions and 14 deletions

3
.gitignore vendored
View File

@ -15,6 +15,9 @@ share/python-wheels/
*.egg *.egg
MANIFEST MANIFEST
.idea/ .idea/
models/
models-converting.sh
models-downloading.sh
# Prerequisites # Prerequisites
*.d *.d

View File

@ -271,6 +271,10 @@ add_executable(GimbalLandingMarkerDetection samples/demo/gimbal_landing_marker_d
target_link_libraries(GimbalLandingMarkerDetection sv_world) target_link_libraries(GimbalLandingMarkerDetection sv_world)
add_executable(GimbalUdpDetectionInfoSender samples/demo/gimbal_udp_detection_info_sender.cpp) add_executable(GimbalUdpDetectionInfoSender samples/demo/gimbal_udp_detection_info_sender.cpp)
target_link_libraries(GimbalUdpDetectionInfoSender sv_world) target_link_libraries(GimbalUdpDetectionInfoSender sv_world)
add_executable(ArucoDetectionWithSingleObjectTracking samples/demo/aruco_detection_with_single_object_tracking.cpp)
target_link_libraries(ArucoDetectionWithSingleObjectTracking sv_world)
add_executable(CarDetectionWithTracking samples/demo/car_detection_with_tracking.cpp)
target_link_libraries(CarDetectionWithTracking sv_world)
add_executable(EvalFpsOnVideo samples/test/eval_fps_on_video.cpp) add_executable(EvalFpsOnVideo samples/test/eval_fps_on_video.cpp)
target_link_libraries(EvalFpsOnVideo sv_world) target_link_libraries(EvalFpsOnVideo sv_world)

View File

@ -0,0 +1,20 @@
%YAML:1.0
---
calibration_time: "2021年01月12日 星期二 18时08分01秒"
image_width: 1280
image_height: 720
flags: 0
camera_matrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 7.9379415710551370e+02, 0., 2.9783879354295328e+02, 0.,
7.9491985564466654e+02, 3.0942416136837386e+02, 0., 0., 1. ]
distortion_coefficients: !!opencv-matrix
rows: 1
cols: 5
dt: d
data: [ 2.0950200339181715e-01, -1.1587468096518483e+00,
5.5342063671841328e-03, 2.2214393775334758e-04,
1.7127431916651392e+00 ]
avg_reprojection_error: 2.8342964851391211e-01

20
calib_webcam_640x480.yaml Normal file
View File

@ -0,0 +1,20 @@
%YAML:1.0
---
calibration_time: "2023年07月14日 星期五 16时39分17秒"
image_width: 640
image_height: 480
flags: 0
camera_matrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 4.5099311307542973e+02, 0., 3.2898947972890943e+02, 0.,
6.0215873600107579e+02, 2.4195307609106428e+02, 0., 0., 1. ]
distortion_coefficients: !!opencv-matrix
rows: 1
cols: 5
dt: d
data: [ 1.0737258446369682e-01, -1.2782122264046064e-01,
1.6844258609297487e-03, -6.6256775118868144e-04,
-3.5333889479158398e-01 ]
avg_reprojection_error: 3.3968000452388564e-01

View File

@ -119,6 +119,8 @@ int main(int argc, char *argv[]) {
} }
VideoCapture inputVideo; VideoCapture inputVideo;
int waitTime; int waitTime;
if(!video.empty()) { if(!video.empty()) {
inputVideo.open(video); inputVideo.open(video);
@ -127,6 +129,9 @@ int main(int argc, char *argv[]) {
inputVideo.open(camId); inputVideo.open(camId);
waitTime = 10; waitTime = 10;
} }
aruco::Dictionary dictionary = aruco::getPredefinedDictionary(0); aruco::Dictionary dictionary = aruco::getPredefinedDictionary(0);
if (parser.has("d")) { if (parser.has("d")) {
@ -159,6 +164,8 @@ int main(int argc, char *argv[]) {
while(inputVideo.grab()) { while(inputVideo.grab()) {
Mat image, imageCopy; Mat image, imageCopy;
inputVideo.retrieve(image); inputVideo.retrieve(image);
cv::resize(image, image, cv::Size(640, 480));
vector< int > ids; vector< int > ids;
vector< vector< Point2f > > corners, rejected; vector< vector< Point2f > > corners, rejected;

View File

@ -9,13 +9,13 @@ int main(int argc, char *argv[]) {
// 实例化Aruco检测器类 // 实例化Aruco检测器类
sv::ArucoDetector ad; sv::ArucoDetector ad;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件 // 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
ad.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml"); ad.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
// 打开摄像头 // 打开摄像头
sv::Camera cap; sv::Camera cap;
// cap.setWH(640, 480); // cap.setWH(640, 480);
// cap.setFps(30); // cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0 cap.open(sv::CameraType::WEBCAM, 2); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像 // 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img; cv::Mat img;
int frame_id = 0; int frame_id = 0;

View File

@ -0,0 +1,173 @@
#include <iostream>
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
using namespace std;
// Name of the display window (also receives the mouse callback).
static const std::string RGB_WINDOW = "Image window";
// Rectangle selected by the user with the mouse drag.
cv::Rect rect_sel;
// Anchor point where the mouse drag started.
cv::Point pt_origin;
// True while the left mouse button is held down (a drag is in progress).
bool b_clicked = false;
// True when a new selection rectangle has been completed and awaits the tracker.
bool b_renew_ROI = false;
// True once tracking has been started.
bool b_begin_TRACK = false;
// Mouse callback implementing the drag-to-select logic.
void onMouse(int event, int x, int y, int, void*);
int main(int argc, char *argv[]) {
    // Create a named window on which the user can drag a selection box.
    cv::namedWindow(RGB_WINDOW);
    // Register the mouse callback that implements the whole selection logic.
    cv::setMouseCallback(RGB_WINDOW, onMouse, 0);
    // Single-object tracker for the user-selected region.
    sv::SingleObjectTracker sot;
    // Load camera intrinsics manually. With Amov gimbals/cameras (e.g. G1) this
    // step can be skipped: the parameter file is downloaded automatically.
    sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
    // sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
    // sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1920x1080.yaml");
    // ArUco marker detector, sharing the same camera parameters.
    sv::ArucoDetector ad;
    ad.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
    // Open the webcam at 640x480 @ 30 fps.
    sv::Camera cap;
    cap.setWH(640, 480);
    cap.setFps(30);
    cap.open(sv::CameraType::WEBCAM, 2); // camera ID 2
    // cv::VideoCapture cap("/home/amov/SpireCV/test/tracking_1280x720.mp4");
    // Buffer for the current frame.
    cv::Mat img;
    int frame_id = 0;
    while (1)
    {
        // Take the id BEFORE incrementing so the log matches the id stored in
        // tgts (the original printed frame_id after the increment, off by one).
        const int id = frame_id++;
        // Per-frame detection/tracking results container.
        sv::TargetsInFrame tgts(id);
        // Grab one frame and scale it to the tracker's expected resolution.
        cap.read(img);
        cv::resize(img, img, cv::Size(sot.image_width, sot.image_height));

        // Single-object-tracking state machine.
        if (b_renew_ROI)
        {
            // A new region was just selected: (re)initialize the tracker with it.
            sot.init(img, rect_sel);
            // std::cout << rect_sel << std::endl;
            b_renew_ROI = false;
            b_begin_TRACK = true;
        }
        else if (b_begin_TRACK)
        {
            // Keep tracking from the previous frame's result.
            sot.track(img, tgts);
            // Also run ArUco marker detection on the same frame.
            ad.detect(img, tgts);
            // Overlay all results on the frame.
            sv::drawTargetsInFrame(img, tgts);

            printf("Frame-[%d]\n", id);
            // Current processing rate.
            printf(" FPS = %.2f\n", tgts.fps);
            // Camera field of view, in degrees.
            printf(" FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
            // Pixel size of the processed frame.
            printf(" Frame Size (width, height) = (%d, %d)\n", tgts.width, tgts.height);
            if (tgts.targets.size() > 0)
            {
                printf("Frame-[%d]\n", id);
                // Tracked target center: (cx, cy) normalized to [0, 1], plus pixels.
                printf(" Tracking Center (cx, cy) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
                       tgts.targets[0].cx, tgts.targets[0].cy,
                       int(tgts.targets[0].cx * tgts.width),
                       int(tgts.targets[0].cy * tgts.height));
                // Tracked target bounding-box size: (w, h) normalized to (0, 1], plus pixels.
                printf(" Tracking Size (w, h) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
                       tgts.targets[0].w, tgts.targets[0].h,
                       int(tgts.targets[0].w * tgts.width),
                       int(tgts.targets[0].h * tgts.height));
                // Line-of-sight angles of the tracked target (depends on camera FOV).
                printf(" Tracking Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[0].los_ax, tgts.targets[0].los_ay);
                // NOTE: this loop iterates over ALL targets in tgts (the tracked
                // box plus every detected ArUco marker), as in the original.
                // Cast size() to int to avoid the signed/unsigned comparison.
                for (int i = 0; i < static_cast<int>(tgts.targets.size()); i++)
                {
                    printf("Frame-[%d], Aruco-[%d]\n", id, i);
                    // Marker center: normalized [0, 1] plus pixel coordinates.
                    printf(" Aruco Center (cx, cy) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
                           tgts.targets[i].cx, tgts.targets[i].cy,
                           int(tgts.targets[i].cx * tgts.width),
                           int(tgts.targets[i].cy * tgts.height));
                    // Marker bounding-box size: normalized (0, 1] plus pixels.
                    printf(" Aruco Size (w, h) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
                           tgts.targets[i].w, tgts.targets[i].h,
                           int(tgts.targets[i].w * tgts.width),
                           int(tgts.targets[i].h * tgts.height));
                    // Marker yaw angle, in [-180, 180] degrees.
                    printf(" Aruco Yaw-angle = %.2f\n", tgts.targets[i].yaw_a);
                    // Marker category string, "aruco-?".
                    printf(" Aruco Category = %s\n", tgts.targets[i].category.c_str());
                    // Marker ID.
                    printf(" Aruco Tracked-ID = %d\n", tgts.targets[i].tracked_id);
                    // Marker line-of-sight angles (depends on camera FOV).
                    printf(" Aruco Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[i].los_ax, tgts.targets[i].los_ay);
                    // Marker 3D position in the camera frame (depends on the
                    // configured marker edge length and camera intrinsics).
                    printf(" Aruco Position = (x, y, z) = (%.3f, %.3f, %.3f)\n", tgts.targets[i].px, tgts.targets[i].py, tgts.targets[i].pz);
                }
            }
        }
        // Display the annotated frame.
        cv::imshow(RGB_WINDOW, img);
        cv::waitKey(1);
    }
    return 0;
}
// Mouse callback: lets the user drag out a rectangle to (re)initialize the
// tracker. Sets b_renew_ROI when a large-enough selection is released.
void onMouse(int event, int x, int y, int, void*)
{
    // While the button is held, keep the rectangle spanning the drag origin
    // and the current cursor position.
    if (b_clicked)
    {
        const int left = MIN(pt_origin.x, x);
        const int top = MIN(pt_origin.y, y);
        rect_sel = cv::Rect(left, top, abs(x - pt_origin.x), abs(y - pt_origin.y));
    }
    if (event == cv::EVENT_LBUTTONDOWN)
    {
        // Start a new drag: pause tracking and anchor the rectangle here.
        b_begin_TRACK = false;
        b_clicked = true;
        pt_origin = cv::Point(x, y);
        rect_sel = cv::Rect(x, y, 0, 0);
    }
    else if (event == cv::EVENT_LBUTTONUP)
    {
        // Accept the selection only when it is larger than 8x8 pixels;
        // otherwise the drag simply continues.
        if (rect_sel.width * rect_sel.height >= 64)
        {
            b_clicked = false;
            b_renew_ROI = true;
        }
    }
}

View File

@ -8,9 +8,9 @@ using namespace std;
int main(int argc, char *argv[]) { int main(int argc, char *argv[]) {
// 打开摄像头 // 打开摄像头
sv::Camera cap; sv::Camera cap;
// cap.setWH(640, 480); cap.setWH(1280, 720);
// cap.setFps(30); cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0 cap.open(sv::CameraType::WEBCAM, 2); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像 // 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img; cv::Mat img;
while (1) while (1)
@ -20,7 +20,7 @@ int main(int argc, char *argv[]) {
// 显示img // 显示img
cv::imshow("img", img); cv::imshow("img", img);
cv::waitKey(10); cv::waitKey(1);
} }
return 0; return 0;

View File

@ -0,0 +1,193 @@
#include <iostream>
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
using namespace std;
// Name of the display window (also receives the mouse callback).
static const std::string RGB_WINDOW = "Image window";
// Rectangle handed to the tracker once a detected box is selected.
cv::Rect rect_sel;
// Point where the user last clicked (tested against detected boxes).
cv::Point pt_origin;
// True when a new selection rectangle is ready for the tracker.
bool b_renew_ROI = false;
// True once tracking has been started.
bool b_begin_TRACK = false;
// Mouse callback implementing the click-to-select logic.
void onMouse(int event, int x, int y, int, void*);
// Simple 2-D point used by the point-in-rectangle test.
struct node {
    double x, y;
};
// Corners of the candidate detection box (filled per target in main).
node p1, p2, p3, p4;
// The clicked point that is tested against the box.
node p;
// Z-component of the cross product (p2 - p1) x (p - p1).
// Positive when p lies to the left of the directed edge p1 -> p2, so four
// same-signed results mean p is inside the quadrilateral p1-p2-p3-p4.
double getCross(node p1, node p2, node p) {
    const double edge_x = p2.x - p1.x;
    const double edge_y = p2.y - p1.y;
    const double to_p_x = p.x - p1.x;
    const double to_p_y = p.y - p1.y;
    return edge_x * to_p_y - to_p_x * edge_y;
}
// True while waiting for a click to restart detection.
bool b_clicked = false;
// True = run the detector; false = run the tracker.
bool detect_tracking = true;
int main(int argc, char *argv[]) {
    // Create a named window on which the user can click to select a target.
    cv::namedWindow(RGB_WINDOW);
    // Register the mouse callback that implements the selection logic.
    cv::setMouseCallback(RGB_WINDOW, onMouse, 0);
    // Single-object tracker.
    sv::SingleObjectTracker sot;
    // Load camera intrinsics manually. With Amov gimbals/cameras (e.g. G1) this
    // step can be skipped: the parameter file is downloaded automatically.
    sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
    // Common object detector, sharing the same camera parameters.
    sv::CommonObjectDetector cod;
    cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
    // Open the webcam at 640x480 @ 30 fps.
    sv::Camera cap;
    cap.setWH(640, 480);
    cap.setFps(30);
    cap.open(sv::CameraType::WEBCAM, 2); // camera ID 2
    // cv::VideoCapture cap("/home/amov/SpireCV/test/tracking_1280x720.mp4");
    // Buffer for the current frame.
    cv::Mat img;
    int frame_id = 0;
    while (1)
    {
        // Two modes, toggled by the mouse callback via detect_tracking:
        // detection mode finds candidate boxes; a click inside one switches
        // to tracking mode on that box.
        if (detect_tracking == true) {
            // Per-frame results container.
            sv::TargetsInFrame tgts(frame_id++);
            // Grab one frame and scale it to the detector's resolution.
            cap.read(img);
            cv::resize(img, img, cv::Size(cod.image_width, cod.image_height));
            // Run common object detection.
            cod.detect(img, tgts);
            // Overlay results on the frame.
            sv::drawTargetsInFrame(img, tgts);
            // NOTE(review): frame_id was incremented above, so this prints the
            // id AFTER the one stored in tgts (off by one in the log).
            printf("Frame-[%d]\n", frame_id);
            // Current processing rate.
            printf(" FPS = %.2f\n", tgts.fps);
            // Camera field of view, in degrees.
            printf(" FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
            for (int i=0; i<tgts.targets.size(); i++)
            {
                printf("Frame-[%d], Object-[%d]\n", frame_id, i);
                // Target center, normalized to [0, 1].
                printf(" Object Center (cx, cy) = (%.3f, %.3f)\n", tgts.targets[i].cx, tgts.targets[i].cy);
                // Target bounding-box size, normalized to (0, 1].
                printf(" Object Size (w, h) = (%.3f, %.3f)\n", tgts.targets[i].w, tgts.targets[i].h);
                // Detection confidence score.
                printf(" Object Score = %.3f\n", tgts.targets[i].score);
                // Category string and numeric category id.
                printf(" Object Category = %s, Category ID = [%d]\n", tgts.targets[i].category.c_str(), tgts.targets[i].category_id);
                // Line-of-sight angles (depends on camera FOV).
                printf(" Object Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[i].los_ax, tgts.targets[i].los_ay);
                // 3D position in the camera frame (depends on the configured
                // real-world object size and camera intrinsics).
                printf(" Object Position = (x, y, z) = (%.3f, %.3f, %.3f)\n", tgts.targets[i].px, tgts.targets[i].py, tgts.targets[i].pz);
                // Build the four pixel-space corners of this detection box:
                // p1 = top-left, p2 = top-right, p3 = bottom-right, p4 = bottom-left.
                p1.x = tgts.targets[i].cx * tgts.width - tgts.targets[i].w * tgts.width / 2;
                p1.y = tgts.targets[i].cy * tgts.height - tgts.targets[i].h * tgts.height / 2;
                p2.x = tgts.targets[i].cx * tgts.width + tgts.targets[i].w * tgts.width / 2;
                p2.y = tgts.targets[i].cy * tgts.height - tgts.targets[i].h * tgts.height / 2;
                p4.x = tgts.targets[i].cx * tgts.width - tgts.targets[i].w * tgts.width / 2;
                p4.y = tgts.targets[i].cy * tgts.height + tgts.targets[i].h * tgts.height / 2;
                p3.x = tgts.targets[i].cx * tgts.width + tgts.targets[i].w * tgts.width / 2;
                p3.y = tgts.targets[i].cy * tgts.height + tgts.targets[i].h * tgts.height / 2;
                // The last clicked point, in the same pixel space.
                p.x = pt_origin.x;
                p.y = pt_origin.y;
                std::cout << "p.x " << p.x << "\t" << "p.y " << p.y << std::endl;
                // Cross-product sign test: the click is inside the box when it
                // lies on the same side of each pair of opposite edges.
                if (getCross(p1, p2, p) * getCross(p3, p4, p) >= 0 && getCross(p2, p3, p) * getCross(p4, p1, p) >= 0) {
                    // Click hit this detection: switch to tracking mode on it.
                    b_begin_TRACK = false;
                    detect_tracking = false;
                    // pt_origin = cv::Point(nor_x, nor_p_y);
                    // std::cout << "pt_origin " <<nor_x<<"/t"<<nor_p_y<< std::endl;
                    rect_sel = cv::Rect(p1.x, p1.y, tgts.targets[i].w * tgts.width, tgts.targets[i].h * tgts.height);
                    // std::cout << rect_sel << std::endl;
                    b_renew_ROI = true;
                    frame_id = 0;
                    printf("rect_sel Yes\n");
                }
                else {
                    printf("rect_sel No\n");
                }
            }
        }
        else {
            // Tracking mode: per-frame results container.
            sv::TargetsInFrame tgts(frame_id++);
            // Grab one frame and scale it to the tracker's resolution.
            cap.read(img);
            cv::resize(img, img, cv::Size(sot.image_width, sot.image_height));
            // Single-object-tracking state machine.
            if (b_renew_ROI)
            {
                // A new region was just selected: (re)initialize the tracker.
                sot.init(img, rect_sel);
                // std::cout << rect_sel << std::endl;
                b_renew_ROI = false;
                b_begin_TRACK = true;
            }
            else if (b_begin_TRACK)
            {
                // Keep tracking from the previous frame's result.
                sot.track(img, tgts);
                // Overlay results on the frame.
                sv::drawTargetsInFrame(img, tgts);
                printf("Frame-[%d]\n", frame_id);
                // Current processing rate.
                printf(" FPS = %.2f\n", tgts.fps);
                // Camera field of view, in degrees.
                printf(" FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
                if (tgts.targets.size() > 0)
                {
                    printf("Frame-[%d]\n", frame_id);
                    // Tracked target center, normalized to [0, 1].
                    printf(" Tracking Center (cx, cy) = (%.3f, %.3f)\n", tgts.targets[0].cx, tgts.targets[0].cy);
                    // Tracked target bounding-box size, normalized to (0, 1].
                    printf(" Tracking Size (w, h) = (%.3f, %.3f)\n", tgts.targets[0].w, tgts.targets[0].h);
                    // Line-of-sight angles (depends on camera FOV).
                    printf(" Tracking Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[0].los_ax, tgts.targets[0].los_ay);
                }
            }
        }//end of tracking
        // Display the annotated frame.
        cv::imshow(RGB_WINDOW, img);
        cv::waitKey(1);
    }
    return 0;
}
// Mouse callback: a left click records the clicked point (main tests it
// against detected boxes); a right click resets to detection mode.
void onMouse(int event, int x, int y, int, void*)
{
    if (b_clicked)
    {
        // Reset the recorded click point to the origin. (The original comment
        // claimed this "updates the selection rect", which it does not —
        // presumably this clears the stale click after a right-button reset.)
        pt_origin.x = 0;
        pt_origin.y = 0;
    }
    // Left button: remember the click position and (re)enter detection mode.
    if (event == cv::EVENT_LBUTTONDOWN)
    {
        detect_tracking = true;
        pt_origin = cv::Point(x, y);
    }
    // Right button: abort any tracking and return to detection mode.
    else if (event == cv::EVENT_RBUTTONDOWN)
    {
        detect_tracking = true;
        b_renew_ROI = false;
        b_begin_TRACK = false;
        b_clicked = true;
    }
}

View File

@ -13,9 +13,9 @@ int main(int argc, char *argv[]) {
// 打开摄像头 // 打开摄像头
sv::Camera cap; sv::Camera cap;
// cap.setWH(640, 480); cap.setWH(640, 480);
// cap.setFps(30); cap.setFps(60);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0 cap.open(sv::CameraType::WEBCAM, 2); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像 // 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img; cv::Mat img;
int frame_id = 0; int frame_id = 0;
@ -65,7 +65,7 @@ int main(int argc, char *argv[]) {
// 显示检测结果img // 显示检测结果img
cv::imshow("img", img); cv::imshow("img", img);
cv::waitKey(10); cv::waitKey(1);
} }
return 0; return 0;

View File

@ -51,7 +51,7 @@ int main(int argc, char *argv[]) {
sv::Camera cap; sv::Camera cap;
// cap.setWH(640, 480); // cap.setWH(640, 480);
// cap.setFps(30); // cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0 cap.open(sv::CameraType::WEBCAM, 2); // CameraID 0
// cv::VideoCapture cap("/home/amov/SpireCV/test/tracking_1280x720.mp4"); // cv::VideoCapture cap("/home/amov/SpireCV/test/tracking_1280x720.mp4");
// 实例化OpenCV的Mat类用于内存单帧图像 // 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img; cv::Mat img;

View File

@ -13,8 +13,8 @@ int main(int argc, char *argv[]) {
// 打开摄像头 // 打开摄像头
sv::Camera cap; sv::Camera cap;
// cap.setWH(640, 480); cap.setWH(640, 480);
// cap.setFps(30); cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0 cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像 // 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img; cv::Mat img;
@ -63,7 +63,7 @@ int main(int argc, char *argv[]) {
// 显示检测结果img // 显示检测结果img
cv::imshow("img", img); cv::imshow("img", img);
cv::waitKey(10); cv::waitKey(1);
} }
return 0; return 0;

View File

@ -1,11 +1,23 @@
{ {
"CommonObjectDetector": { "CommonObjectDetector": {
<<<<<<< Updated upstream
"dataset": "COCO", "dataset": "COCO",
"inputSize": 1280, "inputSize": 1280,
"nmsThrs": 0.6, "nmsThrs": 0.6,
"scoreThrs": 0.3, "scoreThrs": 0.3,
"useWidthOrHeight": 1, "useWidthOrHeight": 1,
"withSegmentation": false, "withSegmentation": false,
=======
"dataset": "CAR",
"inputSize": 640,
"nmsThrs": 0.6,
"scoreThrs": 0.4,
"useWidthOrHeight": 1,
"withSegmentation": false,
"datasetCAR":{
"diff_car":[0.12,0.105]
},
>>>>>>> Stashed changes
"datasetPersonVehicle": { "datasetPersonVehicle": {
"person": [0.5, 1.8], "person": [0.5, 1.8],
"car": [4.1, 1.5], "car": [4.1, 1.5],
@ -114,6 +126,7 @@
"SingleObjectTracker": { "SingleObjectTracker": {
"algorithm": "nano", "algorithm": "nano",
"backend": 0, "backend": 0,
<<<<<<< Updated upstream
"target": 0, "target": 0,
"useWidthOrHeight": 0, "useWidthOrHeight": 0,
"sigleobjectW":2, "sigleobjectW":2,
@ -125,6 +138,16 @@
}, },
"EllipseDetector": { "EllipseDetector": {
"radiusInMeter": 0.1535, "radiusInMeter": 0.1535,
=======
"target": 0
},
"LandingMarkerDetector": {
"labels": ["h"],
"maxCandidates": 5
},
"EllipseDetector": {
"radiusInMeter": 0.685,
>>>>>>> Stashed changes
"preProcessingGaussKernel": 5, "preProcessingGaussKernel": 5,
"preProcessingGaussSigma": 1.306, "preProcessingGaussSigma": 1.306,
"thPosition": 1.0, "thPosition": 1.0,
@ -132,10 +155,17 @@
"minEdgeLength": 9, "minEdgeLength": 9,
"minOrientedRectSide": 2.984, "minOrientedRectSide": 2.984,
"distanceToEllipseContour": 0.111, "distanceToEllipseContour": 0.111,
<<<<<<< Updated upstream
"minScore": 0.7, "minScore": 0.7,
"minReliability": 0.470, "minReliability": 0.470,
"ns": 22, "ns": 22,
"percentNe": 0.99, "percentNe": 0.99,
=======
"minScore": 0.511,
"minReliability": 0.470,
"ns": 22,
"percentNe": 0.946,
>>>>>>> Stashed changes
"T_CNC": 0.121, "T_CNC": 0.121,
"T_TCN_L": 0.468, "T_TCN_L": 0.468,
"T_TCN_P": 0.560, "T_TCN_P": 0.560,
@ -144,8 +174,13 @@
"ArucoDetector": { "ArucoDetector": {
"dictionaryId": 10, "dictionaryId": 10,
"markerIds": [-1], "markerIds": [-1],
<<<<<<< Updated upstream
"markerLengths": [0.2], "markerLengths": [0.2],
"adaptiveThreshConstant": 35, "adaptiveThreshConstant": 35,
=======
"markerLengths": [0.17],
"adaptiveThreshConstant": 7,
>>>>>>> Stashed changes
"adaptiveThreshWinSizeMax": 23, "adaptiveThreshWinSizeMax": 23,
"adaptiveThreshWinSizeMin": 3, "adaptiveThreshWinSizeMin": 3,
"adaptiveThreshWinSizeStep": 10, "adaptiveThreshWinSizeStep": 10,

View File

@ -0,0 +1,184 @@
{
"CommonObjectDetector": {
"dataset": "COCO",
"inputSize": 1280,
"nmsThrs": 0.6,
"scoreThrs": 0.4,
"useWidthOrHeight": 1,
"withSegmentation": false,
"datasetPersonVehicle": {
"person": [0.5, 1.8],
"car": [4.1, 1.5],
"bus": [10, 3],
"truck": [-1, -1],
"bike": [-1, -1],
"train": [-1, -1],
"boat": [-1, -1],
"aeroplane": [-1, -1]
},
"datasetDrone": {
"drone": [0.4, 0.2]
},
"datasetCOCO": {
"person": [-1, -1],
"bicycle": [-1, -1],
"car": [-1, -1],
"motorcycle": [-1, -1],
"airplane": [-1, -1],
"bus": [-1, -1],
"train": [-1, -1],
"truck": [-1, -1],
"boat": [-1, -1],
"traffic light": [-1, -1],
"fire hydrant": [-1, -1],
"stop sign": [-1, -1],
"parking meter": [-1, -1],
"bench": [-1, -1],
"bird": [-1, -1],
"cat": [-1, -1],
"dog": [-1, -1],
"horse": [-1, -1],
"sheep": [-1, -1],
"cow": [-1, -1],
"elephant": [-1, -1],
"bear": [-1, -1],
"zebra": [-1, -1],
"giraffe": [-1, -1],
"backpack": [-1, -1],
"umbrella": [-1, -1],
"handbag": [-1, -1],
"tie": [-1, -1],
"suitcase": [-1, -1],
"frisbee": [-1, -1],
"skis": [-1, -1],
"snowboard": [-1, -1],
"sports ball": [-1, -1],
"kite": [-1, -1],
"baseball bat": [-1, -1],
"baseball glove": [-1, -1],
"skateboard": [-1, -1],
"surfboard": [-1, -1],
"tennis racket": [-1, -1],
"bottle": [-1, -1],
"wine glass": [-1, -1],
"cup": [-1, -1],
"fork": [-1, -1],
"knife": [-1, -1],
"spoon": [-1, -1],
"bowl": [-1, -1],
"banana": [-1, -1],
"apple": [-1, -1],
"sandwich": [-1, -1],
"orange": [-1, -1],
"broccoli": [-1, -1],
"carrot": [-1, -1],
"hot dog": [-1, -1],
"pizza": [-1, -1],
"donut": [-1, -1],
"cake": [-1, -1],
"chair": [-1, -1],
"couch": [-1, -1],
"potted plant": [-1, -1],
"bed": [-1, -1],
"dining table": [-1, -1],
"toilet": [-1, -1],
"tv": [-1, -1],
"laptop": [-1, -1],
"mouse": [-1, -1],
"remote": [-1, -1],
"keyboard": [-1, -1],
"cell phone": [-1, -1],
"microwave": [-1, -1],
"oven": [-1, -1],
"toaster": [-1, -1],
"sink": [-1, -1],
"refrigerator": [-1, -1],
"book": [-1, -1],
"clock": [-1, -1],
"vase": [-1, -1],
"scissors": [-1, -1],
"teddy bear": [-1, -1],
"hair drier": [-1, -1],
"toothbrush": [-1, -1]
}
},
"AutoFocusObjectDetector": {
"lock_thres": 5,
"unlock_thres": 5,
"lock_scale_init": 12.0,
"lock_scale": 8.0,
"categories_filter": [],
"keep_unlocked": false,
"use_square_region": false
},
"SingleObjectTracker": {
"algorithm": "siamrpn",
"backend": 0,
"target": 0
},
"LandingMarkerDetector": {
"labels": ["x", "h"],
"maxCandidates": 5
},
"EllipseDetector": {
"radiusInMeter": 0.5,
"preProcessingGaussKernel": 5,
"preProcessingGaussSigma": 1.306,
"thPosition": 1.0,
"maxCenterDistance": 0.05,
"minEdgeLength": 9,
"minOrientedRectSide": 2.984,
"distanceToEllipseContour": 0.111,
"minScore": 0.511,
"minReliability": 0.470,
"ns": 22,
"percentNe": 0.946,
"T_CNC": 0.121,
"T_TCN_L": 0.468,
"T_TCN_P": 0.560,
"thRadius": 0.202
},
"ArucoDetector": {
"dictionaryId": 10,
"markerIds": [-1],
"markerLengths": [0.2],
"adaptiveThreshConstant": 7,
"adaptiveThreshWinSizeMax": 23,
"adaptiveThreshWinSizeMin": 3,
"adaptiveThreshWinSizeStep": 10,
"aprilTagCriticalRad": 0.17453292519,
"aprilTagDeglitch": 0,
"aprilTagMaxLineFitMse": 10.0,
"aprilTagMaxNmaxima": 10,
"aprilTagMinClusterPixels": 5,
"aprilTagMinWhiteBlackDiff": 5,
"aprilTagQuadDecimate": 0.0,
"aprilTagQuadSigma": 0.0,
"cornerRefinementMaxIterations": 30,
"cornerRefinementMethod": 0,
"cornerRefinementMinAccuracy": 0.1,
"cornerRefinementWinSize": 5,
"detectInvertedMarker": false,
"errorCorrectionRate": 0.6,
"markerBorderBits": 1,
"maxErroneousBitsInBorderRate": 0.35,
"maxMarkerPerimeterRate": 4.0,
"minCornerDistanceRate": 0.05,
"minDistanceToBorder": 3,
"minMarkerDistanceRate": 0.05,
"minMarkerLengthRatioOriginalImg": 0,
"minMarkerPerimeterRate": 0.03,
"minOtsuStdDev": 5.0,
"minSideLengthCanonicalImg": 32,
"perspectiveRemoveIgnoredMarginPerCell": 0.13,
"perspectiveRemovePixelPerCell": 4,
"polygonalApproxAccuracyRate": 0.03,
"useAruco3Detection": false
},
"ColorLineDetector": {
"line_color": "black",
"line_location": 0.5,
"line_location_a1": 0.3,
"line_location_a2": 0.7
}
}

View File

@ -0,0 +1,184 @@
{
"CommonObjectDetector": {
"dataset": "COCO",
"inputSize": 640,
"nmsThrs": 0.6,
"scoreThrs": 0.4,
"useWidthOrHeight": 1,
"withSegmentation": true,
"datasetPersonVehicle": {
"person": [0.5, 1.8],
"car": [4.1, 1.5],
"bus": [10, 3],
"truck": [-1, -1],
"bike": [-1, -1],
"train": [-1, -1],
"boat": [-1, -1],
"aeroplane": [-1, -1]
},
"datasetDrone": {
"drone": [0.4, 0.2]
},
"datasetCOCO": {
"person": [-1, -1],
"bicycle": [-1, -1],
"car": [-1, -1],
"motorcycle": [-1, -1],
"airplane": [-1, -1],
"bus": [-1, -1],
"train": [-1, -1],
"truck": [-1, -1],
"boat": [-1, -1],
"traffic light": [-1, -1],
"fire hydrant": [-1, -1],
"stop sign": [-1, -1],
"parking meter": [-1, -1],
"bench": [-1, -1],
"bird": [-1, -1],
"cat": [-1, -1],
"dog": [-1, -1],
"horse": [-1, -1],
"sheep": [-1, -1],
"cow": [-1, -1],
"elephant": [-1, -1],
"bear": [-1, -1],
"zebra": [-1, -1],
"giraffe": [-1, -1],
"backpack": [-1, -1],
"umbrella": [-1, -1],
"handbag": [-1, -1],
"tie": [-1, -1],
"suitcase": [-1, -1],
"frisbee": [-1, -1],
"skis": [-1, -1],
"snowboard": [-1, -1],
"sports ball": [-1, -1],
"kite": [-1, -1],
"baseball bat": [-1, -1],
"baseball glove": [-1, -1],
"skateboard": [-1, -1],
"surfboard": [-1, -1],
"tennis racket": [-1, -1],
"bottle": [-1, -1],
"wine glass": [-1, -1],
"cup": [-1, -1],
"fork": [-1, -1],
"knife": [-1, -1],
"spoon": [-1, -1],
"bowl": [-1, -1],
"banana": [-1, -1],
"apple": [-1, -1],
"sandwich": [-1, -1],
"orange": [-1, -1],
"broccoli": [-1, -1],
"carrot": [-1, -1],
"hot dog": [-1, -1],
"pizza": [-1, -1],
"donut": [-1, -1],
"cake": [-1, -1],
"chair": [-1, -1],
"couch": [-1, -1],
"potted plant": [-1, -1],
"bed": [-1, -1],
"dining table": [-1, -1],
"toilet": [-1, -1],
"tv": [-1, -1],
"laptop": [-1, -1],
"mouse": [-1, -1],
"remote": [-1, -1],
"keyboard": [-1, -1],
"cell phone": [-1, -1],
"microwave": [-1, -1],
"oven": [-1, -1],
"toaster": [-1, -1],
"sink": [-1, -1],
"refrigerator": [-1, -1],
"book": [-1, -1],
"clock": [-1, -1],
"vase": [-1, -1],
"scissors": [-1, -1],
"teddy bear": [-1, -1],
"hair drier": [-1, -1],
"toothbrush": [-1, -1]
}
},
"AutoFocusObjectDetector": {
"lock_thres": 5,
"unlock_thres": 5,
"lock_scale_init": 12.0,
"lock_scale": 8.0,
"categories_filter": [],
"keep_unlocked": false,
"use_square_region": false
},
"SingleObjectTracker": {
"algorithm": "siamrpn",
"backend": 0,
"target": 0
},
"LandingMarkerDetector": {
"labels": ["x", "h"],
"maxCandidates": 5
},
"EllipseDetector": {
"radiusInMeter": 0.5,
"preProcessingGaussKernel": 5,
"preProcessingGaussSigma": 1.306,
"thPosition": 1.0,
"maxCenterDistance": 0.05,
"minEdgeLength": 9,
"minOrientedRectSide": 2.984,
"distanceToEllipseContour": 0.111,
"minScore": 0.511,
"minReliability": 0.470,
"ns": 22,
"percentNe": 0.946,
"T_CNC": 0.121,
"T_TCN_L": 0.468,
"T_TCN_P": 0.560,
"thRadius": 0.202
},
"ArucoDetector": {
"dictionaryId": 10,
"markerIds": [-1],
"markerLengths": [0.2],
"adaptiveThreshConstant": 7,
"adaptiveThreshWinSizeMax": 23,
"adaptiveThreshWinSizeMin": 3,
"adaptiveThreshWinSizeStep": 10,
"aprilTagCriticalRad": 0.17453292519,
"aprilTagDeglitch": 0,
"aprilTagMaxLineFitMse": 10.0,
"aprilTagMaxNmaxima": 10,
"aprilTagMinClusterPixels": 5,
"aprilTagMinWhiteBlackDiff": 5,
"aprilTagQuadDecimate": 0.0,
"aprilTagQuadSigma": 0.0,
"cornerRefinementMaxIterations": 30,
"cornerRefinementMethod": 0,
"cornerRefinementMinAccuracy": 0.1,
"cornerRefinementWinSize": 5,
"detectInvertedMarker": false,
"errorCorrectionRate": 0.6,
"markerBorderBits": 1,
"maxErroneousBitsInBorderRate": 0.35,
"maxMarkerPerimeterRate": 4.0,
"minCornerDistanceRate": 0.05,
"minDistanceToBorder": 3,
"minMarkerDistanceRate": 0.05,
"minMarkerLengthRatioOriginalImg": 0,
"minMarkerPerimeterRate": 0.03,
"minOtsuStdDev": 5.0,
"minSideLengthCanonicalImg": 32,
"perspectiveRemoveIgnoredMarginPerCell": 0.13,
"perspectiveRemovePixelPerCell": 4,
"polygonalApproxAccuracyRate": 0.03,
"useAruco3Detection": false
},
"ColorLineDetector": {
"line_color": "black",
"line_location": 0.5,
"line_location_a1": 0.3,
"line_location_a2": 0.7
}
}