5 Commits
v0.1.0 ... p450

Author SHA1 Message Date
Daniel
fb2a60e3ca update. 2024-01-03 17:45:48 +08:00
Daniel
ddb157b374 add a new drawTargetsInFrame() for single aruco tracking. 2023-12-28 12:20:14 +08:00
jario-jin
6bd48bac2b fix install scripts 2023-11-07 18:38:45 +08:00
jario
47bb722038 Common Object Detector supports input of HR images 2023-10-27 20:42:14 +08:00
jario
c46fe93dbb fix MIPI 4K reading 2023-10-27 13:39:03 +08:00
54 changed files with 1428 additions and 1149 deletions

View File

@@ -39,8 +39,8 @@ bool SingleObjectTrackerOCV470Impl::ocv470Setup(SingleObjectTrackerBase* base_)
TrackerNano::Params nano_params;
nano_params.backbone = samples::findFile(backbone);
nano_params.neckhead = samples::findFile(neckhead);
nano_params.backend = this->_backend;
nano_params.target = this->_target;
nano_params.backend = cv::dnn::DNN_BACKEND_CUDA;
nano_params.target = cv::dnn::DNN_TARGET_CUDA;
_nano = TrackerNano::create(nano_params);
}

File diff suppressed because it is too large Load Diff

0
build_on_jetson.sh Executable file → Normal file
View File

0
build_on_x86_cuda.sh Executable file → Normal file
View File

0
build_on_x86_intel.sh Executable file → Normal file
View File

0
gimbal_ctrl/IOs/serial/README.md Executable file → Normal file
View File

0
gimbal_ctrl/IOs/serial/include/serial/impl/unix.h Executable file → Normal file
View File

0
gimbal_ctrl/IOs/serial/include/serial/impl/win.h Executable file → Normal file
View File

0
gimbal_ctrl/IOs/serial/include/serial/serial.h Executable file → Normal file
View File

0
gimbal_ctrl/IOs/serial/include/serial/v8stdint.h Executable file → Normal file
View File

View File

View File

View File

0
gimbal_ctrl/IOs/serial/src/impl/unix.cc Executable file → Normal file
View File

0
gimbal_ctrl/IOs/serial/src/impl/win.cc Executable file → Normal file
View File

0
gimbal_ctrl/IOs/serial/src/serial.cc Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/AT10/AT10_gimbal_crc32.h Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/AT10/AT10_gimbal_driver.cpp Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/AT10/AT10_gimbal_driver.h Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/AT10/AT10_gimbal_funtion.cpp Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/AT10/AT10_gimbal_struct.h Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/CMakeLists.txt Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/FIFO/CMakeLists.txt Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/FIFO/Ring_Fifo.cpp Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/FIFO/Ring_Fifo.h Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/G1/g1_gimbal_crc32.h Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/G1/g1_gimbal_driver.cpp Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/G1/g1_gimbal_driver.h Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/G1/g1_gimbal_funtion.cpp Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/G1/g1_gimbal_struct.h Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/Q10f/Q10f_gimbal_crc32.h Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/Q10f/Q10f_gimbal_driver.cpp Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/Q10f/Q10f_gimbal_driver.h Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/Q10f/Q10f_gimbal_funtion.cpp Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/Q10f/Q10f_gimbal_struct.h Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/amov_gimbal.cpp Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/amov_gimbal.h Executable file → Normal file
View File

0
gimbal_ctrl/driver/src/amov_gimbal_struct.h Executable file → Normal file
View File

View File

@@ -104,6 +104,10 @@ public:
std::string getAlgorithm();
int getBackend();
int getTarget();
double getObjectWs();
double getObjectHs();
int useWidthOrHeight();
protected:
virtual bool setupImpl();
virtual void initImpl(cv::Mat img_, const cv::Rect& bounding_box_);
@@ -113,6 +117,9 @@ protected:
std::string _algorithm;
int _backend;
int _target;
int _use_width_or_height;
double _object_ws;
double _object_hs;
};
@@ -170,3 +177,4 @@ protected:
}
#endif

View File

@@ -12,6 +12,9 @@
#include <arpa/inet.h>
#include <netinet/in.h> // for sockaddr_in
#include <mutex>
#include <condition_variable>
#define SV_RAD2DEG 57.2957795
// #define X86_PLATFORM
// #define JETSON_PLATFORM
@@ -358,7 +361,15 @@ protected:
void _run();
bool _is_running;
// new mutex
std::mutex _frame_mutex;
std::condition_variable_any _frame_empty;
//old flag
bool _is_updated;
std::thread _tt;
cv::VideoCapture _cap;
cv::Mat _frame;
@@ -377,6 +388,21 @@ protected:
double _exposure;
};
void drawTargetsInFrame(
cv::Mat& img_,
const TargetsInFrame& tgts_,
int aruco_track_id,
bool with_all=true,
bool with_category=false,
bool with_tid=false,
bool with_seg=false,
bool with_box=false,
bool with_ell=false,
bool with_aruco=false,
bool with_yaw=false
);
void drawTargetsInFrame(
cv::Mat& img_,
@@ -397,3 +423,4 @@ void list_dir(std::string dir, std::vector<std::string>& files, std::string suff
}
#endif

View File

@@ -1,11 +1,11 @@
{
"CommonObjectDetector": {
"dataset": "COCO",
"dataset": "CAR",
"inputSize": 640,
"nmsThrs": 0.6,
"scoreThrs": 0.4,
"scoreThrs": 0.6,
"useWidthOrHeight": 1,
"withSegmentation": true,
"withSegmentation": false,
"datasetPersonVehicle": {
"person": [0.5, 1.8],
"car": [4.1, 1.5],
@@ -19,6 +19,9 @@
"datasetDrone": {
"drone": [0.4, 0.2]
},
"datasetCAR": {
"car": [0.12, 0.1]
},
"datasetCOCO": {
"person": [-1, -1],
"bicycle": [-1, -1],
@@ -114,7 +117,10 @@
"SingleObjectTracker": {
"algorithm": "nano",
"backend": 0,
"target": 0
"target": 0,
"useWidthOrHeight": 0,
"sigleobjectW":0.126,
"sigleobjectH":-1
},
"MultipleObjectTracker": {
"algorithm": "sort",
@@ -151,7 +157,7 @@
"ArucoDetector": {
"dictionaryId": 10,
"markerIds": [-1],
"markerLengths": [0.2],
"markerLengths": [0.17],
"adaptiveThreshConstant": 7,
"adaptiveThreshWinSizeMax": 23,
"adaptiveThreshWinSizeMin": 3,

View File

@@ -124,7 +124,9 @@ int main(int argc, char *argv[]) {
inputVideo.open(video);
waitTime = 0;
} else {
inputVideo.open(camId);
char pipe[512];
sprintf(pipe, "rtsp://192.168.2.64:554/H264?W=1280&H=720&FPS=30&BR=4000000");
inputVideo.open(pipe);
waitTime = 10;
}

View File

@@ -9,13 +9,13 @@ int main(int argc, char *argv[]) {
// 实例化Aruco检测器类
sv::ArucoDetector ad;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
ad.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
ad.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
// 打开摄像头
sv::Camera cap;
// cap.setWH(640, 480);
// cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
cap.setWH(1280, 720);
cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 4); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
int frame_id = 0;

View File

@@ -40,18 +40,18 @@ int main(int argc, char *argv[]) {
// 实例化 框选目标跟踪类
sv::SingleObjectTracker sot;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
sv::CommonObjectDetector cod;
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
// 打开摄像头
sv::Camera cap;
// cap.setWH(640, 480);
// cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
cap.setWH(1280, 720);
cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 4); // CameraID 0
// cv::VideoCapture cap("/home/amov/SpireCV/test/tracking_1280x720.mp4");
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;

View File

@@ -9,13 +9,13 @@ int main(int argc, char *argv[]) {
// 实例化 椭圆 检测器类
sv::EllipseDetector ed;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
ed.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
ed.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
// 打开摄像头
sv::Camera cap;
// cap.setWH(640, 480);
// cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
cap.setWH(1280, 720);
cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 4); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
int frame_id = 0;

View File

@@ -66,7 +66,7 @@ int main(int argc, char *argv[])
// 设置获取画面分辨率为720P
cap.setWH(1280, 720);
// 设置视频帧率为30帧
cap.setFps(30);
cap.setFps(60);
// 开启相机
cap.open(sv::CameraType::G1);
@@ -77,10 +77,10 @@ int main(int argc, char *argv[])
// 实例化 框选目标跟踪类
sv::SingleObjectTracker sot;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
sv::CommonObjectDetector cod;
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
@@ -248,4 +248,4 @@ void GimableCallback(double &frame_ang_r, double &frame_ang_p, double &frame_ang
count = 0;
}
count++;
}
}

View File

@@ -3,7 +3,7 @@
// 包含SpireCV SDK头文件
#include <sv_world.h>
// #include "gimbal_tools.hpp"
#include <chrono>
using namespace std;
// 云台
@@ -43,11 +43,11 @@ int main(int argc, char *argv[])
// 定义相机
sv::Camera cap;
// 设置相机流媒体地址为 192.168.2.64
cap.setIp("192.168.2.64");
cap.setIp("192.168.1.64");
// 设置获取画面分辨率为720P
cap.setWH(1280, 720);
cap.setWH(640, 480);
// 设置视频帧率为30帧
cap.setFps(30);
cap.setFps(120);
// 开启相机
cap.open(sv::CameraType::G1);
@@ -58,56 +58,63 @@ int main(int argc, char *argv[])
// 实例化 圆形降落标志 检测器类
sv::LandingMarkerDetector lmd;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
lmd.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
lmd.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
int frame_id = 0;
while (1)
{
auto time1 = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
// 实例化SpireCV的 单帧检测结果 接口类 TargetsInFrame
for(uint j = 0; j < 60; j++)
{
sv::TargetsInFrame tgts(frame_id++);
// 读取一帧图像到img
cap.read(img);
cv::resize(img, img, cv::Size(lmd.image_width, lmd.image_height));
//cv::resize(img, img, cv::Size(lmd.image_width, lmd.image_height));
// 执行 降落标志 检测
lmd.detect(img, tgts);
// 可视化检测结果叠加到img上
sv::drawTargetsInFrame(img, tgts);
//sv::drawTargetsInFrame(img, tgts);
// 控制台打印 降落标志 检测结果
printf("Frame-[%d]\n", frame_id);
//printf("Frame-[%d]\n", frame_id);
// 打印当前检测的FPS
printf(" FPS = %.2f\n", tgts.fps);
// 打印当前相机的视场角degree
printf(" FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
for (int i = 0; i < tgts.targets.size(); i++)
{
//printf(" FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
//for (int i = 0; i < tgts.targets.size(); i++)
//{
// 仅追踪 X 类型的标靶
if (tgts.targets[i].category_id == 2)
{
gimbalTrack(tgts.targets[0].cx - 0.5f, tgts.targets[0].cy - 0.5f);
}
//if (tgts.targets[i].category_id == 2)
//{
//gimbalTrack(tgts.targets[0].cx - 0.5f, tgts.targets[0].cy - 0.5f);
//}
printf("Frame-[%d], Marker-[%d]\n", frame_id, i);
//printf("Frame-[%d], Marker-[%d]\n", frame_id, i);
// 打印每个 降落标志 的中心位置cxcy的值域为[0, 1]
printf(" Marker Center (cx, cy) = (%.3f, %.3f)\n", tgts.targets[i].cx, tgts.targets[i].cy);
//printf(" Marker Center (cx, cy) = (%.3f, %.3f)\n", tgts.targets[i].cx, tgts.targets[i].cy);
// 打印每个 降落标志 的外接矩形框的宽度、高度wh的值域为(0, 1]
printf(" Marker Size (w, h) = (%.3f, %.3f)\n", tgts.targets[i].w, tgts.targets[i].h);
//printf(" Marker Size (w, h) = (%.3f, %.3f)\n", tgts.targets[i].w, tgts.targets[i].h);
// 打印每个 降落标志 的置信度
printf(" Marker Score = %.3f\n", tgts.targets[i].score);
//printf(" Marker Score = %.3f\n", tgts.targets[i].score);
// 打印每个 降落标志 的类别,字符串类型,"h"、"x"、"1"、"2"、"3"...
printf(" Marker Category = %s, Category ID = %d\n", tgts.targets[i].category.c_str(), tgts.targets[i].category_id);
//printf(" Marker Category = %s, Category ID = %d\n", tgts.targets[i].category.c_str(), tgts.targets[i].category_id);
// 打印每个 降落标志 的视线角,跟相机视场相关
printf(" Marker Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[i].los_ax, tgts.targets[i].los_ay);
//printf(" Marker Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[i].los_ax, tgts.targets[i].los_ay);
// 打印每个 降落标志 的3D位置在相机坐标系下跟降落标志实际半径、相机参数相关
printf(" Marker Position = (x, y, z) = (%.3f, %.3f, %.3f)\n", tgts.targets[i].px, tgts.targets[i].py, tgts.targets[i].pz);
}
//printf(" Marker Position = (x, y, z) = (%.3f, %.3f, %.3f)\n", tgts.targets[i].px, tgts.targets[i].py, //tgts.targets[i].pz);
//}
// 显示检测结果img
cv::imshow(RGB_WINDOW, img);
cv::waitKey(10);
cv::waitKey(1);
}
auto time2 = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
auto Ts = time2 - time1;
std::cout << "Ts = " << Ts / (1000) << "us" << std::endl;
}
return 0;
@@ -144,4 +151,4 @@ void GimableCallback(double &frame_ang_r, double &frame_ang_p, double &frame_ang
count = 0;
}
count++;
}
}

View File

@@ -79,6 +79,7 @@ int main(int argc, char *argv[])
sv::TargetsInFrame tgts(frame_id++);
// 读取一帧图像到img
cap.read(img);
continue;
// 执行Aruco二维码检测
ad.detect(img, tgts);

View File

@@ -2,7 +2,7 @@
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include <chrono>
using namespace std;
int main(int argc, char *argv[]) {
@@ -13,14 +13,18 @@ int main(int argc, char *argv[]) {
// 打开摄像头
sv::Camera cap;
// cap.setWH(640, 480);
// cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
cap.setWH(640, 480);
cap.setFps(60);
cap.open(sv::CameraType::WEBCAM, 4); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
int frame_id = 0;
while (1)
{
auto time1 = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
// 实例化SpireCV的 单帧检测结果 接口类 TargetsInFrame
for(uint j = 0; j < 60; j++)
{
// 实例化SpireCV的 单帧检测结果 接口类 TargetsInFrame
sv::TargetsInFrame tgts(frame_id++);
// 读取一帧图像到img
@@ -33,39 +37,43 @@ int main(int argc, char *argv[]) {
sv::drawTargetsInFrame(img, tgts);
// 控制台打印 降落标志 检测结果
printf("Frame-[%d]\n", frame_id);
//printf("Frame-[%d]\n", frame_id);
// 打印当前检测的FPS
printf(" FPS = %.2f\n", tgts.fps);
// 打印当前相机的视场角degree
printf(" FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
//printf(" FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
// 打印当前输入图像的像素宽度和高度
printf(" Frame Size (width, height) = (%d, %d)\n", tgts.width, tgts.height);
//printf(" Frame Size (width, height) = (%d, %d)\n", tgts.width, tgts.height);
for (int i=0; i<tgts.targets.size(); i++)
{
printf("Frame-[%d], Marker-[%d]\n", frame_id, i);
//printf("Frame-[%d], Marker-[%d]\n", frame_id, i);
// 打印每个 降落标志 的中心位置cxcy的值域为[0, 1]以及cxcy的像素值
printf(" Marker Center (cx, cy) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
tgts.targets[i].cx, tgts.targets[i].cy,
int(tgts.targets[i].cx * tgts.width),
int(tgts.targets[i].cy * tgts.height));
//printf(" Marker Center (cx, cy) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
//tgts.targets[i].cx, tgts.targets[i].cy,
//int(tgts.targets[i].cx * tgts.width),
//int(tgts.targets[i].cy * tgts.height));
// 打印每个 降落标志 的外接矩形框的宽度、高度wh的值域为(0, 1]以及wh的像素值
printf(" Marker Size (w, h) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
tgts.targets[i].w, tgts.targets[i].h,
int(tgts.targets[i].w * tgts.width),
int(tgts.targets[i].h * tgts.height));
//printf(" Marker Size (w, h) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
//tgts.targets[i].w, tgts.targets[i].h,
//int(tgts.targets[i].w * tgts.width),
//int(tgts.targets[i].h * tgts.height));
// 打印每个 降落标志 的置信度
printf(" Marker Score = %.3f\n", tgts.targets[i].score);
// printf(" Marker Score = %.3f\n", tgts.targets[i].score);
// 打印每个 降落标志 的类别,字符串类型,"h"、"x"、"1"、"2"、"3"...
printf(" Marker Category = %s, Category ID = %d\n", tgts.targets[i].category.c_str(), tgts.targets[i].category_id);
//printf(" Marker Category = %s, Category ID = %d\n", tgts.targets[i].category.c_str(), tgts.targets[i].category_id);
// 打印每个 降落标志 的视线角,跟相机视场相关
printf(" Marker Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[i].los_ax, tgts.targets[i].los_ay);
//printf(" Marker Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[i].los_ax, tgts.targets[i].los_ay);
// 打印每个 降落标志 的3D位置在相机坐标系下跟降落标志实际半径、相机参数相关
printf(" Marker Position = (x, y, z) = (%.3f, %.3f, %.3f)\n", tgts.targets[i].px, tgts.targets[i].py, tgts.targets[i].pz);
// printf(" Marker Position = (x, y, z) = (%.3f, %.3f, %.3f)\n", tgts.targets[i].px, tgts.targets[i].py, tgts.targets[i].pz);
}
// 显示检测结果img
cv::imshow("img", img);
cv::waitKey(10);
}
auto time2 = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
auto Ts = time2 - time1;
std::cout << "Ts = " << Ts / (1000) << "us" << std::endl;
}
return 0;

View File

@@ -28,15 +28,16 @@ int main(int argc, char *argv[]) {
// 实例化 框选目标跟踪类
sv::SingleObjectTracker sot;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
//sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
// sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1920x1080.yaml");
// 打开摄像头
sv::Camera cap;
// cap.setWH(640, 480);
// cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
cap.setWH(1280, 720);
cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 4); // CameraID 0
// cv::VideoCapture cap("/home/amov/SpireCV/test/tracking_1280x720.mp4");
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;

View File

@@ -0,0 +1,34 @@
#include <sv_world.h>
#include <iostream>
#include <string>
#include <chrono>
// Camera FPS smoke test: open a G1 camera at the IP given on the command
// line, then repeatedly read + display one frame and report the per-frame
// latency and the implied frame rate.
int main(int argc, char *argv[])
{
    // The IP is taken from argv[1]; the original dereferenced it unchecked.
    if (argc < 2)
    {
        std::cout << "usage: " << argv[0] << " <camera-ip>" << std::endl;
        return -1;
    }
    sv::Camera cap;
    cap.setIp(argv[1]);
    cap.setWH(640, 480);
    cap.setFps(60);
    cap.open(sv::CameraType::G1);
    //cap.open(sv::CameraType::WEBCAM, 4);
    cv::Mat img;
    while (1)
    {
        auto time1 = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
        cap.read(img);
        cv::imshow("TEST", img);
        cv::waitKey(1);
        auto time2 = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
        // Ts is the elapsed time, in nanoseconds, for ONE read+display cycle.
        // The original divided by 120 frames while its 120-iteration loop was
        // commented out, overstating the FPS 120x; report per-frame figures.
        auto Ts = time2 - time1;
        std::cout << "Ts = " << Ts / 1000 << "us" << std::endl;
        std::cout << "average FPS = " << (1000.0 * 1000 * 1000) / Ts << std::endl;
    }
    return 0;
}

0
scripts/test/test_fps.sh Executable file → Normal file
View File

0
scripts/test/test_gimbal.sh Executable file → Normal file
View File

View File

@@ -368,6 +368,58 @@ void UDPServer::send(const TargetsInFrame& tgts_)
}
/// Draw detections for ONE tracked target onto img_.
/// Overload used for single-ArUco tracking: only targets whose
/// tracked_id equals aruco_track_id are rendered.
///
/// @param img_            image to draw on (modified in place).
/// @param tgts_           detection results for this frame.
/// @param aruco_track_id  tracked_id of the single target to render.
/// @param with_all        render every supported overlay kind.
/// @param with_category   draw the category label above the box.
/// @param with_box        draw the bounding box.
/// @param with_aruco      draw the ArUco marker outline/id.
/// @param with_tid,with_seg,with_ell,with_yaw
///                        accepted for signature parity with the other
///                        drawTargetsInFrame overload; not used here.
void drawTargetsInFrame(
    cv::Mat& img_,
    const TargetsInFrame& tgts_,
    int aruco_track_id,
    bool with_all,
    bool with_category,
    bool with_tid,
    bool with_seg,
    bool with_box,
    bool with_ell,
    bool with_aruco,
    bool with_yaw
)
{
    // If an ROI is present, dim everything outside the first ROI to half
    // brightness: blend the frame with black, then copy the ROI back in.
    if (tgts_.rois.size() > 0)
    {
        cv::Mat image_ret;
        cv::addWeighted(img_, 0.5, cv::Mat::zeros(cv::Size(img_.cols, img_.rows), CV_8UC3), 0, 0, image_ret);
        cv::Rect roi = cv::Rect(tgts_.rois[0].x1, tgts_.rois[0].y1,
                                tgts_.rois[0].x2 - tgts_.rois[0].x1,
                                tgts_.rois[0].y2 - tgts_.rois[0].y1);
        img_(roi).copyTo(image_ret(roi));
        image_ret.copyTo(img_);
    }

    std::vector<std::vector<cv::Point2f> > aruco_corners;
    std::vector<int> aruco_ids;
    // const& — Target may carry corner vectors/strings; avoid a copy per iteration.
    for (const Target& tgt : tgts_.targets)
    {
        // Collect ArUco corners/ids for the tracked target and mark its center.
        if ((with_all || with_aruco) && tgt.has_aruco && (tgt.tracked_id == aruco_track_id))
        {
            std::vector<cv::Point2f> a_corners;
            int a_id;
            if (tgt.getAruco(a_id, a_corners)) { aruco_ids.push_back(a_id); aruco_corners.push_back(a_corners); }
            cv::circle(img_, cv::Point(int(tgt.cx * tgts_.width), int(tgt.cy * tgts_.height)), 4, cv::Scalar(0,255,0), 2);
        }
        // Bounding box (and optional category label) for the tracked target only.
        if ((with_all || with_box) && tgt.has_box && (tgt.tracked_id == aruco_track_id))
        {
            Box b;
            tgt.getBox(b);
            cv::rectangle(img_, cv::Rect(b.x1, b.y1, b.x2-b.x1+1, b.y2-b.y1+1), cv::Scalar(0,0,255), 1, 1, 0);
            if ((with_all || with_category) && tgt.has_category)
            {
                cv::putText(img_, tgt.category, cv::Point(b.x1, b.y1-4), cv::FONT_HERSHEY_DUPLEX, 0.4, cv::Scalar(255,0,0));
            }
        }
    }
    // Draw all collected markers in one call (OpenCV handles outlines + ids).
    if ((with_all || with_aruco) && aruco_ids.size() > 0)
    {
        cv::aruco::drawDetectedMarkers(img_, aruco_corners, aruco_ids);
    }
}
void drawTargetsInFrame(
cv::Mat& img_,
const TargetsInFrame& tgts_,
@@ -809,7 +861,7 @@ VideoWriterBase::VideoWriterBase()
VideoWriterBase::~VideoWriterBase()
{
this->release();
// this->_tt.join();
this->_tt.join();
}
cv::Size VideoWriterBase::getSize()
{
@@ -991,7 +1043,7 @@ CameraBase::CameraBase(CameraType type, int id)
CameraBase::~CameraBase()
{
this->_is_running = false;
// this->_tt.join();
this->_tt.join();
}
void CameraBase::setWH(int width, int height)
{
@@ -1103,33 +1155,57 @@ void CameraBase::_run()
{
// Capture loop body of CameraBase::_run (runs on the background capture
// thread started by the camera object). Grabs frames as fast as the device
// delivers them and publishes each one to readers via _frame_empty.
while (this->_is_running && this->_cap.isOpened())
{
// Previous polling implementation, replaced by the grab/notify scheme below:
//this->_cap >> this->_frame;
//this->_is_updated = true;
//std::this_thread::sleep_for(std::chrono::milliseconds(2));
// grab() is done outside the lock (device I/O); only the decode into
// _frame and the wakeup of waiting readers happen under _frame_mutex.
if(this->_cap.grab())
{
std::lock_guard<std::mutex> locker(this->_frame_mutex);
this->_cap.retrieve(this->_frame);
// Wake every thread blocked in CameraBase::read() waiting for a frame.
this->_frame_empty.notify_all();
}
// Brief yield so this loop does not spin at 100% CPU when grab() fails.
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
}
/// Block until the capture thread publishes a new frame, then copy it out.
///
/// @param image  receives a deep copy of the latest captured frame.
/// @return true when a frame was copied into image.
/// @throws std::runtime_error if no frame arrives within 2 s, or if the
///         resulting image is empty (camera not opened / wrong CAMERA_ID).
bool CameraBase::read(cv::Mat& image)
{
    bool ret = false;
    if (this->_type == CameraType::WEBCAM || this->_type == CameraType::G1 || this->_type == CameraType::MIPI)
    {
        // unique_lock (not lock_guard) so wait_for() can release and
        // re-acquire the mutex while blocked; the original passed the raw,
        // already-held mutex to condition_variable_any::wait_for, which the
        // standard idiom below replaces.
        std::unique_lock<std::mutex> locker(this->_frame_mutex);
        // NOTE(review): no predicate — a spurious wakeup would be treated as
        // "frame ready". _run() only notifies after retrieve(), so _frame is
        // valid whenever a notify occurred; confirm if stronger guarantees
        // are needed.
        if (this->_frame_empty.wait_for(locker, std::chrono::milliseconds(2000)) == std::cv_status::no_timeout)
        {
            this->_frame.copyTo(image);
            ret = true;
        }
        else
        {
            // 2 s without a frame: the capture thread is not producing.
            throw std::runtime_error("SpireCV (101) Camera cannot OPEN, check CAMERA_ID!");
        }
    }
    // Also rejects unsupported camera types, which leave image untouched.
    if (image.cols == 0 || image.rows == 0)
    {
        throw std::runtime_error("SpireCV (101) Camera cannot OPEN, check CAMERA_ID!");
    }
    return ret;
}
void CameraBase::release()
{