Compare commits
No commits in common. "p250" and "master" have entirely different histories.
|
@ -1,6 +1,13 @@
|
|||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
models/
|
||||
confs/
|
||||
ZLM/
|
||||
ZLMediaKit/
|
||||
ffmpeg-4.2.5/
|
||||
nv-codec-headers/
|
||||
*.bz2
|
||||
develop-eggs/
|
||||
dist/
|
||||
eggs/
|
||||
|
@ -15,9 +22,11 @@ share/python-wheels/
|
|||
*.egg
|
||||
MANIFEST
|
||||
.idea/
|
||||
models/
|
||||
models-converting.sh
|
||||
models-downloading.sh
|
||||
calib_webcam_1280x720.yaml
|
||||
calib_webcam_640x480.yaml
|
||||
sv_algorithm_params.json
|
||||
sv_algorithm_params_coco_1280.json
|
||||
sv_algorithm_params_coco_640.json
|
||||
|
||||
# Prerequisites
|
||||
*.d
|
||||
|
|
|
@ -24,6 +24,7 @@ else()
|
|||
option(USE_FFMPEG "BUILD WITH FFMPEG." ON)
|
||||
elseif(PLATFORM STREQUAL "X86_INTEL")
|
||||
add_definitions(-DPLATFORM_X86_INTEL)
|
||||
option(USE_INTEL "BUILD WITH INTEL." ON)
|
||||
option(USE_FFMPEG "BUILD WITH FFMPEG." ON)
|
||||
else()
|
||||
message(FATAL_ERROR "UNSUPPORTED PLATFORM!")
|
||||
|
@ -38,6 +39,10 @@ if(USE_CUDA)
|
|||
message(STATUS "CUDA: ON")
|
||||
endif()
|
||||
|
||||
if(USE_INTEL)
|
||||
add_definitions(-DWITH_INTEL)
|
||||
message(STATUS "INTEL: ON")
|
||||
endif()
|
||||
|
||||
if(USE_GSTREAMER)
|
||||
add_definitions(-DWITH_GSTREAMER)
|
||||
|
@ -69,14 +74,20 @@ include_directories(
|
|||
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/G1
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/AT10
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/Q10f
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/GX40
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/intel
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/landing_det/intel
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/veri/intel
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/cuda
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/landing_det/cuda
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/sot/ocv470
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/mot/sort
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/mot/bytetrack
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/color_line
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/video_io
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/veri/cuda
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/veri/cuda
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/ellipse_det
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/utils
|
||||
)
|
||||
|
@ -139,6 +150,8 @@ file(GLOB DRV_LIB_FILES ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/AT10/
|
|||
list(APPEND driver_SRCS ${DRV_LIB_FILES})
|
||||
file(GLOB DRV_LIB_FILES ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/Q10f/*.cpp)
|
||||
list(APPEND driver_SRCS ${DRV_LIB_FILES})
|
||||
file(GLOB DRV_LIB_FILES ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/GX40/*.cpp)
|
||||
list(APPEND driver_SRCS ${DRV_LIB_FILES})
|
||||
file(GLOB DRV_LIB_FILES ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/*.cpp)
|
||||
list(APPEND driver_SRCS ${DRV_LIB_FILES})
|
||||
|
||||
|
@ -171,6 +184,10 @@ file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/*.cpp)
|
|||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/utils/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/mot/sort/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/mot/bytetrack/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
|
||||
if(USE_CUDA)
|
||||
list(APPEND spirecv_SRCS algorithm/common_det/cuda/yolov7/preprocess.cu)
|
||||
|
@ -184,7 +201,23 @@ if(USE_CUDA)
|
|||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
endif()
|
||||
|
||||
if(USE_INTEL)
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/intel/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/landing_det/intel/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/veri/intel/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
endif()
|
||||
|
||||
if(USE_FFMPEG)
|
||||
if(USE_INTEL)
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/ffmpeg/x86_intel/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
elseif(USE_CUDA)
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/ffmpeg/x86_cuda/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
endif()
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/ffmpeg/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
endif()
|
||||
|
@ -228,10 +261,22 @@ if(USE_CUDA) # PLATFORM_X86_CUDA & PLATFORM_JETSON
|
|||
target_link_libraries(SpireCVSeg sv_world)
|
||||
|
||||
elseif(PLATFORM STREQUAL "X86_INTEL") # Links to Intel-OpenVINO libraries here
|
||||
# Intel-Openvino
|
||||
include_directories(
|
||||
PUBLIC /opt/intel/openvino_2022/runtime/include/
|
||||
PUBLIC /opt/intel/openvino_2022/runtime/include/ie/
|
||||
)
|
||||
link_directories(
|
||||
${InferenceEngine_LIBRARIES}
|
||||
/opt/intel/openvino_2022/runtime/lib/intel64/libopenvino.so
|
||||
)
|
||||
|
||||
add_library(sv_world SHARED ${spirecv_SRCS})
|
||||
target_link_libraries(
|
||||
sv_world ${OpenCV_LIBS}
|
||||
sv_gimbal
|
||||
${InferenceEngine_LIBRARIES}
|
||||
/opt/intel/openvino_2022/runtime/lib/intel64/libopenvino.so
|
||||
)
|
||||
endif()
|
||||
|
||||
|
@ -260,6 +305,8 @@ add_executable(SingleObjectTracking samples/demo/single_object_tracking.cpp)
|
|||
target_link_libraries(SingleObjectTracking sv_world)
|
||||
add_executable(MultipleObjectTracking samples/demo/multiple_object_tracking.cpp)
|
||||
target_link_libraries(MultipleObjectTracking sv_world)
|
||||
add_executable(EvalMOTMetric samples/demo/eval_MOT_metric.cpp)
|
||||
target_link_libraries(EvalMOTMetric -lstdc++fs sv_world)
|
||||
add_executable(ColorLineDetection samples/demo/color_line_detect.cpp)
|
||||
target_link_libraries(ColorLineDetection sv_world)
|
||||
add_executable(UdpDetectionInfoReceiver samples/demo/udp_detection_info_receiver.cpp)
|
||||
|
@ -290,7 +337,9 @@ target_link_libraries(EvalModelOnCocoVal sv_world)
|
|||
|
||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/samples/calib)
|
||||
add_executable(CameraCalibrarion samples/calib/calibrate_camera_charuco.cpp)
|
||||
target_link_libraries(CameraCalibrarion ${OpenCV_LIBS})
|
||||
target_link_libraries(CameraCalibrarion ${OpenCV_LIBS} sv_world)
|
||||
add_executable(CreateMarker samples/calib/create_marker.cpp)
|
||||
target_link_libraries(CreateMarker ${OpenCV_LIBS})
|
||||
|
||||
message(STATUS "CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}")
|
||||
if (NOT DEFINED SV_INSTALL_PREFIX)
|
||||
|
@ -309,7 +358,7 @@ if(USE_CUDA)
|
|||
RUNTIME DESTINATION bin
|
||||
)
|
||||
elseif(PLATFORM STREQUAL "X86_INTEL")
|
||||
install(TARGETS sv_world
|
||||
install(TARGETS sv_gimbal sv_world
|
||||
LIBRARY DESTINATION lib
|
||||
)
|
||||
endif()
|
||||
|
|
|
@ -38,9 +38,9 @@ SpireCV is an **real-time edge perception SDK** built for **intelligent unmanned
|
|||
- **Platform level**:
|
||||
- [x] X86 + Nvidia GPUs (10 series, 20 series, and 30 series graphics cards recommended)
|
||||
- [x] Jetson (AGX Orin/Xavier、Orin NX/Nano、Xavier NX)
|
||||
- [ ] Intel CPU (coming soon)
|
||||
- [ ] Rockchip (coming soon)
|
||||
- [x] Intel CPU
|
||||
- [ ] HUAWEI Ascend (coming soon)
|
||||
- [ ] Rockchip (coming soon)
|
||||
|
||||
## Demos
|
||||
- **QR code detection**
|
||||
|
|
|
@ -12,7 +12,7 @@ SpireCV是一个专为**智能无人系统**打造的**边缘实时感知SDK**
|
|||
|
||||
## 快速入门
|
||||
|
||||
- 安装及使用:[SpireCV使用手册](https://docs.amovlab.com/Spire_CV_Amov/#/)、[SpireCV开发者套件指南](https://docs.amovlab.com/spirecvkit/#/)
|
||||
- 安装及使用:[SpireCV使用手册](https://docs.amovlab.com/Spire_CV_Amov/#/)([wolai版本](https://www.wolai.com/4qWFM6aZmtpQE6jj7hnNMW))、[SpireCV开发者套件指南](https://docs.amovlab.com/spirecvkit/#/)
|
||||
- 需掌握C++语言基础、CMake编译工具基础。
|
||||
- 需要掌握OpenCV视觉库基础,了解CUDA、OpenVINO、RKNN和CANN等计算库。
|
||||
- 需要了解ROS基本概念及基本操作。
|
||||
|
@ -38,9 +38,9 @@ SpireCV是一个专为**智能无人系统**打造的**边缘实时感知SDK**
|
|||
- **平台层**:
|
||||
- [x] X86+Nvidia GPU(推荐10系、20系、30系显卡)
|
||||
- [x] Jetson(AGX Orin/Xavier、Orin NX/Nano、Xavier NX)
|
||||
- [ ] Intel CPU(推进中)
|
||||
- [ ] Rockchip(推进中)
|
||||
- [x] Intel CPU
|
||||
- [ ] HUAWEI Ascend(推进中)
|
||||
- [ ] Rockchip(推进中)
|
||||
|
||||
## 功能展示
|
||||
- **二维码检测**
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
#include "common_det_cuda_impl.h"
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include "sv_util.h"
|
||||
|
||||
#define SV_MODEL_DIR "/SpireCV/models/"
|
||||
#define SV_ROOT_DIR "/SpireCV/"
|
||||
|
@ -103,7 +105,7 @@ void infer_seg(IExecutionContext& context, cudaStream_t& stream, void **buffers,
|
|||
CUDA_CHECK(cudaMemcpyAsync(output2, buffers[2], batchSize * kOutputSize2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
|
||||
cudaStreamSynchronize(stream);
|
||||
}
|
||||
void CommonObjectDetectorCUDAImpl::_prepare_buffers(int input_h, int input_w) {
|
||||
void CommonObjectDetectorCUDAImpl::_prepare_buffers(int input_h, int input_w, int batchsize) {
|
||||
assert(this->_engine->getNbBindings() == 2);
|
||||
// In order to bind the buffers, we need to know the names of the input and output tensors.
|
||||
// Note that indices are guaranteed to be less than IEngine::getNbBindings()
|
||||
|
@ -112,12 +114,12 @@ void CommonObjectDetectorCUDAImpl::_prepare_buffers(int input_h, int input_w) {
|
|||
assert(inputIndex == 0);
|
||||
assert(outputIndex == 1);
|
||||
// Create GPU buffers on device
|
||||
CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[0]), kBatchSize * 3 * input_h * input_w * sizeof(float)));
|
||||
CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[1]), kBatchSize * kOutputSize * sizeof(float)));
|
||||
CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[0]), batchsize * 3 * input_h * input_w * sizeof(float)));
|
||||
CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[1]), batchsize * kOutputSize * sizeof(float)));
|
||||
|
||||
this->_cpu_output_buffer = new float[kBatchSize * kOutputSize];
|
||||
this->_cpu_output_buffer = new float[batchsize * kOutputSize];
|
||||
}
|
||||
void CommonObjectDetectorCUDAImpl::_prepare_buffers_seg(int input_h, int input_w) {
|
||||
void CommonObjectDetectorCUDAImpl::_prepare_buffers_seg(int input_h, int input_w, int batchsize) {
|
||||
assert(this->_engine->getNbBindings() == 3);
|
||||
// In order to bind the buffers, we need to know the names of the input and output tensors.
|
||||
// Note that indices are guaranteed to be less than IEngine::getNbBindings()
|
||||
|
@ -129,13 +131,13 @@ void CommonObjectDetectorCUDAImpl::_prepare_buffers_seg(int input_h, int input_w
|
|||
assert(outputIndex2 == 2);
|
||||
|
||||
// Create GPU buffers on device
|
||||
CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[0]), kBatchSize * 3 * input_h * input_w * sizeof(float)));
|
||||
CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[1]), kBatchSize * kOutputSize1 * sizeof(float)));
|
||||
CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[2]), kBatchSize * kOutputSize2 * sizeof(float)));
|
||||
CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[0]), batchsize * 3 * input_h * input_w * sizeof(float)));
|
||||
CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[1]), batchsize * kOutputSize1 * sizeof(float)));
|
||||
CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[2]), batchsize * kOutputSize2 * sizeof(float)));
|
||||
|
||||
// Alloc CPU buffers
|
||||
this->_cpu_output_buffer1 = new float[kBatchSize * kOutputSize1];
|
||||
this->_cpu_output_buffer2 = new float[kBatchSize * kOutputSize2];
|
||||
this->_cpu_output_buffer1 = new float[batchsize * kOutputSize1];
|
||||
this->_cpu_output_buffer2 = new float[batchsize * kOutputSize2];
|
||||
}
|
||||
void deserialize_engine(std::string& engine_name, IRuntime** runtime, ICudaEngine** engine, IExecutionContext** context) {
|
||||
std::ifstream file(engine_name, std::ios::binary);
|
||||
|
@ -172,7 +174,8 @@ void CommonObjectDetectorCUDAImpl::cudaDetect(
|
|||
std::vector<float>& boxes_h_,
|
||||
std::vector<int>& boxes_label_,
|
||||
std::vector<float>& boxes_score_,
|
||||
std::vector<cv::Mat>& boxes_seg_
|
||||
std::vector<cv::Mat>& boxes_seg_,
|
||||
bool input_4k_
|
||||
)
|
||||
{
|
||||
#ifdef WITH_CUDA
|
||||
|
@ -183,9 +186,51 @@ void CommonObjectDetectorCUDAImpl::cudaDetect(
|
|||
double thrs_nms = base_->getThrsNms();
|
||||
|
||||
std::vector<cv::Mat> img_batch;
|
||||
img_batch.push_back(img_);
|
||||
// Preprocess
|
||||
cuda_batch_preprocess(img_batch, this->_gpu_buffers[0], input_w, input_h, this->_stream);
|
||||
if (input_4k_)
|
||||
{
|
||||
if (img_.cols == 3840 && img_.rows == 2160)
|
||||
{
|
||||
cv::Mat patch1, patch2, patch3, patch4, patch5, patch6;
|
||||
|
||||
img_.colRange(200, 1480).rowRange(0, 1280).copyTo(patch1);
|
||||
img_.colRange(1280, 2560).rowRange(0, 1280).copyTo(patch2);
|
||||
img_.colRange(2360, 3640).rowRange(0, 1280).copyTo(patch3);
|
||||
|
||||
img_.colRange(200, 1480).rowRange(880, 2160).copyTo(patch4);
|
||||
img_.colRange(1280, 2560).rowRange(880, 2160).copyTo(patch5);
|
||||
img_.colRange(2360, 3640).rowRange(880, 2160).copyTo(patch6);
|
||||
|
||||
img_batch.push_back(patch1);
|
||||
img_batch.push_back(patch2);
|
||||
img_batch.push_back(patch3);
|
||||
img_batch.push_back(patch4);
|
||||
img_batch.push_back(patch5);
|
||||
img_batch.push_back(patch6);
|
||||
}
|
||||
else
|
||||
{
|
||||
throw std::runtime_error("SpireCV (106) Input image is NOT 4K (3840 x 2160)!");
|
||||
}
|
||||
if (with_segmentation)
|
||||
{
|
||||
throw std::runtime_error("SpireCV (106) Resolution 4K DO NOT Support Segmentation!");
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
img_batch.push_back(img_);
|
||||
}
|
||||
|
||||
if (input_4k_)
|
||||
{
|
||||
// Preprocess
|
||||
cuda_batch_preprocess(img_batch, this->_gpu_buffers[0], 1280, 1280, this->_stream);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Preprocess
|
||||
cuda_batch_preprocess(img_batch, this->_gpu_buffers[0], input_w, input_h, this->_stream);
|
||||
}
|
||||
|
||||
// Run inference
|
||||
if (with_segmentation)
|
||||
|
@ -194,7 +239,14 @@ void CommonObjectDetectorCUDAImpl::cudaDetect(
|
|||
}
|
||||
else
|
||||
{
|
||||
infer(*this->_context, this->_stream, (void**)this->_gpu_buffers, this->_cpu_output_buffer, kBatchSize);
|
||||
if (input_4k_)
|
||||
{
|
||||
infer(*this->_context, this->_stream, (void**)this->_gpu_buffers, this->_cpu_output_buffer, 6);
|
||||
}
|
||||
else
|
||||
{
|
||||
infer(*this->_context, this->_stream, (void**)this->_gpu_buffers, this->_cpu_output_buffer, kBatchSize);
|
||||
}
|
||||
}
|
||||
|
||||
// NMS
|
||||
|
@ -208,45 +260,102 @@ void CommonObjectDetectorCUDAImpl::cudaDetect(
|
|||
batch_nms(res_batch, this->_cpu_output_buffer, img_batch.size(), kOutputSize, thrs_conf, thrs_nms);
|
||||
}
|
||||
|
||||
std::vector<Detection> res = res_batch[0];
|
||||
std::vector<cv::Mat> masks;
|
||||
if (with_segmentation)
|
||||
|
||||
if (input_4k_)
|
||||
{
|
||||
masks = process_mask(&(this->_cpu_output_buffer2[0]), kOutputSize2, res, input_h, input_w);
|
||||
}
|
||||
|
||||
|
||||
|
||||
for (size_t j = 0; j < res.size(); j++) {
|
||||
cv::Rect r = get_rect(img_, res[j].bbox, input_h, input_w);
|
||||
if (r.x < 0) r.x = 0;
|
||||
if (r.y < 0) r.y = 0;
|
||||
if (r.x + r.width >= img_.cols) r.width = img_.cols - r.x - 1;
|
||||
if (r.y + r.height >= img_.rows) r.height = img_.rows - r.y - 1;
|
||||
if (r.width > 5 && r.height > 5)
|
||||
for (size_t k = 0; k < res_batch.size(); k++)
|
||||
{
|
||||
// cv::rectangle(img_show, r, cv::Scalar(0, 0, 255), 2);
|
||||
// cv::putText(img_show, vehiclenames[(int)res[j].class_id], cv::Point(r.x, r.y - 1), cv::FONT_HERSHEY_PLAIN, 2.2, cv::Scalar(0, 0, 255), 2);
|
||||
boxes_x_.push_back(r.x);
|
||||
boxes_y_.push_back(r.y);
|
||||
boxes_w_.push_back(r.width);
|
||||
boxes_h_.push_back(r.height);
|
||||
|
||||
boxes_label_.push_back((int)res[j].class_id);
|
||||
boxes_score_.push_back(res[j].conf);
|
||||
|
||||
if (with_segmentation)
|
||||
std::vector<Detection> res = res_batch[k];
|
||||
for (size_t j = 0; j < res.size(); j++)
|
||||
{
|
||||
cv::Mat mask_j = masks[j].clone();
|
||||
boxes_seg_.push_back(mask_j);
|
||||
cv::Rect r = get_rect(img_batch[k], res[j].bbox, 1280, 1280);
|
||||
if (r.x < 0) r.x = 0;
|
||||
if (r.y < 0) r.y = 0;
|
||||
if (r.x + r.width >= 1280) r.width = 1280 - r.x - 1;
|
||||
if (r.y + r.height >= 1280) r.height = 1280 - r.y - 1;
|
||||
if (r.width > 3 && r.height > 3)
|
||||
{
|
||||
if (0 == k)
|
||||
{
|
||||
boxes_x_.push_back(r.x + 200);
|
||||
boxes_y_.push_back(r.y);
|
||||
}
|
||||
else if (1 == k)
|
||||
{
|
||||
boxes_x_.push_back(r.x + 1280);
|
||||
boxes_y_.push_back(r.y);
|
||||
}
|
||||
else if (2 == k)
|
||||
{
|
||||
boxes_x_.push_back(r.x + 2360);
|
||||
boxes_y_.push_back(r.y);
|
||||
}
|
||||
else if (3 == k)
|
||||
{
|
||||
boxes_x_.push_back(r.x + 200);
|
||||
boxes_y_.push_back(r.y + 880);
|
||||
}
|
||||
else if (4 == k)
|
||||
{
|
||||
boxes_x_.push_back(r.x + 1280);
|
||||
boxes_y_.push_back(r.y + 880);
|
||||
}
|
||||
else if (5 == k)
|
||||
{
|
||||
boxes_x_.push_back(r.x + 2360);
|
||||
boxes_y_.push_back(r.y + 880);
|
||||
}
|
||||
boxes_w_.push_back(r.width);
|
||||
boxes_h_.push_back(r.height);
|
||||
|
||||
boxes_label_.push_back((int)res[j].class_id);
|
||||
boxes_score_.push_back(res[j].conf);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
std::vector<Detection> res = res_batch[0];
|
||||
std::vector<cv::Mat> masks;
|
||||
if (with_segmentation)
|
||||
{
|
||||
masks = process_mask(&(this->_cpu_output_buffer2[0]), kOutputSize2, res, input_h, input_w);
|
||||
}
|
||||
|
||||
for (size_t j = 0; j < res.size(); j++)
|
||||
{
|
||||
cv::Rect r = get_rect(img_, res[j].bbox, input_h, input_w);
|
||||
if (r.x < 0) r.x = 0;
|
||||
if (r.y < 0) r.y = 0;
|
||||
if (r.x + r.width >= img_.cols) r.width = img_.cols - r.x - 1;
|
||||
if (r.y + r.height >= img_.rows) r.height = img_.rows - r.y - 1;
|
||||
if (r.width > 5 && r.height > 5)
|
||||
{
|
||||
// cv::rectangle(img_show, r, cv::Scalar(0, 0, 255), 2);
|
||||
// cv::putText(img_show, vehiclenames[(int)res[j].class_id], cv::Point(r.x, r.y - 1), cv::FONT_HERSHEY_PLAIN, 2.2, cv::Scalar(0, 0, 255), 2);
|
||||
boxes_x_.push_back(r.x);
|
||||
boxes_y_.push_back(r.y);
|
||||
boxes_w_.push_back(r.width);
|
||||
boxes_h_.push_back(r.height);
|
||||
|
||||
boxes_label_.push_back((int)res[j].class_id);
|
||||
boxes_score_.push_back(res[j].conf);
|
||||
|
||||
if (with_segmentation)
|
||||
{
|
||||
cv::Mat mask_j = masks[j].clone();
|
||||
boxes_seg_.push_back(mask_j);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
bool CommonObjectDetectorCUDAImpl::cudaSetup(CommonObjectDetectorBase* base_)
|
||||
bool CommonObjectDetectorCUDAImpl::cudaSetup(CommonObjectDetectorBase* base_, bool input_4k_)
|
||||
{
|
||||
#ifdef WITH_CUDA
|
||||
std::string dataset = base_->getDataset();
|
||||
|
@ -255,23 +364,62 @@ bool CommonObjectDetectorCUDAImpl::cudaSetup(CommonObjectDetectorBase* base_)
|
|||
bool with_segmentation = base_->withSegmentation();
|
||||
double thrs_conf = base_->getThrsConf();
|
||||
double thrs_nms = base_->getThrsNms();
|
||||
std::string model = base_->getModel();
|
||||
int bs = base_->getBatchSize();
|
||||
char bs_c[8];
|
||||
sprintf(bs_c, "%d", bs);
|
||||
std::string bs_s(bs_c);
|
||||
|
||||
std::string engine_fn = get_home() + SV_MODEL_DIR + dataset + ".engine";
|
||||
std::vector<std::string> files;
|
||||
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.engine", "Nv-" + dataset + "-yolov5" + model + "_b" + bs_s + "_c");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
engine_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
|
||||
if (input_w == 1280)
|
||||
{
|
||||
engine_fn = get_home() + SV_MODEL_DIR + dataset + "_HD.engine";
|
||||
files.clear();
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.engine", "Nv-" + dataset + "-yolov5" + model + "6_b" + bs_s + "_c");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
engine_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
else
|
||||
{
|
||||
engine_fn = get_home() + SV_MODEL_DIR + dataset + "_HD.engine";
|
||||
}
|
||||
}
|
||||
if (with_segmentation)
|
||||
{
|
||||
base_->setInputH(640);
|
||||
base_->setInputW(640);
|
||||
engine_fn = get_home() + SV_MODEL_DIR + dataset + "_SEG.engine";
|
||||
files.clear();
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.engine", "Nv-" + dataset + "-yolov5" + model + "_seg_b" + bs_s + "_c");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
engine_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
else
|
||||
{
|
||||
engine_fn = get_home() + SV_MODEL_DIR + dataset + "_SEG.engine";
|
||||
}
|
||||
}
|
||||
std::cout << "Load: " << engine_fn << std::endl;
|
||||
if (!is_file_exist(engine_fn))
|
||||
{
|
||||
throw std::runtime_error("SpireCV (104) Error loading the CommonObject TensorRT model (File Not Exist)");
|
||||
}
|
||||
|
||||
if (input_4k_ && with_segmentation)
|
||||
{
|
||||
throw std::runtime_error("SpireCV (106) Resolution 4K DO NOT Support Segmentation!");
|
||||
}
|
||||
|
||||
deserialize_engine(engine_fn, &this->_runtime, &this->_engine, &this->_context);
|
||||
CUDA_CHECK(cudaStreamCreate(&this->_stream));
|
||||
|
@ -282,29 +430,25 @@ bool CommonObjectDetectorCUDAImpl::cudaSetup(CommonObjectDetectorBase* base_)
|
|||
if (with_segmentation)
|
||||
{
|
||||
// Prepare cpu and gpu buffers
|
||||
this->_prepare_buffers_seg(input_h, input_w);
|
||||
this->_prepare_buffers_seg(input_h, input_w, 1);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Prepare cpu and gpu buffers
|
||||
this->_prepare_buffers(input_h, input_w);
|
||||
if (input_4k_)
|
||||
{
|
||||
// Prepare cpu and gpu buffers
|
||||
this->_prepare_buffers(input_h, input_w, 6);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Prepare cpu and gpu buffers
|
||||
this->_prepare_buffers(input_h, input_w, 1);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -26,7 +26,7 @@ public:
|
|||
CommonObjectDetectorCUDAImpl();
|
||||
~CommonObjectDetectorCUDAImpl();
|
||||
|
||||
bool cudaSetup(CommonObjectDetectorBase* base_);
|
||||
bool cudaSetup(CommonObjectDetectorBase* base_, bool input_4k_);
|
||||
void cudaDetect(
|
||||
CommonObjectDetectorBase* base_,
|
||||
cv::Mat img_,
|
||||
|
@ -36,12 +36,13 @@ public:
|
|||
std::vector<float>& boxes_h_,
|
||||
std::vector<int>& boxes_label_,
|
||||
std::vector<float>& boxes_score_,
|
||||
std::vector<cv::Mat>& boxes_seg_
|
||||
std::vector<cv::Mat>& boxes_seg_,
|
||||
bool input_4k_
|
||||
);
|
||||
|
||||
#ifdef WITH_CUDA
|
||||
void _prepare_buffers_seg(int input_h, int input_w);
|
||||
void _prepare_buffers(int input_h, int input_w);
|
||||
void _prepare_buffers_seg(int input_h, int input_w, int batchsize);
|
||||
void _prepare_buffers(int input_h, int input_w, int batchsize);
|
||||
nvinfer1::IExecutionContext* _context;
|
||||
nvinfer1::IRuntime* _runtime;
|
||||
nvinfer1::ICudaEngine* _engine;
|
||||
|
|
|
@ -0,0 +1,479 @@
|
|||
#include "common_det_intel_impl.h"
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include "sv_util.h"
|
||||
|
||||
#define SV_MODEL_DIR "/SpireCV/models/"
|
||||
#define SV_ROOT_DIR "/SpireCV/"
|
||||
|
||||
namespace sv
|
||||
{
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
using namespace dnn;
|
||||
#endif
|
||||
|
||||
float sigmoid_function(float a)
|
||||
{
|
||||
float b = 1. / (1. + exp(-a));
|
||||
return b;
|
||||
}
|
||||
|
||||
cv::Mat letterbox(cv::Mat &img_, std::vector<float> &paddings)
|
||||
{
|
||||
std::vector<int> new_shape = {640, 640};
|
||||
|
||||
// Get current image shape [height, width]
|
||||
int img_h = img_.rows;
|
||||
int img_w = img_.cols;
|
||||
|
||||
// Compute scale ratio(new / old) and target resized shape
|
||||
float scale = std::min(new_shape[1] * 1.0 / img_h, new_shape[0] * 1.0 / img_w);
|
||||
int resize_h = int(round(img_h * scale));
|
||||
int resize_w = int(round(img_w * scale));
|
||||
paddings[0] = scale;
|
||||
|
||||
// Compute padding
|
||||
int pad_h = new_shape[1] - resize_h;
|
||||
int pad_w = new_shape[0] - resize_w;
|
||||
|
||||
// Resize and pad image while meeting stride-multiple constraints
|
||||
cv::Mat resized_img;
|
||||
cv::resize(img_, resized_img, cv::Size(resize_w, resize_h));
|
||||
|
||||
// divide padding into 2 sides
|
||||
float half_h = pad_h * 1.0 / 2;
|
||||
float half_w = pad_w * 1.0 / 2;
|
||||
paddings[1] = half_h;
|
||||
paddings[2] = half_w;
|
||||
|
||||
// Compute padding boarder
|
||||
int top = int(round(half_h - 0.1));
|
||||
int bottom = int(round(half_h + 0.1));
|
||||
int left = int(round(half_w - 0.1));
|
||||
int right = int(round(half_w + 0.1));
|
||||
|
||||
// Add border
|
||||
cv::copyMakeBorder(resized_img, resized_img, top, bottom, left, right, 0, cv::Scalar(114, 114, 114));
|
||||
|
||||
return resized_img;
|
||||
}
|
||||
|
||||
CommonObjectDetectorIntelImpl::CommonObjectDetectorIntelImpl()
|
||||
{
|
||||
}
|
||||
|
||||
CommonObjectDetectorIntelImpl::~CommonObjectDetectorIntelImpl()
|
||||
{
|
||||
}
|
||||
|
||||
void CommonObjectDetectorIntelImpl::intelDetect(
|
||||
CommonObjectDetectorBase *base_,
|
||||
cv::Mat img_,
|
||||
std::vector<float> &boxes_x_,
|
||||
std::vector<float> &boxes_y_,
|
||||
std::vector<float> &boxes_w_,
|
||||
std::vector<float> &boxes_h_,
|
||||
std::vector<int> &boxes_label_,
|
||||
std::vector<float> &boxes_score_,
|
||||
std::vector<cv::Mat> &boxes_seg_,
|
||||
bool input_4k_)
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
int input_h = base_->getInputH();
|
||||
int input_w = base_->getInputW();
|
||||
bool with_segmentation = base_->withSegmentation();
|
||||
double thrs_conf = base_->getThrsConf();
|
||||
double thrs_nms = base_->getThrsNms();
|
||||
|
||||
if (with_segmentation)
|
||||
{
|
||||
std::vector<float> paddings(3); // scale, half_h, half_w
|
||||
this->preprocess_img_seg(img_, paddings);
|
||||
|
||||
infer_request.start_async();
|
||||
infer_request.wait();
|
||||
|
||||
// Postprocess
|
||||
this->postprocess_img_seg(img_, paddings, boxes_x_, boxes_y_, boxes_w_, boxes_h_, boxes_label_, boxes_score_, boxes_seg_, thrs_conf, thrs_nms);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Preprocess
|
||||
this->preprocess_img(img_);
|
||||
|
||||
// Run inference
|
||||
infer_request.start_async();
|
||||
infer_request.wait();
|
||||
|
||||
// Postprocess
|
||||
this->postprocess_img(boxes_x_, boxes_y_, boxes_w_, boxes_h_, boxes_label_, boxes_score_, thrs_conf, thrs_nms);
|
||||
}
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
bool CommonObjectDetectorIntelImpl::intelSetup(CommonObjectDetectorBase *base_, bool input_4k_)
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
ov::Core core;
|
||||
std::string dataset = base_->getDataset();
|
||||
double thrs_conf = base_->getThrsConf();
|
||||
double thrs_nms = base_->getThrsNms();
|
||||
inpHeight = base_->getInputH();
|
||||
inpWidth = base_->getInputW();
|
||||
with_segmentation = base_->withSegmentation();
|
||||
std::string model = base_->getModel();
|
||||
|
||||
std::string openvino_fn = get_home() + SV_MODEL_DIR + dataset + ".onnx";
|
||||
std::vector<std::string> files;
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.onnx", "Int-" + dataset + "-yolov5" + model + "_c");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
openvino_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
|
||||
if (inpWidth == 1280)
|
||||
{
|
||||
files.clear();
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.onnx", "Int-" + dataset + "-yolov5" + model + "6_c");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
openvino_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
else
|
||||
{
|
||||
openvino_fn = get_home() + SV_MODEL_DIR + dataset + "_HD.onnx";
|
||||
}
|
||||
}
|
||||
if (with_segmentation)
|
||||
{
|
||||
base_->setInputH(640);
|
||||
base_->setInputW(640);
|
||||
files.clear();
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.onnx", "Int-" + dataset + "-yolov5" + model + "_seg_c");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
openvino_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
else
|
||||
{
|
||||
openvino_fn = get_home() + SV_MODEL_DIR + dataset + "_SEG.onnx";
|
||||
}
|
||||
}
|
||||
std::cout << "Load: " << openvino_fn << std::endl;
|
||||
if (!is_file_exist(openvino_fn))
|
||||
{
|
||||
throw std::runtime_error("SpireCV (104) Error loading the CommonObject OpenVINO model (File Not Exist)");
|
||||
}
|
||||
|
||||
if (input_4k_ && with_segmentation)
|
||||
{
|
||||
throw std::runtime_error("SpireCV (106) Resolution 4K DO NOT Support Segmentation!");
|
||||
}
|
||||
|
||||
if (with_segmentation)
|
||||
{
|
||||
this->compiled_model = core.compile_model(openvino_fn, "GPU");
|
||||
this->infer_request = compiled_model.create_infer_request();
|
||||
}
|
||||
else
|
||||
{
|
||||
std::shared_ptr<ov::Model> model_ = core.read_model(openvino_fn);
|
||||
ov::preprocess::PrePostProcessor Pre_P = ov::preprocess::PrePostProcessor(model_);
|
||||
Pre_P.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC").set_color_format(ov::preprocess::ColorFormat::RGB);
|
||||
Pre_P.input().preprocess().convert_element_type(ov::element::f32).convert_color(ov::preprocess::ColorFormat::RGB).scale({255, 255, 255}); // .scale({ 112, 112, 112 });
|
||||
Pre_P.input().model().set_layout("NCHW");
|
||||
Pre_P.output().tensor().set_element_type(ov::element::f32);
|
||||
model_ = Pre_P.build();
|
||||
this->compiled_model = core.compile_model(model_, "GPU");
|
||||
this->infer_request = compiled_model.create_infer_request();
|
||||
}
|
||||
return true;
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
void CommonObjectDetectorIntelImpl::preprocess_img(cv::Mat &img_)
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
float width = img_.cols;
|
||||
float height = img_.rows;
|
||||
cv::Size new_shape = cv::Size(inpHeight, inpWidth);
|
||||
float r = float(new_shape.width / max(width, height));
|
||||
int new_unpadW = int(round(width * r));
|
||||
int new_unpadH = int(round(height * r));
|
||||
|
||||
cv::resize(img_, resize.resized_image, cv::Size(new_unpadW, new_unpadH), 0, 0, cv::INTER_AREA);
|
||||
resize.resized_image = resize.resized_image;
|
||||
resize.dw = new_shape.width - new_unpadW;
|
||||
resize.dh = new_shape.height - new_unpadH;
|
||||
cv::Scalar color = cv::Scalar(100, 100, 100);
|
||||
cv::copyMakeBorder(resize.resized_image, resize.resized_image, 0, resize.dh, 0, resize.dw, cv::BORDER_CONSTANT, color);
|
||||
|
||||
this->rx = (float)img_.cols / (float)(resize.resized_image.cols - resize.dw);
|
||||
this->ry = (float)img_.rows / (float)(resize.resized_image.rows - resize.dh);
|
||||
if (with_segmentation)
|
||||
{
|
||||
cv::Mat blob = cv::dnn::blobFromImage(resize.resized_image, 1 / 255.0, cv::Size(640, 640), cv::Scalar(0, 0, 0), true);
|
||||
auto input_port = compiled_model.input();
|
||||
ov::Tensor input_tensor(input_port.get_element_type(), input_port.get_shape(), blob.ptr(0));
|
||||
infer_request.set_input_tensor(input_tensor);
|
||||
}
|
||||
else
|
||||
{
|
||||
float *input_data = (float *)resize.resized_image.data;
|
||||
input_tensor = ov::Tensor(compiled_model.input().get_element_type(), compiled_model.input().get_shape(), input_data);
|
||||
infer_request.set_input_tensor(input_tensor);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void CommonObjectDetectorIntelImpl::preprocess_img_seg(cv::Mat &img_, std::vector<float> &paddings)
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
cv::Mat masked_img;
|
||||
cv::Mat resized_img = letterbox(img_, paddings); // resize to (640,640) by letterbox
|
||||
// BGR->RGB, u8(0-255)->f32(0.0-1.0), HWC->NCHW
|
||||
cv::Mat blob = cv::dnn::blobFromImage(resized_img, 1 / 255.0, cv::Size(640, 640), cv::Scalar(0, 0, 0), true);
|
||||
|
||||
// Get input port for model with one input
|
||||
auto input_port = compiled_model.input();
|
||||
// Create tensor from external memory
|
||||
ov::Tensor input_tensor(input_port.get_element_type(), input_port.get_shape(), blob.ptr(0));
|
||||
// Set input tensor for model with one input
|
||||
infer_request.set_input_tensor(input_tensor);
|
||||
#endif
|
||||
}
|
||||
|
||||
void CommonObjectDetectorIntelImpl::postprocess_img_seg(cv::Mat &img_,
|
||||
std::vector<float> &paddings,
|
||||
std::vector<float> &boxes_x_,
|
||||
std::vector<float> &boxes_y_,
|
||||
std::vector<float> &boxes_w_,
|
||||
std::vector<float> &boxes_h_,
|
||||
std::vector<int> &boxes_label_,
|
||||
std::vector<float> &boxes_score_,
|
||||
std::vector<cv::Mat> &boxes_seg_,
|
||||
double &thrs_conf,
|
||||
double &thrs_nms)
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
const ov::Tensor &detect = infer_request.get_output_tensor(0);
|
||||
ov::Shape detect_shape = detect.get_shape();
|
||||
const ov::Tensor &proto = infer_request.get_output_tensor(1);
|
||||
ov::Shape proto_shape = proto.get_shape();
|
||||
|
||||
cv::Mat detect_buffer(detect_shape[1], detect_shape[2], CV_32F, detect.data());
|
||||
cv::Mat proto_buffer(proto_shape[1], proto_shape[2] * proto_shape[3], CV_32F, proto.data());
|
||||
|
||||
cv::RNG rng;
|
||||
float conf_threshold = thrs_conf;
|
||||
float nms_threshold = thrs_nms;
|
||||
std::vector<cv::Rect> boxes;
|
||||
std::vector<int> class_ids;
|
||||
std::vector<float> class_scores;
|
||||
std::vector<float> confidences;
|
||||
std::vector<cv::Mat> masks;
|
||||
|
||||
float scale = paddings[0];
|
||||
for (int i = 0; i < detect_buffer.rows; i++)
|
||||
{
|
||||
float confidence = detect_buffer.at<float>(i, 4);
|
||||
if (confidence < conf_threshold)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
cv::Mat classes_scores = detect_buffer.row(i).colRange(5, 85);
|
||||
cv::Point class_id;
|
||||
double score;
|
||||
cv::minMaxLoc(classes_scores, NULL, &score, NULL, &class_id);
|
||||
|
||||
// class score: 0~1
|
||||
if (score > 0.25)
|
||||
{
|
||||
cv::Mat mask = detect_buffer.row(i).colRange(85, 117);
|
||||
float cx = detect_buffer.at<float>(i, 0);
|
||||
float cy = detect_buffer.at<float>(i, 1);
|
||||
float w = detect_buffer.at<float>(i, 2);
|
||||
float h = detect_buffer.at<float>(i, 3);
|
||||
int left = static_cast<int>((cx - 0.5 * w - paddings[2]) / scale);
|
||||
int top = static_cast<int>((cy - 0.5 * h - paddings[1]) / scale);
|
||||
int width = static_cast<int>(w / scale);
|
||||
int height = static_cast<int>(h / scale);
|
||||
cv::Rect box;
|
||||
box.x = left;
|
||||
box.y = top;
|
||||
box.width = width;
|
||||
box.height = height;
|
||||
|
||||
boxes.push_back(box);
|
||||
class_ids.push_back(class_id.x);
|
||||
class_scores.push_back(score);
|
||||
confidences.push_back(confidence);
|
||||
masks.push_back(mask);
|
||||
}
|
||||
}
|
||||
|
||||
// NMS
|
||||
std::vector<int> indices;
|
||||
cv::dnn::NMSBoxes(boxes, confidences, thrs_conf, thrs_nms, indices);
|
||||
// cv::Mat rgb_mask;
|
||||
cv::Mat rgb_mask = cv::Mat::zeros(img_.size(), img_.type());
|
||||
|
||||
for (size_t i = 0; i < indices.size(); i++)
|
||||
{
|
||||
int index = indices[i];
|
||||
int class_id = class_ids[index];
|
||||
cv::Rect box = boxes[index];
|
||||
int x1 = std::max(0, box.x);
|
||||
int y1 = std::max(0, box.y);
|
||||
int x2 = std::max(0, box.br().x);
|
||||
int y2 = std::max(0, box.br().y);
|
||||
|
||||
cv::Mat m = masks[index] * proto_buffer;
|
||||
for (int col = 0; col < m.cols; col++)
|
||||
{
|
||||
m.at<float>(0, col) = sigmoid_function(m.at<float>(0, col));
|
||||
}
|
||||
cv::Mat m1 = m.reshape(1, 160); // 1x25600 -> 160x160
|
||||
int mx1 = std::max(0, int((x1 * scale + paddings[2]) * 0.25));
|
||||
int mx2 = std::max(0, int((x2 * scale + paddings[2]) * 0.25));
|
||||
int my1 = std::max(0, int((y1 * scale + paddings[1]) * 0.25));
|
||||
int my2 = std::max(0, int((y2 * scale + paddings[1]) * 0.25));
|
||||
cv::Mat mask_roi = m1(cv::Range(my1, my2), cv::Range(mx1, mx2));
|
||||
|
||||
cv::Mat rm, det_mask;
|
||||
cv::resize(mask_roi, rm, cv::Size(x2 - x1, y2 - y1));
|
||||
for (int r = 0; r < rm.rows; r++)
|
||||
{
|
||||
for (int c = 0; c < rm.cols; c++)
|
||||
{
|
||||
float pv = rm.at<float>(r, c);
|
||||
if (pv > 0.5)
|
||||
{
|
||||
rm.at<float>(r, c) = 1.0;
|
||||
}
|
||||
else
|
||||
{
|
||||
rm.at<float>(r, c) = 0.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rm = rm * rng.uniform(0, 255);
|
||||
rm.convertTo(det_mask, CV_8UC1);
|
||||
if ((y1 + det_mask.rows) >= img_.rows)
|
||||
{
|
||||
y2 = img_.rows - 1;
|
||||
}
|
||||
if ((x1 + det_mask.cols) >= img_.cols)
|
||||
{
|
||||
x2 = img_.cols - 1;
|
||||
}
|
||||
|
||||
cv::Mat mask = cv::Mat::zeros(cv::Size(img_.cols, img_.rows), CV_8UC1);
|
||||
det_mask(cv::Range(0, y2 - y1), cv::Range(0, x2 - x1)).copyTo(mask(cv::Range(y1, y2), cv::Range(x1, x2)));
|
||||
add(rgb_mask, cv::Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)), rgb_mask, mask);
|
||||
|
||||
boxes_x_.push_back(box.x);
|
||||
boxes_y_.push_back(box.y);
|
||||
boxes_w_.push_back(box.width);
|
||||
boxes_h_.push_back(box.height);
|
||||
|
||||
boxes_label_.push_back((int)class_id);
|
||||
boxes_score_.push_back(class_scores[index]);
|
||||
|
||||
cv::Mat mask_j = mask.clone();
|
||||
boxes_seg_.push_back(mask_j);
|
||||
}
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
void CommonObjectDetectorIntelImpl::postprocess_img(std::vector<float> &boxes_x_,
|
||||
std::vector<float> &boxes_y_,
|
||||
std::vector<float> &boxes_w_,
|
||||
std::vector<float> &boxes_h_,
|
||||
std::vector<int> &boxes_label_,
|
||||
std::vector<float> &boxes_score_,
|
||||
double &thrs_conf,
|
||||
double &thrs_nms)
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
const ov::Tensor &output_tensor = infer_request.get_output_tensor();
|
||||
ov::Shape output_shape = output_tensor.get_shape();
|
||||
float *detections = output_tensor.data<float>();
|
||||
|
||||
std::vector<cv::Rect> boxes;
|
||||
vector<int> class_ids;
|
||||
vector<float> confidences;
|
||||
for (int i = 0; i < output_shape[1]; i++)
|
||||
{
|
||||
float *detection = &detections[i * output_shape[2]];
|
||||
|
||||
float confidence = detection[4];
|
||||
if (confidence >= thrs_conf)
|
||||
{
|
||||
float *classes_scores = &detection[5];
|
||||
cv::Mat scores(1, output_shape[2] - 5, CV_32FC1, classes_scores);
|
||||
cv::Point class_id;
|
||||
double max_class_score;
|
||||
cv::minMaxLoc(scores, 0, &max_class_score, 0, &class_id);
|
||||
if (max_class_score > thrs_conf)
|
||||
{
|
||||
confidences.push_back(confidence);
|
||||
class_ids.push_back(class_id.x);
|
||||
float x = detection[0];
|
||||
float y = detection[1];
|
||||
float w = detection[2];
|
||||
float h = detection[3];
|
||||
float xmin = x - (w / 2);
|
||||
float ymin = y - (h / 2);
|
||||
|
||||
boxes.push_back(cv::Rect(xmin, ymin, w, h));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<int> nms_result;
|
||||
cv::dnn::NMSBoxes(boxes, confidences, thrs_conf, thrs_nms, nms_result);
|
||||
|
||||
std::vector<Detection> output;
|
||||
for (int i = 0; i < nms_result.size(); i++)
|
||||
{
|
||||
Detection result;
|
||||
int idx = nms_result[i];
|
||||
result.class_id = class_ids[idx];
|
||||
result.confidence = confidences[idx];
|
||||
result.box = boxes[idx];
|
||||
output.push_back(result);
|
||||
}
|
||||
|
||||
for (int i = 0; i < output.size(); i++)
|
||||
{
|
||||
auto detection = output[i];
|
||||
auto box = detection.box;
|
||||
auto classId = detection.class_id;
|
||||
auto confidence = detection.confidence;
|
||||
|
||||
float xmax = box.x + box.width;
|
||||
float ymax = box.y + box.height;
|
||||
|
||||
boxes_x_.push_back(this->rx * box.x);
|
||||
boxes_y_.push_back(this->ry * box.y);
|
||||
boxes_w_.push_back(this->rx * box.width);
|
||||
boxes_h_.push_back(this->ry * box.height);
|
||||
|
||||
boxes_label_.push_back((int)detection.class_id);
|
||||
boxes_score_.push_back(detection.confidence);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,89 @@
|
|||
#ifndef __SV_COMMON_DET_INTEL__
|
||||
#define __SV_COMMON_DET_INTEL__
|
||||
|
||||
#include "sv_core.h"
|
||||
#include <string>
|
||||
#include <iostream>
|
||||
#include <opencv2/dnn.hpp>
|
||||
#include <opencv2/imgproc.hpp>
|
||||
#include <opencv2/highgui.hpp>
|
||||
#include <string>
|
||||
#include <chrono>
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
#include <openvino/openvino.hpp>
|
||||
#endif
|
||||
|
||||
struct Resize
|
||||
{
|
||||
cv::Mat resized_image;
|
||||
int dw;
|
||||
int dh;
|
||||
};
|
||||
|
||||
struct Detection
|
||||
{
|
||||
int class_id;
|
||||
float confidence;
|
||||
cv::Rect box;
|
||||
};
|
||||
|
||||
namespace sv
|
||||
{
|
||||
|
||||
class CommonObjectDetectorIntelImpl
|
||||
{
|
||||
public:
|
||||
CommonObjectDetectorIntelImpl();
|
||||
~CommonObjectDetectorIntelImpl();
|
||||
|
||||
bool intelSetup(CommonObjectDetectorBase *base_, bool input_4k_);
|
||||
void intelDetect(
|
||||
CommonObjectDetectorBase *base_,
|
||||
cv::Mat img_,
|
||||
std::vector<float> &boxes_x_,
|
||||
std::vector<float> &boxes_y_,
|
||||
std::vector<float> &boxes_w_,
|
||||
std::vector<float> &boxes_h_,
|
||||
std::vector<int> &boxes_label_,
|
||||
std::vector<float> &boxes_score_,
|
||||
std::vector<cv::Mat> &boxes_seg_,
|
||||
bool input_4k_);
|
||||
void preprocess_img(cv::Mat &img_);
|
||||
void preprocess_img_seg(cv::Mat &img_, std::vector<float> &paddings);
|
||||
void postprocess_img_seg(cv::Mat &img_,
|
||||
std::vector<float> &paddings,
|
||||
std::vector<float> &boxes_x_,
|
||||
std::vector<float> &boxes_y_,
|
||||
std::vector<float> &boxes_w_,
|
||||
std::vector<float> &boxes_h_,
|
||||
std::vector<int> &boxes_label_,
|
||||
std::vector<float> &boxes_score_,
|
||||
std::vector<cv::Mat> &boxes_seg_,
|
||||
double &thrs_conf,
|
||||
double &thrs_nms);
|
||||
|
||||
void postprocess_img(std::vector<float> &boxes_x_,
|
||||
std::vector<float> &boxes_y_,
|
||||
std::vector<float> &boxes_w_,
|
||||
std::vector<float> &boxes_h_,
|
||||
std::vector<int> &boxes_label_,
|
||||
std::vector<float> &boxes_score_,
|
||||
double &thrs_conf,
|
||||
double &thrs_nms);
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
int inpWidth;
|
||||
int inpHeight;
|
||||
bool with_segmentation;
|
||||
float rx; // the width ratio of original image and resized image
|
||||
float ry; // the height ratio of original image and resized image
|
||||
Resize resize;
|
||||
ov::Tensor input_tensor;
|
||||
ov::InferRequest infer_request;
|
||||
ov::CompiledModel compiled_model;
|
||||
#endif
|
||||
};
|
||||
|
||||
}
|
||||
#endif
|
|
@ -8,15 +8,24 @@
|
|||
#include "common_det_cuda_impl.h"
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
#include <openvino/openvino.hpp>
|
||||
#include "common_det_intel_impl.h"
|
||||
#endif
|
||||
|
||||
namespace sv {
|
||||
|
||||
|
||||
CommonObjectDetector::CommonObjectDetector()
|
||||
CommonObjectDetector::CommonObjectDetector(bool input_4k)
|
||||
{
|
||||
this->_input_4k = input_4k;
|
||||
#ifdef WITH_CUDA
|
||||
this->_cuda_impl = new CommonObjectDetectorCUDAImpl;
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
this->_intel_impl = new CommonObjectDetectorIntelImpl;
|
||||
#endif
|
||||
}
|
||||
CommonObjectDetector::~CommonObjectDetector()
|
||||
{
|
||||
|
@ -25,7 +34,11 @@ CommonObjectDetector::~CommonObjectDetector()
|
|||
bool CommonObjectDetector::setupImpl()
|
||||
{
|
||||
#ifdef WITH_CUDA
|
||||
return this->_cuda_impl->cudaSetup(this);
|
||||
return this->_cuda_impl->cudaSetup(this, this->_input_4k);
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
return this->_intel_impl->intelSetup(this, this->_input_4k);
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
@ -51,14 +64,24 @@ void CommonObjectDetector::detectImpl(
|
|||
boxes_h_,
|
||||
boxes_label_,
|
||||
boxes_score_,
|
||||
boxes_seg_
|
||||
);
|
||||
boxes_seg_,
|
||||
this->_input_4k);
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
this->_intel_impl->intelDetect(
|
||||
this,
|
||||
img_,
|
||||
boxes_x_,
|
||||
boxes_y_,
|
||||
boxes_w_,
|
||||
boxes_h_,
|
||||
boxes_label_,
|
||||
boxes_score_,
|
||||
boxes_seg_,
|
||||
this->_input_4k);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
#include "landing_det_cuda_impl.h"
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include "sv_util.h"
|
||||
|
||||
#define SV_MODEL_DIR "/SpireCV/models/"
|
||||
#define SV_ROOT_DIR "/SpireCV/"
|
||||
|
@ -50,6 +51,16 @@ bool LandingMarkerDetectorCUDAImpl::cudaSetup()
|
|||
{
|
||||
#ifdef WITH_CUDA
|
||||
std::string trt_model_fn = get_home() + SV_MODEL_DIR + "LandingMarker.engine";
|
||||
|
||||
std::vector<std::string> files;
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.engine", "Nv-LandingMarker-resnet34");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
trt_model_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
std::cout << "Load: " << trt_model_fn << std::endl;
|
||||
|
||||
if (!is_file_exist(trt_model_fn))
|
||||
{
|
||||
throw std::runtime_error("SpireCV (104) Error loading the LandingMarker TensorRT model (File Not Exist)");
|
||||
|
|
|
@ -0,0 +1,104 @@
|
|||
#include "landing_det_intel_impl.h"
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include "sv_util.h"
|
||||
|
||||
#define SV_MODEL_DIR "/SpireCV/models/"
|
||||
#define SV_ROOT_DIR "/SpireCV/"
|
||||
|
||||
namespace sv
|
||||
{
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
using namespace dnn;
|
||||
#endif
|
||||
|
||||
LandingMarkerDetectorIntelImpl::LandingMarkerDetectorIntelImpl()
|
||||
{
|
||||
}
|
||||
|
||||
LandingMarkerDetectorIntelImpl::~LandingMarkerDetectorIntelImpl()
|
||||
{
|
||||
}
|
||||
|
||||
bool LandingMarkerDetectorIntelImpl::intelSetup()
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
std::string onnx_model_fn = get_home() + SV_MODEL_DIR + "LandingMarker.onnx";
|
||||
std::vector<std::string> files;
|
||||
list_dir(get_home() + SV_MODEL_DIR, files, "-online.onnx", "Int-LandingMarker-resnet34");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
onnx_model_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
|
||||
if (!is_file_exist(onnx_model_fn))
|
||||
{
|
||||
throw std::runtime_error("SpireCV (104) Error loading the LandingMarker ONNX model (File Not Exist)");
|
||||
}
|
||||
|
||||
// OpenVINO
|
||||
ov::Core core;
|
||||
std::shared_ptr<ov::Model> model = core.read_model(onnx_model_fn);
|
||||
ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);
|
||||
ppp.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC").set_color_format(ov::preprocess::ColorFormat::RGB);
|
||||
ppp.input().preprocess().convert_element_type(ov::element::f32).convert_color(ov::preprocess::ColorFormat::RGB).scale({255, 255, 255}); // .scale({ 112, 112, 112 });
|
||||
ppp.input().model().set_layout("NCHW");
|
||||
ppp.output().tensor().set_element_type(ov::element::f32);
|
||||
model = ppp.build();
|
||||
this->compiled_model = core.compile_model(model, "GPU");
|
||||
this->infer_request = compiled_model.create_infer_request();
|
||||
|
||||
return true;
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
void LandingMarkerDetectorIntelImpl::intelRoiCNN(
|
||||
std::vector<cv::Mat> &input_rois_,
|
||||
std::vector<int> &output_labels_)
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
output_labels_.clear();
|
||||
|
||||
for (int i = 0; i < input_rois_.size(); i++)
|
||||
{
|
||||
cv::Mat e_roi = input_rois_[i];
|
||||
|
||||
// Get input port for model with one input
|
||||
auto input_port = compiled_model.input();
|
||||
// Create tensor from external memory
|
||||
ov::Tensor input_tensor(input_port.get_element_type(), input_port.get_shape(), e_roi.ptr(0));
|
||||
// Set input tensor for model with one input
|
||||
infer_request.set_input_tensor(input_tensor);
|
||||
//preprocess_img(e_roi);
|
||||
|
||||
// infer_request.infer();
|
||||
infer_request.start_async();
|
||||
infer_request.wait();
|
||||
|
||||
const ov::Tensor &output_tensor = infer_request.get_output_tensor();
|
||||
ov::Shape output_shape = output_tensor.get_shape();
|
||||
this->_p_prob = output_tensor.data<float>();
|
||||
|
||||
// Find max index
|
||||
double max = 0;
|
||||
int label = 0;
|
||||
for (int i = 0; i < 11; ++i)
|
||||
{
|
||||
if (max < this->_p_prob[i])
|
||||
{
|
||||
max = this->_p_prob[i];
|
||||
label = i;
|
||||
}
|
||||
}
|
||||
output_labels_.push_back(label);
|
||||
}
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,37 @@
|
|||
#ifndef __SV_LANDING_DET_INTEL__
|
||||
#define __SV_LANDING_DET_INTEL__
|
||||
|
||||
#include "sv_core.h"
|
||||
#include <opencv2/opencv.hpp>
|
||||
#include <opencv2/aruco.hpp>
|
||||
#include <opencv2/tracking.hpp>
|
||||
#include <string>
|
||||
#include <chrono>
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
#include <openvino/openvino.hpp>
|
||||
#endif
|
||||
|
||||
namespace sv
|
||||
{
|
||||
class LandingMarkerDetectorIntelImpl
|
||||
{
|
||||
public:
|
||||
LandingMarkerDetectorIntelImpl();
|
||||
~LandingMarkerDetectorIntelImpl();
|
||||
|
||||
bool intelSetup();
|
||||
void intelRoiCNN(
|
||||
std::vector<cv::Mat> &input_rois_,
|
||||
std::vector<int> &output_labels_);
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
float *_p_prob;
|
||||
|
||||
ov::Tensor input_tensor;
|
||||
ov::InferRequest infer_request;
|
||||
ov::CompiledModel compiled_model;
|
||||
#endif
|
||||
};
|
||||
}
|
||||
#endif
|
|
@ -7,6 +7,10 @@
|
|||
#include "landing_det_cuda_impl.h"
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
#include <openvino/openvino.hpp>
|
||||
#include "landing_det_intel_impl.h"
|
||||
#endif
|
||||
|
||||
namespace sv {
|
||||
|
||||
|
@ -16,6 +20,10 @@ LandingMarkerDetector::LandingMarkerDetector()
|
|||
#ifdef WITH_CUDA
|
||||
this->_cuda_impl = new LandingMarkerDetectorCUDAImpl;
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
this->_intel_impl = new LandingMarkerDetectorIntelImpl;
|
||||
#endif
|
||||
}
|
||||
LandingMarkerDetector::~LandingMarkerDetector()
|
||||
{
|
||||
|
@ -26,6 +34,10 @@ bool LandingMarkerDetector::setupImpl()
|
|||
#ifdef WITH_CUDA
|
||||
return this->_cuda_impl->cudaSetup();
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
return this->_intel_impl->intelSetup();
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -40,11 +52,13 @@ void LandingMarkerDetector::roiCNN(
|
|||
output_labels_
|
||||
);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
this->_intel_impl->intelRoiCNN(
|
||||
input_rois_,
|
||||
output_labels_
|
||||
);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,262 @@
|
|||
|
||||
#include "BYTETracker.h"
|
||||
#include <fstream>
|
||||
|
||||
namespace sv
|
||||
{
|
||||
|
||||
BYTETracker::BYTETracker(int frame_rate, int track_buffer)
|
||||
{
|
||||
track_thresh = 0.5;
|
||||
high_thresh = 0.6;
|
||||
match_thresh = 0.8;
|
||||
|
||||
frame_id = 0;
|
||||
max_time_lost = int(frame_rate / 30.0 * track_buffer);
|
||||
}
|
||||
|
||||
BYTETracker::~BYTETracker()
|
||||
{
|
||||
}
|
||||
|
||||
void BYTETracker::update(TargetsInFrame &tgts)
|
||||
{
|
||||
////////////////// Step 1: Get detections //////////////////
|
||||
this->frame_id++;
|
||||
std::vector<STrack> activated_stracks;
|
||||
std::vector<STrack> refind_stracks;
|
||||
std::vector<STrack> removed_stracks;
|
||||
std::vector<STrack> lost_stracks;
|
||||
std::vector<STrack> detections;
|
||||
std::vector<STrack> detections_low;
|
||||
|
||||
std::vector<STrack> detections_cp;
|
||||
std::vector<STrack> tracked_stracks_swap;
|
||||
std::vector<STrack> resa, resb;
|
||||
std::vector<STrack> output_stracks;
|
||||
|
||||
std::vector<STrack *> unconfirmed;
|
||||
std::vector<STrack *> tracked_stracks;
|
||||
std::vector<STrack *> strack_pool;
|
||||
std::vector<STrack *> r_tracked_stracks;
|
||||
|
||||
if (tgts.targets.size() > 0)
|
||||
{
|
||||
for (int i = 0; i < tgts.targets.size(); i++)
|
||||
{
|
||||
|
||||
tgts.targets[i].tracked_id = 0;
|
||||
tgts.targets[i].has_tid = true;
|
||||
|
||||
std::vector<float> tlbr_;
|
||||
tlbr_.resize(4);
|
||||
tlbr_[0] = tgts.targets[i].cx * tgts.width - tgts.targets[i].w * tgts.width / 2;
|
||||
tlbr_[1] = tgts.targets[i].cy * tgts.height - tgts.targets[i].h * tgts.height / 2;
|
||||
tlbr_[2] = tgts.targets[i].cx * tgts.width + tgts.targets[i].w * tgts.width / 2;
|
||||
tlbr_[3] = tgts.targets[i].cy * tgts.height + tgts.targets[i].h * tgts.height / 2;
|
||||
|
||||
float score = tgts.targets[i].score;
|
||||
|
||||
STrack strack(STrack::tlbr_to_tlwh(tlbr_), score);
|
||||
if (score >= track_thresh)
|
||||
{
|
||||
detections.push_back(strack);
|
||||
}
|
||||
else
|
||||
{
|
||||
detections_low.push_back(strack);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add newly detected tracklets to tracked_stracks
|
||||
for (int i = 0; i < this->tracked_stracks.size(); i++)
|
||||
{
|
||||
if (!this->tracked_stracks[i].is_activated)
|
||||
unconfirmed.push_back(&this->tracked_stracks[i]);
|
||||
else
|
||||
tracked_stracks.push_back(&this->tracked_stracks[i]);
|
||||
}
|
||||
|
||||
////////////////// Step 2: First association, with IoU //////////////////
|
||||
strack_pool = joint_stracks(tracked_stracks, this->lost_stracks);
|
||||
STrack::multi_predict(strack_pool, this->kalman_filter);
|
||||
|
||||
std::vector<std::vector<float>> dists;
|
||||
int dist_size = 0, dist_size_size = 0;
|
||||
dists = iou_distance(strack_pool, detections, dist_size, dist_size_size);
|
||||
|
||||
std::vector<std::vector<int>> matches;
|
||||
std::vector<int> u_track, u_detection;
|
||||
linear_assignment(dists, dist_size, dist_size_size, match_thresh, matches, u_track, u_detection);
|
||||
|
||||
for (int i = 0; i < matches.size(); i++)
|
||||
{
|
||||
STrack *track = strack_pool[matches[i][0]];
|
||||
STrack *det = &detections[matches[i][1]];
|
||||
if (track->state == TrackState::Tracked)
|
||||
{
|
||||
track->update(*det, this->frame_id);
|
||||
activated_stracks.push_back(*track);
|
||||
}
|
||||
else
|
||||
{
|
||||
track->re_activate(*det, this->frame_id, false);
|
||||
refind_stracks.push_back(*track);
|
||||
}
|
||||
}
|
||||
|
||||
////////////////// Step 3: Second association, using low score dets //////////////////
|
||||
for (int i = 0; i < u_detection.size(); i++)
|
||||
{
|
||||
detections_cp.push_back(detections[u_detection[i]]);
|
||||
}
|
||||
detections.clear();
|
||||
detections.assign(detections_low.begin(), detections_low.end());
|
||||
|
||||
for (int i = 0; i < u_track.size(); i++)
|
||||
{
|
||||
if (strack_pool[u_track[i]]->state == TrackState::Tracked)
|
||||
{
|
||||
r_tracked_stracks.push_back(strack_pool[u_track[i]]);
|
||||
}
|
||||
}
|
||||
|
||||
dists.clear();
|
||||
dists = iou_distance(r_tracked_stracks, detections, dist_size, dist_size_size);
|
||||
|
||||
matches.clear();
|
||||
u_track.clear();
|
||||
u_detection.clear();
|
||||
linear_assignment(dists, dist_size, dist_size_size, 0.5, matches, u_track, u_detection);
|
||||
|
||||
for (int i = 0; i < matches.size(); i++)
|
||||
{
|
||||
STrack *track = r_tracked_stracks[matches[i][0]];
|
||||
STrack *det = &detections[matches[i][1]];
|
||||
if (track->state == TrackState::Tracked)
|
||||
{
|
||||
track->update(*det, this->frame_id);
|
||||
activated_stracks.push_back(*track);
|
||||
}
|
||||
else
|
||||
{
|
||||
track->re_activate(*det, this->frame_id, false);
|
||||
refind_stracks.push_back(*track);
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < u_track.size(); i++)
|
||||
{
|
||||
STrack *track = r_tracked_stracks[u_track[i]];
|
||||
if (track->state != TrackState::Lost)
|
||||
{
|
||||
track->mark_lost();
|
||||
lost_stracks.push_back(*track);
|
||||
}
|
||||
}
|
||||
|
||||
// Deal with unconfirmed tracks, usually tracks with only one beginning frame
|
||||
detections.clear();
|
||||
detections.assign(detections_cp.begin(), detections_cp.end());
|
||||
|
||||
dists.clear();
|
||||
dists = iou_distance(unconfirmed, detections, dist_size, dist_size_size);
|
||||
|
||||
matches.clear();
|
||||
std::vector<int> u_unconfirmed;
|
||||
u_detection.clear();
|
||||
linear_assignment(dists, dist_size, dist_size_size, 0.7, matches, u_unconfirmed, u_detection);
|
||||
|
||||
for (int i = 0; i < matches.size(); i++)
|
||||
{
|
||||
unconfirmed[matches[i][0]]->update(detections[matches[i][1]], this->frame_id);
|
||||
activated_stracks.push_back(*unconfirmed[matches[i][0]]);
|
||||
}
|
||||
|
||||
for (int i = 0; i < u_unconfirmed.size(); i++)
|
||||
{
|
||||
STrack *track = unconfirmed[u_unconfirmed[i]];
|
||||
track->mark_removed();
|
||||
removed_stracks.push_back(*track);
|
||||
}
|
||||
|
||||
////////////////// Step 4: Init new stracks //////////////////
|
||||
for (int i = 0; i < u_detection.size(); i++)
|
||||
{
|
||||
STrack *track = &detections[u_detection[i]];
|
||||
if (track->score < this->high_thresh)
|
||||
continue;
|
||||
track->activate(this->kalman_filter, this->frame_id);
|
||||
activated_stracks.push_back(*track);
|
||||
}
|
||||
|
||||
////////////////// Step 5: Update state //////////////////
|
||||
for (int i = 0; i < this->lost_stracks.size(); i++)
|
||||
{
|
||||
if (this->frame_id - this->lost_stracks[i].end_frame() > this->max_time_lost)
|
||||
{
|
||||
this->lost_stracks[i].mark_removed();
|
||||
removed_stracks.push_back(this->lost_stracks[i]);
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < this->tracked_stracks.size(); i++)
|
||||
{
|
||||
if (this->tracked_stracks[i].state == TrackState::Tracked)
|
||||
{
|
||||
tracked_stracks_swap.push_back(this->tracked_stracks[i]);
|
||||
}
|
||||
}
|
||||
this->tracked_stracks.clear();
|
||||
this->tracked_stracks.assign(tracked_stracks_swap.begin(), tracked_stracks_swap.end());
|
||||
|
||||
this->tracked_stracks = joint_stracks(this->tracked_stracks, activated_stracks);
|
||||
this->tracked_stracks = joint_stracks(this->tracked_stracks, refind_stracks);
|
||||
|
||||
// std::cout << activated_stracks.size() << std::endl;
|
||||
|
||||
this->lost_stracks = sub_stracks(this->lost_stracks, this->tracked_stracks);
|
||||
for (int i = 0; i < lost_stracks.size(); i++)
|
||||
{
|
||||
this->lost_stracks.push_back(lost_stracks[i]);
|
||||
}
|
||||
|
||||
this->lost_stracks = sub_stracks(this->lost_stracks, this->removed_stracks);
|
||||
for (int i = 0; i < removed_stracks.size(); i++)
|
||||
{
|
||||
this->removed_stracks.push_back(removed_stracks[i]);
|
||||
}
|
||||
|
||||
remove_duplicate_stracks(resa, resb, this->tracked_stracks, this->lost_stracks);
|
||||
|
||||
this->tracked_stracks.clear();
|
||||
this->tracked_stracks.assign(resa.begin(), resa.end());
|
||||
this->lost_stracks.clear();
|
||||
this->lost_stracks.assign(resb.begin(), resb.end());
|
||||
|
||||
for (int i = 0; i < this->tracked_stracks.size(); i++)
|
||||
{
|
||||
if (this->tracked_stracks[i].is_activated)
|
||||
{
|
||||
output_stracks.push_back(this->tracked_stracks[i]);
|
||||
}
|
||||
}
|
||||
|
||||
tgts.targets.clear();
|
||||
|
||||
for (unsigned long i = 0; i < output_stracks.size(); i++)
|
||||
{
|
||||
std::vector<float> tlwh = output_stracks[i].tlwh;
|
||||
bool vertical = tlwh[2] / tlwh[3] > 1.6;
|
||||
|
||||
if (tlwh[2] * tlwh[3] > 20 && !vertical)
|
||||
{
|
||||
Target tgt;
|
||||
tgt.setBox(tlwh[0], tlwh[1], tlwh[0] + tlwh[2], tlwh[1] + tlwh[3], tgts.width, tgts.height);
|
||||
tgt.setTrackID(output_stracks[i].track_id);
|
||||
tgts.targets.push_back(tgt);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,56 @@
|
|||
#ifndef __SV_BYTETrack__
|
||||
#define __SV_BYTETrack__
|
||||
|
||||
#include "sv_core.h"
|
||||
#include "STrack.h"
|
||||
|
||||
class detect_result
|
||||
{
|
||||
public:
|
||||
int classId;
|
||||
float confidence;
|
||||
cv::Rect_<float> box;
|
||||
};
|
||||
|
||||
namespace sv {
|
||||
|
||||
class BYTETracker
|
||||
{
|
||||
public:
|
||||
BYTETracker(int frame_rate = 30, int track_buffer = 30);
|
||||
~BYTETracker();
|
||||
|
||||
void update(TargetsInFrame &tgts);
|
||||
cv::Scalar get_color(int idx);
|
||||
|
||||
private:
|
||||
std::vector<STrack*> joint_stracks( std::vector<STrack*> &tlista, std::vector<STrack> &tlistb);
|
||||
std::vector<STrack> joint_stracks( std::vector<STrack> &tlista, std::vector<STrack> &tlistb);
|
||||
|
||||
std::vector<STrack> sub_stracks( std::vector<STrack> &tlista, std::vector<STrack> &tlistb);
|
||||
void remove_duplicate_stracks( std::vector<STrack> &resa, std::vector<STrack> &resb, std::vector<STrack> &stracksa, std::vector<STrack> &stracksb);
|
||||
|
||||
void linear_assignment( std::vector< std::vector<float> > &cost_matrix, int cost_matrix_size, int cost_matrix_size_size, float thresh,
|
||||
std::vector< std::vector<int> > &matches, std::vector<int> &unmatched_a, std::vector<int> &unmatched_b);
|
||||
std::vector< std::vector<float> > iou_distance( std::vector<STrack*> &atracks, std::vector<STrack> &btracks, int &dist_size, int &dist_size_size);
|
||||
std::vector< std::vector<float> > iou_distance( std::vector<STrack> &atracks, std::vector<STrack> &btracks);
|
||||
std::vector< std::vector<float> > ious( std::vector< std::vector<float> > &atlbrs, std::vector< std::vector<float> > &btlbrs);
|
||||
|
||||
double lapjv(const std::vector< std::vector<float> > &cost, std::vector<int> &rowsol, std::vector<int> &colsol,
|
||||
bool extend_cost = false, float cost_limit = LONG_MAX, bool return_cost = true);
|
||||
|
||||
private:
|
||||
|
||||
float track_thresh;
|
||||
float high_thresh;
|
||||
float match_thresh;
|
||||
int frame_id;
|
||||
int max_time_lost;
|
||||
|
||||
std::vector<STrack> tracked_stracks;
|
||||
std::vector<STrack> lost_stracks;
|
||||
std::vector<STrack> removed_stracks;
|
||||
byte_kalman::ByteKalmanFilter kalman_filter;
|
||||
};
|
||||
}
|
||||
#endif
|
|
@ -0,0 +1,152 @@
|
|||
#include "BytekalmanFilter.h"
|
||||
#include <Eigen/Cholesky>
|
||||
|
||||
namespace byte_kalman
|
||||
{
|
||||
const double ByteKalmanFilter::chi2inv95[10] = {
|
||||
0,
|
||||
3.8415,
|
||||
5.9915,
|
||||
7.8147,
|
||||
9.4877,
|
||||
11.070,
|
||||
12.592,
|
||||
14.067,
|
||||
15.507,
|
||||
16.919
|
||||
};
|
||||
ByteKalmanFilter::ByteKalmanFilter()
|
||||
{
|
||||
int ndim = 4;
|
||||
double dt = 1.;
|
||||
|
||||
_motion_mat = Eigen::MatrixXf::Identity(8, 8);
|
||||
for (int i = 0; i < ndim; i++) {
|
||||
_motion_mat(i, ndim + i) = dt;
|
||||
}
|
||||
_update_mat = Eigen::MatrixXf::Identity(4, 8);
|
||||
|
||||
this->_std_weight_position = 1. / 20;
|
||||
this->_std_weight_velocity = 1. / 160;
|
||||
}
|
||||
|
||||
KAL_DATA ByteKalmanFilter::initiate(const DETECTBOX &measurement)
|
||||
{
|
||||
DETECTBOX mean_pos = measurement;
|
||||
DETECTBOX mean_vel;
|
||||
for (int i = 0; i < 4; i++) mean_vel(i) = 0;
|
||||
|
||||
KAL_MEAN mean;
|
||||
for (int i = 0; i < 8; i++) {
|
||||
if (i < 4) mean(i) = mean_pos(i);
|
||||
else mean(i) = mean_vel(i - 4);
|
||||
}
|
||||
|
||||
KAL_MEAN std;
|
||||
std(0) = 2 * _std_weight_position * measurement[3];
|
||||
std(1) = 2 * _std_weight_position * measurement[3];
|
||||
std(2) = 1e-2;
|
||||
std(3) = 2 * _std_weight_position * measurement[3];
|
||||
std(4) = 10 * _std_weight_velocity * measurement[3];
|
||||
std(5) = 10 * _std_weight_velocity * measurement[3];
|
||||
std(6) = 1e-5;
|
||||
std(7) = 10 * _std_weight_velocity * measurement[3];
|
||||
|
||||
KAL_MEAN tmp = std.array().square();
|
||||
KAL_COVA var = tmp.asDiagonal();
|
||||
return std::make_pair(mean, var);
|
||||
}
|
||||
|
||||
void ByteKalmanFilter::predict(KAL_MEAN &mean, KAL_COVA &covariance)
|
||||
{
|
||||
//revise the data;
|
||||
DETECTBOX std_pos;
|
||||
std_pos << _std_weight_position * mean(3),
|
||||
_std_weight_position * mean(3),
|
||||
1e-2,
|
||||
_std_weight_position * mean(3);
|
||||
DETECTBOX std_vel;
|
||||
std_vel << _std_weight_velocity * mean(3),
|
||||
_std_weight_velocity * mean(3),
|
||||
1e-5,
|
||||
_std_weight_velocity * mean(3);
|
||||
KAL_MEAN tmp;
|
||||
tmp.block<1, 4>(0, 0) = std_pos;
|
||||
tmp.block<1, 4>(0, 4) = std_vel;
|
||||
tmp = tmp.array().square();
|
||||
KAL_COVA motion_cov = tmp.asDiagonal();
|
||||
KAL_MEAN mean1 = this->_motion_mat * mean.transpose();
|
||||
KAL_COVA covariance1 = this->_motion_mat * covariance *(_motion_mat.transpose());
|
||||
covariance1 += motion_cov;
|
||||
|
||||
mean = mean1;
|
||||
covariance = covariance1;
|
||||
}
|
||||
|
||||
KAL_HDATA ByteKalmanFilter::project(const KAL_MEAN &mean, const KAL_COVA &covariance)
|
||||
{
|
||||
DETECTBOX std;
|
||||
std << _std_weight_position * mean(3), _std_weight_position * mean(3),
|
||||
1e-1, _std_weight_position * mean(3);
|
||||
KAL_HMEAN mean1 = _update_mat * mean.transpose();
|
||||
KAL_HCOVA covariance1 = _update_mat * covariance * (_update_mat.transpose());
|
||||
Eigen::Matrix<float, 4, 4> diag = std.asDiagonal();
|
||||
diag = diag.array().square().matrix();
|
||||
covariance1 += diag;
|
||||
// covariance1.diagonal() << diag;
|
||||
return std::make_pair(mean1, covariance1);
|
||||
}
|
||||
|
||||
KAL_DATA
|
||||
ByteKalmanFilter::update(
|
||||
const KAL_MEAN &mean,
|
||||
const KAL_COVA &covariance,
|
||||
const DETECTBOX &measurement)
|
||||
{
|
||||
KAL_HDATA pa = project(mean, covariance);
|
||||
KAL_HMEAN projected_mean = pa.first;
|
||||
KAL_HCOVA projected_cov = pa.second;
|
||||
|
||||
//chol_factor, lower =
|
||||
//scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False)
|
||||
//kalmain_gain =
|
||||
//scipy.linalg.cho_solve((cho_factor, lower),
|
||||
//np.dot(covariance, self._upadte_mat.T).T,
|
||||
//check_finite=False).T
|
||||
Eigen::Matrix<float, 4, 8> B = (covariance * (_update_mat.transpose())).transpose();
|
||||
Eigen::Matrix<float, 8, 4> kalman_gain = (projected_cov.llt().solve(B)).transpose(); // eg.8x4
|
||||
Eigen::Matrix<float, 1, 4> innovation = measurement - projected_mean; //eg.1x4
|
||||
auto tmp = innovation * (kalman_gain.transpose());
|
||||
KAL_MEAN new_mean = (mean.array() + tmp.array()).matrix();
|
||||
KAL_COVA new_covariance = covariance - kalman_gain * projected_cov*(kalman_gain.transpose());
|
||||
return std::make_pair(new_mean, new_covariance);
|
||||
}
|
||||
|
||||
Eigen::Matrix<float, 1, -1>
|
||||
ByteKalmanFilter::gating_distance(
|
||||
const KAL_MEAN &mean,
|
||||
const KAL_COVA &covariance,
|
||||
const std::vector<DETECTBOX> &measurements,
|
||||
bool only_position)
|
||||
{
|
||||
KAL_HDATA pa = this->project(mean, covariance);
|
||||
if (only_position) {
|
||||
printf("not implement!");
|
||||
exit(0);
|
||||
}
|
||||
KAL_HMEAN mean1 = pa.first;
|
||||
KAL_HCOVA covariance1 = pa.second;
|
||||
|
||||
// Eigen::Matrix<float, -1, 4, Eigen::RowMajor> d(size, 4);
|
||||
DETECTBOXSS d(measurements.size(), 4);
|
||||
int pos = 0;
|
||||
for (DETECTBOX box : measurements) {
|
||||
d.row(pos++) = box - mean1;
|
||||
}
|
||||
Eigen::Matrix<float, -1, -1, Eigen::RowMajor> factor = covariance1.llt().matrixL();
|
||||
Eigen::Matrix<float, -1, -1> z = factor.triangularView<Eigen::Lower>().solve<Eigen::OnTheRight>(d).transpose();
|
||||
auto zz = ((z.array())*(z.array())).matrix();
|
||||
auto square_maha = zz.colwise().sum();
|
||||
return square_maha;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,31 @@
|
|||
#pragma once
|
||||
|
||||
#include "dataType.h"
|
||||
|
||||
namespace byte_kalman
|
||||
{
|
||||
class ByteKalmanFilter
|
||||
{
|
||||
public:
|
||||
static const double chi2inv95[10];
|
||||
ByteKalmanFilter();
|
||||
KAL_DATA initiate(const DETECTBOX& measurement);
|
||||
void predict(KAL_MEAN& mean, KAL_COVA& covariance);
|
||||
KAL_HDATA project(const KAL_MEAN& mean, const KAL_COVA& covariance);
|
||||
KAL_DATA update(const KAL_MEAN& mean,
|
||||
const KAL_COVA& covariance,
|
||||
const DETECTBOX& measurement);
|
||||
|
||||
Eigen::Matrix<float, 1, -1> gating_distance(
|
||||
const KAL_MEAN& mean,
|
||||
const KAL_COVA& covariance,
|
||||
const std::vector<DETECTBOX>& measurements,
|
||||
bool only_position = false);
|
||||
|
||||
private:
|
||||
Eigen::Matrix<float, 8, 8, Eigen::RowMajor> _motion_mat;
|
||||
Eigen::Matrix<float, 4, 8, Eigen::RowMajor> _update_mat;
|
||||
float _std_weight_position;
|
||||
float _std_weight_velocity;
|
||||
};
|
||||
}
|
|
@ -0,0 +1,192 @@
|
|||
#include "STrack.h"
|
||||
|
||||
STrack::STrack( std::vector<float> tlwh_, float score)
|
||||
{
|
||||
_tlwh.resize(4);
|
||||
_tlwh.assign(tlwh_.begin(), tlwh_.end());
|
||||
|
||||
is_activated = false;
|
||||
track_id = 0;
|
||||
state = TrackState::New;
|
||||
|
||||
tlwh.resize(4);
|
||||
tlbr.resize(4);
|
||||
|
||||
static_tlwh();
|
||||
static_tlbr();
|
||||
frame_id = 0;
|
||||
tracklet_len = 0;
|
||||
this->score = score;
|
||||
start_frame = 0;
|
||||
}
|
||||
|
||||
STrack::~STrack()
|
||||
{
|
||||
}
|
||||
|
||||
void STrack::activate(byte_kalman::ByteKalmanFilter &kalman_filter, int frame_id)
|
||||
{
|
||||
this->kalman_filter = kalman_filter;
|
||||
this->track_id = this->next_id();
|
||||
|
||||
std::vector<float> _tlwh_tmp(4);
|
||||
_tlwh_tmp[0] = this->_tlwh[0];
|
||||
_tlwh_tmp[1] = this->_tlwh[1];
|
||||
_tlwh_tmp[2] = this->_tlwh[2];
|
||||
_tlwh_tmp[3] = this->_tlwh[3];
|
||||
std::vector<float> xyah = tlwh_to_xyah(_tlwh_tmp);
|
||||
DETECTBOX xyah_box;
|
||||
xyah_box[0] = xyah[0];
|
||||
xyah_box[1] = xyah[1];
|
||||
xyah_box[2] = xyah[2];
|
||||
xyah_box[3] = xyah[3];
|
||||
auto mc = this->kalman_filter.initiate(xyah_box);
|
||||
this->mean = mc.first;
|
||||
this->covariance = mc.second;
|
||||
|
||||
static_tlwh();
|
||||
static_tlbr();
|
||||
|
||||
this->tracklet_len = 0;
|
||||
this->state = TrackState::Tracked;
|
||||
if (frame_id == 1)
|
||||
{
|
||||
this->is_activated = true;
|
||||
}
|
||||
//this->is_activated = true;
|
||||
this->frame_id = frame_id;
|
||||
this->start_frame = frame_id;
|
||||
}
|
||||
|
||||
void STrack::re_activate(STrack &new_track, int frame_id, bool new_id)
|
||||
{
|
||||
std::vector<float> xyah = tlwh_to_xyah(new_track.tlwh);
|
||||
DETECTBOX xyah_box;
|
||||
xyah_box[0] = xyah[0];
|
||||
xyah_box[1] = xyah[1];
|
||||
xyah_box[2] = xyah[2];
|
||||
xyah_box[3] = xyah[3];
|
||||
auto mc = this->kalman_filter.update(this->mean, this->covariance, xyah_box);
|
||||
this->mean = mc.first;
|
||||
this->covariance = mc.second;
|
||||
|
||||
static_tlwh();
|
||||
static_tlbr();
|
||||
|
||||
this->tracklet_len = 0;
|
||||
this->state = TrackState::Tracked;
|
||||
this->is_activated = true;
|
||||
this->frame_id = frame_id;
|
||||
this->score = new_track.score;
|
||||
if (new_id)
|
||||
this->track_id = next_id();
|
||||
}
|
||||
|
||||
void STrack::update(STrack &new_track, int frame_id)
|
||||
{
|
||||
this->frame_id = frame_id;
|
||||
this->tracklet_len++;
|
||||
|
||||
std::vector<float> xyah = tlwh_to_xyah(new_track.tlwh);
|
||||
DETECTBOX xyah_box;
|
||||
xyah_box[0] = xyah[0];
|
||||
xyah_box[1] = xyah[1];
|
||||
xyah_box[2] = xyah[2];
|
||||
xyah_box[3] = xyah[3];
|
||||
|
||||
auto mc = this->kalman_filter.update(this->mean, this->covariance, xyah_box);
|
||||
this->mean = mc.first;
|
||||
this->covariance = mc.second;
|
||||
|
||||
static_tlwh();
|
||||
static_tlbr();
|
||||
|
||||
this->state = TrackState::Tracked;
|
||||
this->is_activated = true;
|
||||
|
||||
this->score = new_track.score;
|
||||
}
|
||||
|
||||
void STrack::static_tlwh()
|
||||
{
|
||||
if (this->state == TrackState::New)
|
||||
{
|
||||
tlwh[0] = _tlwh[0];
|
||||
tlwh[1] = _tlwh[1];
|
||||
tlwh[2] = _tlwh[2];
|
||||
tlwh[3] = _tlwh[3];
|
||||
return;
|
||||
}
|
||||
|
||||
tlwh[0] = mean[0];
|
||||
tlwh[1] = mean[1];
|
||||
tlwh[2] = mean[2];
|
||||
tlwh[3] = mean[3];
|
||||
|
||||
tlwh[2] *= tlwh[3];
|
||||
tlwh[0] -= tlwh[2] / 2;
|
||||
tlwh[1] -= tlwh[3] / 2;
|
||||
}
|
||||
|
||||
void STrack::static_tlbr()
|
||||
{
|
||||
tlbr.clear();
|
||||
tlbr.assign(tlwh.begin(), tlwh.end());
|
||||
tlbr[2] += tlbr[0];
|
||||
tlbr[3] += tlbr[1];
|
||||
}
|
||||
|
||||
std::vector<float> STrack::tlwh_to_xyah( std::vector<float> tlwh_tmp)
|
||||
{
|
||||
std::vector<float> tlwh_output = tlwh_tmp;
|
||||
tlwh_output[0] += tlwh_output[2] / 2;
|
||||
tlwh_output[1] += tlwh_output[3] / 2;
|
||||
tlwh_output[2] /= tlwh_output[3];
|
||||
return tlwh_output;
|
||||
}
|
||||
|
||||
std::vector<float> STrack::to_xyah()
|
||||
{
|
||||
return tlwh_to_xyah(tlwh);
|
||||
}
|
||||
|
||||
std::vector<float> STrack::tlbr_to_tlwh( std::vector<float> &tlbr)
|
||||
{
|
||||
tlbr[2] -= tlbr[0];
|
||||
tlbr[3] -= tlbr[1];
|
||||
return tlbr;
|
||||
}
|
||||
|
||||
void STrack::mark_lost()
|
||||
{
|
||||
state = TrackState::Lost;
|
||||
}
|
||||
|
||||
void STrack::mark_removed()
|
||||
{
|
||||
state = TrackState::Removed;
|
||||
}
|
||||
|
||||
int STrack::next_id()
|
||||
{
|
||||
static int _count = 0;
|
||||
_count++;
|
||||
return _count;
|
||||
}
|
||||
|
||||
int STrack::end_frame()
|
||||
{
|
||||
return this->frame_id;
|
||||
}
|
||||
|
||||
void STrack::multi_predict( std::vector<STrack*> &stracks, byte_kalman::ByteKalmanFilter &kalman_filter)
|
||||
{
|
||||
for (int i = 0; i < stracks.size(); i++)
|
||||
{
|
||||
if (stracks[i]->state != TrackState::Tracked)
|
||||
{
|
||||
stracks[i]->mean[7] = 0;
|
||||
}
|
||||
kalman_filter.predict(stracks[i]->mean, stracks[i]->covariance);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
#pragma once
|
||||
|
||||
#include <opencv2/opencv.hpp>
|
||||
#include "BytekalmanFilter.h"
|
||||
|
||||
enum TrackState { New = 0, Tracked, Lost, Removed };
|
||||
|
||||
class STrack
|
||||
{
|
||||
public:
|
||||
STrack( std::vector<float> tlwh_, float score);
|
||||
~STrack();
|
||||
|
||||
std::vector<float> static tlbr_to_tlwh( std::vector<float> &tlbr);
|
||||
void static multi_predict( std::vector<STrack*> &stracks, byte_kalman::ByteKalmanFilter &kalman_filter);
|
||||
void static_tlwh();
|
||||
void static_tlbr();
|
||||
std::vector<float> tlwh_to_xyah( std::vector<float> tlwh_tmp);
|
||||
std::vector<float> to_xyah();
|
||||
void mark_lost();
|
||||
void mark_removed();
|
||||
int next_id();
|
||||
int end_frame();
|
||||
|
||||
void activate(byte_kalman::ByteKalmanFilter &kalman_filter, int frame_id);
|
||||
void re_activate(STrack &new_track, int frame_id, bool new_id = false);
|
||||
void update(STrack &new_track, int frame_id);
|
||||
|
||||
public:
|
||||
bool is_activated;
|
||||
int track_id;
|
||||
int state;
|
||||
|
||||
std::vector<float> _tlwh;
|
||||
std::vector<float> tlwh;
|
||||
std::vector<float> tlbr;
|
||||
int frame_id;
|
||||
int tracklet_len;
|
||||
int start_frame;
|
||||
|
||||
KAL_MEAN mean;
|
||||
KAL_COVA covariance;
|
||||
float score;
|
||||
|
||||
private:
|
||||
byte_kalman::ByteKalmanFilter kalman_filter;
|
||||
};
|
|
@ -0,0 +1,27 @@
|
|||
#pragma once
|
||||
|
||||
#include <cstddef>
|
||||
#include <vector>
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
|
||||
|
||||
typedef Eigen::Matrix<float, 1, 4, Eigen::RowMajor> DETECTBOX;
|
||||
typedef Eigen::Matrix<float, -1, 4, Eigen::RowMajor> DETECTBOXSS;
|
||||
|
||||
|
||||
//Kalmanfilter
|
||||
typedef Eigen::Matrix<float, 8, 8, Eigen::RowMajor> KAL_FILTER;
|
||||
typedef Eigen::Matrix<float, 1, 8, Eigen::RowMajor> KAL_MEAN;
|
||||
typedef Eigen::Matrix<float, 8, 8, Eigen::RowMajor> KAL_COVA;
|
||||
typedef Eigen::Matrix<float, 1, 4, Eigen::RowMajor> KAL_HMEAN;
|
||||
typedef Eigen::Matrix<float, 4, 4, Eigen::RowMajor> KAL_HCOVA;
|
||||
using KAL_DATA = std::pair<KAL_MEAN, KAL_COVA>;
|
||||
using KAL_HDATA = std::pair<KAL_HMEAN, KAL_HCOVA>;
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,343 @@
|
|||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "lapjv.h"
|
||||
|
||||
/** Column-reduction and reduction transfer for a dense cost matrix.
|
||||
*/
|
||||
int_t _ccrrt_dense(const uint_t n, cost_t *cost[],
|
||||
int_t *free_rows, int_t *x, int_t *y, cost_t *v)
|
||||
{
|
||||
int_t n_free_rows;
|
||||
boolean *unique;
|
||||
|
||||
for (uint_t i = 0; i < n; i++) {
|
||||
x[i] = -1;
|
||||
v[i] = LARGE;
|
||||
y[i] = 0;
|
||||
}
|
||||
for (uint_t i = 0; i < n; i++) {
|
||||
for (uint_t j = 0; j < n; j++) {
|
||||
const cost_t c = cost[i][j];
|
||||
if (c < v[j]) {
|
||||
v[j] = c;
|
||||
y[j] = i;
|
||||
}
|
||||
PRINTF("i=%d, j=%d, c[i,j]=%f, v[j]=%f y[j]=%d\n", i, j, c, v[j], y[j]);
|
||||
}
|
||||
}
|
||||
PRINT_COST_ARRAY(v, n);
|
||||
PRINT_INDEX_ARRAY(y, n);
|
||||
NEW(unique, boolean, n);
|
||||
memset(unique, TRUE, n);
|
||||
{
|
||||
int_t j = n;
|
||||
do {
|
||||
j--;
|
||||
const int_t i = y[j];
|
||||
if (x[i] < 0) {
|
||||
x[i] = j;
|
||||
}
|
||||
else {
|
||||
unique[i] = FALSE;
|
||||
y[j] = -1;
|
||||
}
|
||||
} while (j > 0);
|
||||
}
|
||||
n_free_rows = 0;
|
||||
for (uint_t i = 0; i < n; i++) {
|
||||
if (x[i] < 0) {
|
||||
free_rows[n_free_rows++] = i;
|
||||
}
|
||||
else if (unique[i]) {
|
||||
const int_t j = x[i];
|
||||
cost_t min = LARGE;
|
||||
for (uint_t j2 = 0; j2 < n; j2++) {
|
||||
if (j2 == (uint_t)j) {
|
||||
continue;
|
||||
}
|
||||
const cost_t c = cost[i][j2] - v[j2];
|
||||
if (c < min) {
|
||||
min = c;
|
||||
}
|
||||
}
|
||||
PRINTF("v[%d] = %f - %f\n", j, v[j], min);
|
||||
v[j] -= min;
|
||||
}
|
||||
}
|
||||
FREE(unique);
|
||||
return n_free_rows;
|
||||
}
|
||||
|
||||
|
||||
/** Augmenting row reduction for a dense cost matrix.
|
||||
*/
|
||||
int_t _carr_dense(
|
||||
const uint_t n, cost_t *cost[],
|
||||
const uint_t n_free_rows,
|
||||
int_t *free_rows, int_t *x, int_t *y, cost_t *v)
|
||||
{
|
||||
uint_t current = 0;
|
||||
int_t new_free_rows = 0;
|
||||
uint_t rr_cnt = 0;
|
||||
PRINT_INDEX_ARRAY(x, n);
|
||||
PRINT_INDEX_ARRAY(y, n);
|
||||
PRINT_COST_ARRAY(v, n);
|
||||
PRINT_INDEX_ARRAY(free_rows, n_free_rows);
|
||||
while (current < n_free_rows) {
|
||||
int_t i0;
|
||||
int_t j1, j2;
|
||||
cost_t v1, v2, v1_new;
|
||||
boolean v1_lowers;
|
||||
|
||||
rr_cnt++;
|
||||
PRINTF("current = %d rr_cnt = %d\n", current, rr_cnt);
|
||||
const int_t free_i = free_rows[current++];
|
||||
j1 = 0;
|
||||
v1 = cost[free_i][0] - v[0];
|
||||
j2 = -1;
|
||||
v2 = LARGE;
|
||||
for (uint_t j = 1; j < n; j++) {
|
||||
PRINTF("%d = %f %d = %f\n", j1, v1, j2, v2);
|
||||
const cost_t c = cost[free_i][j] - v[j];
|
||||
if (c < v2) {
|
||||
if (c >= v1) {
|
||||
v2 = c;
|
||||
j2 = j;
|
||||
}
|
||||
else {
|
||||
v2 = v1;
|
||||
v1 = c;
|
||||
j2 = j1;
|
||||
j1 = j;
|
||||
}
|
||||
}
|
||||
}
|
||||
i0 = y[j1];
|
||||
v1_new = v[j1] - (v2 - v1);
|
||||
v1_lowers = v1_new < v[j1];
|
||||
PRINTF("%d %d 1=%d,%f 2=%d,%f v1'=%f(%d,%g) \n", free_i, i0, j1, v1, j2, v2, v1_new, v1_lowers, v[j1] - v1_new);
|
||||
if (rr_cnt < current * n) {
|
||||
if (v1_lowers) {
|
||||
v[j1] = v1_new;
|
||||
}
|
||||
else if (i0 >= 0 && j2 >= 0) {
|
||||
j1 = j2;
|
||||
i0 = y[j2];
|
||||
}
|
||||
if (i0 >= 0) {
|
||||
if (v1_lowers) {
|
||||
free_rows[--current] = i0;
|
||||
}
|
||||
else {
|
||||
free_rows[new_free_rows++] = i0;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
PRINTF("rr_cnt=%d >= %d (current=%d * n=%d)\n", rr_cnt, current * n, current, n);
|
||||
if (i0 >= 0) {
|
||||
free_rows[new_free_rows++] = i0;
|
||||
}
|
||||
}
|
||||
x[free_i] = j1;
|
||||
y[j1] = free_i;
|
||||
}
|
||||
return new_free_rows;
|
||||
}
|
||||
|
||||
|
||||
/** Find columns with minimum d[j] and put them on the SCAN list.
|
||||
*/
|
||||
uint_t _find_dense(const uint_t n, uint_t lo, cost_t *d, int_t *cols, int_t *y)
|
||||
{
|
||||
uint_t hi = lo + 1;
|
||||
cost_t mind = d[cols[lo]];
|
||||
for (uint_t k = hi; k < n; k++) {
|
||||
int_t j = cols[k];
|
||||
if (d[j] <= mind) {
|
||||
if (d[j] < mind) {
|
||||
hi = lo;
|
||||
mind = d[j];
|
||||
}
|
||||
cols[k] = cols[hi];
|
||||
cols[hi++] = j;
|
||||
}
|
||||
}
|
||||
return hi;
|
||||
}
|
||||
|
||||
|
||||
// Scan all columns in TODO starting from arbitrary column in SCAN
|
||||
// and try to decrease d of the TODO columns using the SCAN column.
|
||||
int_t _scan_dense(const uint_t n, cost_t *cost[],
|
||||
uint_t *plo, uint_t*phi,
|
||||
cost_t *d, int_t *cols, int_t *pred,
|
||||
int_t *y, cost_t *v)
|
||||
{
|
||||
uint_t lo = *plo;
|
||||
uint_t hi = *phi;
|
||||
cost_t h, cred_ij;
|
||||
|
||||
while (lo != hi) {
|
||||
int_t j = cols[lo++];
|
||||
const int_t i = y[j];
|
||||
const cost_t mind = d[j];
|
||||
h = cost[i][j] - v[j] - mind;
|
||||
PRINTF("i=%d j=%d h=%f\n", i, j, h);
|
||||
// For all columns in TODO
|
||||
for (uint_t k = hi; k < n; k++) {
|
||||
j = cols[k];
|
||||
cred_ij = cost[i][j] - v[j] - h;
|
||||
if (cred_ij < d[j]) {
|
||||
d[j] = cred_ij;
|
||||
pred[j] = i;
|
||||
if (cred_ij == mind) {
|
||||
if (y[j] < 0) {
|
||||
return j;
|
||||
}
|
||||
cols[k] = cols[hi];
|
||||
cols[hi++] = j;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
*plo = lo;
|
||||
*phi = hi;
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/** Single iteration of modified Dijkstra shortest path algorithm as explained in the JV paper.
|
||||
*
|
||||
* This is a dense matrix version.
|
||||
*
|
||||
* \return The closest free column index.
|
||||
*/
|
||||
int_t find_path_dense(
|
||||
const uint_t n, cost_t *cost[],
|
||||
const int_t start_i,
|
||||
int_t *y, cost_t *v,
|
||||
int_t *pred)
|
||||
{
|
||||
uint_t lo = 0, hi = 0;
|
||||
int_t final_j = -1;
|
||||
uint_t n_ready = 0;
|
||||
int_t *cols;
|
||||
cost_t *d;
|
||||
|
||||
NEW(cols, int_t, n);
|
||||
NEW(d, cost_t, n);
|
||||
|
||||
for (uint_t i = 0; i < n; i++) {
|
||||
cols[i] = i;
|
||||
pred[i] = start_i;
|
||||
d[i] = cost[start_i][i] - v[i];
|
||||
}
|
||||
PRINT_COST_ARRAY(d, n);
|
||||
while (final_j == -1) {
|
||||
// No columns left on the SCAN list.
|
||||
if (lo == hi) {
|
||||
PRINTF("%d..%d -> find\n", lo, hi);
|
||||
n_ready = lo;
|
||||
hi = _find_dense(n, lo, d, cols, y);
|
||||
PRINTF("check %d..%d\n", lo, hi);
|
||||
PRINT_INDEX_ARRAY(cols, n);
|
||||
for (uint_t k = lo; k < hi; k++) {
|
||||
const int_t j = cols[k];
|
||||
if (y[j] < 0) {
|
||||
final_j = j;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (final_j == -1) {
|
||||
PRINTF("%d..%d -> scan\n", lo, hi);
|
||||
final_j = _scan_dense(
|
||||
n, cost, &lo, &hi, d, cols, pred, y, v);
|
||||
PRINT_COST_ARRAY(d, n);
|
||||
PRINT_INDEX_ARRAY(cols, n);
|
||||
PRINT_INDEX_ARRAY(pred, n);
|
||||
}
|
||||
}
|
||||
|
||||
PRINTF("found final_j=%d\n", final_j);
|
||||
PRINT_INDEX_ARRAY(cols, n);
|
||||
{
|
||||
const cost_t mind = d[cols[lo]];
|
||||
for (uint_t k = 0; k < n_ready; k++) {
|
||||
const int_t j = cols[k];
|
||||
v[j] += d[j] - mind;
|
||||
}
|
||||
}
|
||||
|
||||
FREE(cols);
|
||||
FREE(d);
|
||||
|
||||
return final_j;
|
||||
}
|
||||
|
||||
|
||||
/** Augment for a dense cost matrix.
|
||||
*/
|
||||
int_t _ca_dense(
|
||||
const uint_t n, cost_t *cost[],
|
||||
const uint_t n_free_rows,
|
||||
int_t *free_rows, int_t *x, int_t *y, cost_t *v)
|
||||
{
|
||||
int_t *pred;
|
||||
|
||||
NEW(pred, int_t, n);
|
||||
|
||||
for (int_t *pfree_i = free_rows; pfree_i < free_rows + n_free_rows; pfree_i++) {
|
||||
int_t i = -1, j;
|
||||
uint_t k = 0;
|
||||
|
||||
PRINTF("looking at free_i=%d\n", *pfree_i);
|
||||
j = find_path_dense(n, cost, *pfree_i, y, v, pred);
|
||||
ASSERT(j >= 0);
|
||||
ASSERT(j < n);
|
||||
while (i != *pfree_i) {
|
||||
PRINTF("augment %d\n", j);
|
||||
PRINT_INDEX_ARRAY(pred, n);
|
||||
i = pred[j];
|
||||
PRINTF("y[%d]=%d -> %d\n", j, y[j], i);
|
||||
y[j] = i;
|
||||
PRINT_INDEX_ARRAY(x, n);
|
||||
SWAP_INDICES(j, x[i]);
|
||||
k++;
|
||||
if (k >= n) {
|
||||
ASSERT(FALSE);
|
||||
}
|
||||
}
|
||||
}
|
||||
FREE(pred);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/** Solve dense sparse LAP.
|
||||
*/
|
||||
int lapjv_internal(
|
||||
const uint_t n, cost_t *cost[],
|
||||
int_t *x, int_t *y)
|
||||
{
|
||||
int ret;
|
||||
int_t *free_rows;
|
||||
cost_t *v;
|
||||
|
||||
NEW(free_rows, int_t, n);
|
||||
NEW(v, cost_t, n);
|
||||
ret = _ccrrt_dense(n, cost, free_rows, x, y, v);
|
||||
int i = 0;
|
||||
while (ret > 0 && i < 2) {
|
||||
ret = _carr_dense(n, cost, ret, free_rows, x, y, v);
|
||||
i++;
|
||||
}
|
||||
if (ret > 0) {
|
||||
ret = _ca_dense(n, cost, ret, free_rows, x, y, v);
|
||||
}
|
||||
FREE(v);
|
||||
FREE(free_rows);
|
||||
return ret;
|
||||
}
|
|
@ -0,0 +1,63 @@
|
|||
#ifndef LAPJV_H
|
||||
#define LAPJV_H
|
||||
|
||||
#define LARGE 1000000
|
||||
|
||||
#if !defined TRUE
|
||||
#define TRUE 1
|
||||
#endif
|
||||
#if !defined FALSE
|
||||
#define FALSE 0
|
||||
#endif
|
||||
|
||||
#define NEW(x, t, n) if ((x = (t *)malloc(sizeof(t) * (n))) == 0) { return -1; }
|
||||
#define FREE(x) if (x != 0) { free(x); x = 0; }
|
||||
#define SWAP_INDICES(a, b) { int_t _temp_index = a; a = b; b = _temp_index; }
|
||||
|
||||
#if 0
|
||||
#include <assert.h>
|
||||
#define ASSERT(cond) assert(cond)
|
||||
#define PRINTF(fmt, ...) printf(fmt, ##__VA_ARGS__)
|
||||
#define PRINT_COST_ARRAY(a, n) \
|
||||
while (1) { \
|
||||
printf(#a" = ["); \
|
||||
if ((n) > 0) { \
|
||||
printf("%f", (a)[0]); \
|
||||
for (uint_t j = 1; j < n; j++) { \
|
||||
printf(", %f", (a)[j]); \
|
||||
} \
|
||||
} \
|
||||
printf("]\n"); \
|
||||
break; \
|
||||
}
|
||||
#define PRINT_INDEX_ARRAY(a, n) \
|
||||
while (1) { \
|
||||
printf(#a" = ["); \
|
||||
if ((n) > 0) { \
|
||||
printf("%d", (a)[0]); \
|
||||
for (uint_t j = 1; j < n; j++) { \
|
||||
printf(", %d", (a)[j]); \
|
||||
} \
|
||||
} \
|
||||
printf("]\n"); \
|
||||
break; \
|
||||
}
|
||||
#else
|
||||
#define ASSERT(cond)
|
||||
#define PRINTF(fmt, ...)
|
||||
#define PRINT_COST_ARRAY(a, n)
|
||||
#define PRINT_INDEX_ARRAY(a, n)
|
||||
#endif
|
||||
|
||||
|
||||
typedef signed int int_t;
|
||||
typedef unsigned int uint_t;
|
||||
typedef double cost_t;
|
||||
typedef char boolean;
|
||||
typedef enum fp_t { FP_1 = 1, FP_2 = 2, FP_DYNAMIC = 3 } fp_t;
|
||||
|
||||
extern int_t lapjv_internal(
|
||||
const uint_t n, cost_t *cost[],
|
||||
int_t *x, int_t *y);
|
||||
|
||||
#endif // LAPJV_H
|
|
@ -0,0 +1,432 @@
|
|||
#include "BYTETracker.h"
|
||||
#include "lapjv.h"
|
||||
|
||||
namespace sv {
|
||||
|
||||
std::vector<STrack*> BYTETracker::joint_stracks( std::vector<STrack*> &tlista, std::vector<STrack> &tlistb)
|
||||
{
|
||||
std::map<int, int> exists;
|
||||
std::vector<STrack*> res;
|
||||
for (int i = 0; i < tlista.size(); i++)
|
||||
{
|
||||
exists.insert(std::pair<int, int>(tlista[i]->track_id, 1));
|
||||
res.push_back(tlista[i]);
|
||||
}
|
||||
for (int i = 0; i < tlistb.size(); i++)
|
||||
{
|
||||
int tid = tlistb[i].track_id;
|
||||
if (!exists[tid] || exists.count(tid) == 0)
|
||||
{
|
||||
exists[tid] = 1;
|
||||
res.push_back(&tlistb[i]);
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
std::vector<STrack> BYTETracker::joint_stracks( std::vector<STrack> &tlista, std::vector<STrack> &tlistb)
|
||||
{
|
||||
std::map<int, int> exists;
|
||||
std::vector<STrack> res;
|
||||
for (int i = 0; i < tlista.size(); i++)
|
||||
{
|
||||
exists.insert(std::pair<int, int>(tlista[i].track_id, 1));
|
||||
res.push_back(tlista[i]);
|
||||
}
|
||||
for (int i = 0; i < tlistb.size(); i++)
|
||||
{
|
||||
int tid = tlistb[i].track_id;
|
||||
if (!exists[tid] || exists.count(tid) == 0)
|
||||
{
|
||||
exists[tid] = 1;
|
||||
res.push_back(tlistb[i]);
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
std::vector<STrack> BYTETracker::sub_stracks( std::vector<STrack> &tlista, std::vector<STrack> &tlistb)
|
||||
{
|
||||
std::map<int, STrack> stracks;
|
||||
for (int i = 0; i < tlista.size(); i++)
|
||||
{
|
||||
stracks.insert(std::pair<int, STrack>(tlista[i].track_id, tlista[i]));
|
||||
}
|
||||
for (int i = 0; i < tlistb.size(); i++)
|
||||
{
|
||||
int tid = tlistb[i].track_id;
|
||||
if (stracks.count(tid) != 0)
|
||||
{
|
||||
stracks.erase(tid);
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<STrack> res;
|
||||
std::map<int, STrack>::iterator it;
|
||||
for (it = stracks.begin(); it != stracks.end(); ++it)
|
||||
{
|
||||
res.push_back(it->second);
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
void BYTETracker::remove_duplicate_stracks( std::vector<STrack> &resa, std::vector<STrack> &resb, std::vector<STrack> &stracksa, std::vector<STrack> &stracksb)
|
||||
{
|
||||
std::vector< std::vector<float> > pdist = iou_distance(stracksa, stracksb);
|
||||
std::vector<std::pair<int, int> > pairs;
|
||||
for (int i = 0; i < pdist.size(); i++)
|
||||
{
|
||||
for (int j = 0; j < pdist[i].size(); j++)
|
||||
{
|
||||
if (pdist[i][j] < 0.15)
|
||||
{
|
||||
pairs.push_back(std::pair<int, int>(i, j));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<int> dupa, dupb;
|
||||
for (int i = 0; i < pairs.size(); i++)
|
||||
{
|
||||
int timep = stracksa[pairs[i].first].frame_id - stracksa[pairs[i].first].start_frame;
|
||||
int timeq = stracksb[pairs[i].second].frame_id - stracksb[pairs[i].second].start_frame;
|
||||
if (timep > timeq)
|
||||
dupb.push_back(pairs[i].second);
|
||||
else
|
||||
dupa.push_back(pairs[i].first);
|
||||
}
|
||||
|
||||
for (int i = 0; i < stracksa.size(); i++)
|
||||
{
|
||||
std::vector<int>::iterator iter = find(dupa.begin(), dupa.end(), i);
|
||||
if (iter == dupa.end())
|
||||
{
|
||||
resa.push_back(stracksa[i]);
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < stracksb.size(); i++)
|
||||
{
|
||||
std::vector<int>::iterator iter = find(dupb.begin(), dupb.end(), i);
|
||||
if (iter == dupb.end())
|
||||
{
|
||||
resb.push_back(stracksb[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void BYTETracker::linear_assignment( std::vector< std::vector<float> > &cost_matrix, int cost_matrix_size, int cost_matrix_size_size, float thresh,
|
||||
std::vector< std::vector<int> > &matches, std::vector<int> &unmatched_a, std::vector<int> &unmatched_b)
|
||||
{
|
||||
if (cost_matrix.size() == 0)
|
||||
{
|
||||
for (int i = 0; i < cost_matrix_size; i++)
|
||||
{
|
||||
unmatched_a.push_back(i);
|
||||
}
|
||||
for (int i = 0; i < cost_matrix_size_size; i++)
|
||||
{
|
||||
unmatched_b.push_back(i);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
std::vector<int> rowsol; std::vector<int> colsol;
|
||||
float c = lapjv(cost_matrix, rowsol, colsol, true, thresh);
|
||||
for (int i = 0; i < rowsol.size(); i++)
|
||||
{
|
||||
if (rowsol[i] >= 0)
|
||||
{
|
||||
std::vector<int> match;
|
||||
match.push_back(i);
|
||||
match.push_back(rowsol[i]);
|
||||
matches.push_back(match);
|
||||
}
|
||||
else
|
||||
{
|
||||
unmatched_a.push_back(i);
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < colsol.size(); i++)
|
||||
{
|
||||
if (colsol[i] < 0)
|
||||
{
|
||||
unmatched_b.push_back(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::vector< std::vector<float> > BYTETracker::ious( std::vector< std::vector<float> > &atlbrs, std::vector< std::vector<float> > &btlbrs)
|
||||
{
|
||||
std::vector< std::vector<float> > ious;
|
||||
if (atlbrs.size()*btlbrs.size() == 0)
|
||||
return ious;
|
||||
|
||||
ious.resize(atlbrs.size());
|
||||
for (int i = 0; i < ious.size(); i++)
|
||||
{
|
||||
ious[i].resize(btlbrs.size());
|
||||
}
|
||||
|
||||
//bbox_ious
|
||||
for (int k = 0; k < btlbrs.size(); k++)
|
||||
{
|
||||
std::vector<float> ious_tmp;
|
||||
float box_area = (btlbrs[k][2] - btlbrs[k][0] + 1)*(btlbrs[k][3] - btlbrs[k][1] + 1);
|
||||
for (int n = 0; n < atlbrs.size(); n++)
|
||||
{
|
||||
float iw = cv::min(atlbrs[n][2], btlbrs[k][2]) - cv::max(atlbrs[n][0], btlbrs[k][0]) + 1;
|
||||
if (iw > 0)
|
||||
{
|
||||
float ih = cv::min(atlbrs[n][3], btlbrs[k][3]) - cv::max(atlbrs[n][1], btlbrs[k][1]) + 1;
|
||||
if(ih > 0)
|
||||
{
|
||||
float ua = (atlbrs[n][2] - atlbrs[n][0] + 1)*(atlbrs[n][3] - atlbrs[n][1] + 1) + box_area - iw * ih;
|
||||
ious[n][k] = iw * ih / ua;
|
||||
}
|
||||
else
|
||||
{
|
||||
ious[n][k] = 0.0;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
ious[n][k] = 0.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ious;
|
||||
}
|
||||
|
||||
std::vector< std::vector<float> > BYTETracker::iou_distance( std::vector<STrack*> &atracks, std::vector<STrack> &btracks, int &dist_size, int &dist_size_size)
|
||||
{
|
||||
std::vector< std::vector<float> > cost_matrix;
|
||||
if (atracks.size() * btracks.size() == 0)
|
||||
{
|
||||
dist_size = atracks.size();
|
||||
dist_size_size = btracks.size();
|
||||
return cost_matrix;
|
||||
}
|
||||
std::vector< std::vector<float> > atlbrs, btlbrs;
|
||||
for (int i = 0; i < atracks.size(); i++)
|
||||
{
|
||||
atlbrs.push_back(atracks[i]->tlbr);
|
||||
}
|
||||
for (int i = 0; i < btracks.size(); i++)
|
||||
{
|
||||
btlbrs.push_back(btracks[i].tlbr);
|
||||
}
|
||||
|
||||
dist_size = atracks.size();
|
||||
dist_size_size = btracks.size();
|
||||
|
||||
std::vector< std::vector<float> > _ious = ious(atlbrs, btlbrs);
|
||||
|
||||
for (int i = 0; i < _ious.size();i++)
|
||||
{
|
||||
std::vector<float> _iou;
|
||||
for (int j = 0; j < _ious[i].size(); j++)
|
||||
{
|
||||
_iou.push_back(1 - _ious[i][j]);
|
||||
}
|
||||
cost_matrix.push_back(_iou);
|
||||
}
|
||||
|
||||
return cost_matrix;
|
||||
}
|
||||
|
||||
std::vector< std::vector<float> > BYTETracker::iou_distance( std::vector<STrack> &atracks, std::vector<STrack> &btracks)
|
||||
{
|
||||
std::vector< std::vector<float> > atlbrs, btlbrs;
|
||||
for (int i = 0; i < atracks.size(); i++)
|
||||
{
|
||||
atlbrs.push_back(atracks[i].tlbr);
|
||||
}
|
||||
for (int i = 0; i < btracks.size(); i++)
|
||||
{
|
||||
btlbrs.push_back(btracks[i].tlbr);
|
||||
}
|
||||
|
||||
std::vector< std::vector<float> > _ious = ious(atlbrs, btlbrs);
|
||||
std::vector< std::vector<float> > cost_matrix;
|
||||
for (int i = 0; i < _ious.size(); i++)
|
||||
{
|
||||
std::vector<float> _iou;
|
||||
for (int j = 0; j < _ious[i].size(); j++)
|
||||
{
|
||||
_iou.push_back(1 - _ious[i][j]);
|
||||
}
|
||||
cost_matrix.push_back(_iou);
|
||||
}
|
||||
|
||||
return cost_matrix;
|
||||
}
|
||||
|
||||
double BYTETracker::lapjv(const std::vector< std::vector<float> > &cost, std::vector<int> &rowsol, std::vector<int> &colsol,
|
||||
bool extend_cost, float cost_limit, bool return_cost)
|
||||
{
|
||||
std::vector< std::vector<float> > cost_c;
|
||||
cost_c.assign(cost.begin(), cost.end());
|
||||
|
||||
std::vector< std::vector<float> > cost_c_extended;
|
||||
|
||||
int n_rows = cost.size();
|
||||
int n_cols = cost[0].size();
|
||||
rowsol.resize(n_rows);
|
||||
colsol.resize(n_cols);
|
||||
|
||||
int n = 0;
|
||||
if (n_rows == n_cols)
|
||||
{
|
||||
n = n_rows;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!extend_cost)
|
||||
{
|
||||
std::cout << "set extend_cost=True" << std::endl;
|
||||
system("pause");
|
||||
exit(0);
|
||||
}
|
||||
}
|
||||
|
||||
if (extend_cost || cost_limit < LONG_MAX)
|
||||
{
|
||||
n = n_rows + n_cols;
|
||||
cost_c_extended.resize(n);
|
||||
for (int i = 0; i < cost_c_extended.size(); i++)
|
||||
cost_c_extended[i].resize(n);
|
||||
|
||||
if (cost_limit < LONG_MAX)
|
||||
{
|
||||
for (int i = 0; i < cost_c_extended.size(); i++)
|
||||
{
|
||||
for (int j = 0; j < cost_c_extended[i].size(); j++)
|
||||
{
|
||||
cost_c_extended[i][j] = cost_limit / 2.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
float cost_max = -1;
|
||||
for (int i = 0; i < cost_c.size(); i++)
|
||||
{
|
||||
for (int j = 0; j < cost_c[i].size(); j++)
|
||||
{
|
||||
if (cost_c[i][j] > cost_max)
|
||||
cost_max = cost_c[i][j];
|
||||
}
|
||||
}
|
||||
for (int i = 0; i < cost_c_extended.size(); i++)
|
||||
{
|
||||
for (int j = 0; j < cost_c_extended[i].size(); j++)
|
||||
{
|
||||
cost_c_extended[i][j] = cost_max + 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = n_rows; i < cost_c_extended.size(); i++)
|
||||
{
|
||||
for (int j = n_cols; j < cost_c_extended[i].size(); j++)
|
||||
{
|
||||
cost_c_extended[i][j] = 0;
|
||||
}
|
||||
}
|
||||
for (int i = 0; i < n_rows; i++)
|
||||
{
|
||||
for (int j = 0; j < n_cols; j++)
|
||||
{
|
||||
cost_c_extended[i][j] = cost_c[i][j];
|
||||
}
|
||||
}
|
||||
|
||||
cost_c.clear();
|
||||
cost_c.assign(cost_c_extended.begin(), cost_c_extended.end());
|
||||
}
|
||||
|
||||
double **cost_ptr;
|
||||
cost_ptr = new double *[sizeof(double *) * n];
|
||||
for (int i = 0; i < n; i++)
|
||||
cost_ptr[i] = new double[sizeof(double) * n];
|
||||
|
||||
for (int i = 0; i < n; i++)
|
||||
{
|
||||
for (int j = 0; j < n; j++)
|
||||
{
|
||||
cost_ptr[i][j] = cost_c[i][j];
|
||||
}
|
||||
}
|
||||
|
||||
int* x_c = new int[sizeof(int) * n];
|
||||
int *y_c = new int[sizeof(int) * n];
|
||||
|
||||
int ret = lapjv_internal(n, cost_ptr, x_c, y_c);
|
||||
if (ret != 0)
|
||||
{
|
||||
std::cout << "Calculate Wrong!" << std::endl;
|
||||
system("pause");
|
||||
exit(0);
|
||||
}
|
||||
|
||||
double opt = 0.0;
|
||||
|
||||
if (n != n_rows)
|
||||
{
|
||||
for (int i = 0; i < n; i++)
|
||||
{
|
||||
if (x_c[i] >= n_cols)
|
||||
x_c[i] = -1;
|
||||
if (y_c[i] >= n_rows)
|
||||
y_c[i] = -1;
|
||||
}
|
||||
for (int i = 0; i < n_rows; i++)
|
||||
{
|
||||
rowsol[i] = x_c[i];
|
||||
}
|
||||
for (int i = 0; i < n_cols; i++)
|
||||
{
|
||||
colsol[i] = y_c[i];
|
||||
}
|
||||
|
||||
if (return_cost)
|
||||
{
|
||||
for (int i = 0; i < rowsol.size(); i++)
|
||||
{
|
||||
if (rowsol[i] != -1)
|
||||
{
|
||||
//cout << i << "\t" << rowsol[i] << "\t" << cost_ptr[i][rowsol[i]] << endl;
|
||||
opt += cost_ptr[i][rowsol[i]];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (return_cost)
|
||||
{
|
||||
for (int i = 0; i < rowsol.size(); i++)
|
||||
{
|
||||
opt += cost_ptr[i][rowsol[i]];
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < n; i++)
|
||||
{
|
||||
delete[]cost_ptr[i];
|
||||
}
|
||||
delete[]cost_ptr;
|
||||
delete[]x_c;
|
||||
delete[]y_c;
|
||||
|
||||
return opt;
|
||||
}
|
||||
|
||||
cv::Scalar BYTETracker::get_color(int idx)
|
||||
{
|
||||
idx += 3;
|
||||
return cv::Scalar(37 * idx % 255, 17 * idx % 255, 29 * idx % 255);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,377 @@
|
|||
#include "sort.h"
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include <limits>
|
||||
#include <vector>
|
||||
#include "gason.h"
|
||||
#include "sv_util.h"
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
namespace sv {
|
||||
|
||||
|
||||
KalmanFilter::KalmanFilter()
|
||||
{
|
||||
this->_chi2inv95 << 3.8415, 5.9915, 7.8147, 9.4877, 11.070, 12.592, 14.067, 15.507, 16.919;
|
||||
this->_F = MatrixXd::Identity(8, 8);
|
||||
for (int i=0; i<4; i++)
|
||||
{
|
||||
this->_F(i,i+4) = 1.; //1
|
||||
}
|
||||
this->_H = MatrixXd::Identity(4, 8);
|
||||
this->_std_weight_position = 1. / 20; //1./20
|
||||
this->_std_weight_vel = 1. / 160; //1./160
|
||||
}
|
||||
|
||||
KalmanFilter::~KalmanFilter()
|
||||
{
|
||||
}
|
||||
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > KalmanFilter::initiate(Vector4d &bbox)
|
||||
{
|
||||
Matrix<double,8,1> mean;
|
||||
Matrix<double,4,1> zero_vector;
|
||||
zero_vector.setZero();
|
||||
mean << bbox(0), bbox(1), (double)bbox(2) / (double)bbox(3), bbox(3), zero_vector;
|
||||
VectorXd stds(8);
|
||||
stds << 2 * this->_std_weight_position * mean(3), 2 * this->_std_weight_position * mean(3), 0.01, 2 * this->_std_weight_position * mean(3), \
|
||||
10 * this->_std_weight_vel * mean(3), 10 * this->_std_weight_vel * mean(3), 1e-5, 10 * this->_std_weight_vel * mean(3);
|
||||
MatrixXd squared = stds.array().square();
|
||||
Matrix<double, 8, 8> covariances;
|
||||
covariances = squared.asDiagonal();
|
||||
return make_pair(mean, covariances);
|
||||
}
|
||||
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > KalmanFilter::update(Matrix<double, 8, 1> mean, Matrix<double, 8, 8> covariances, sv::Box &box)
|
||||
{
|
||||
VectorXd measurement(4);
|
||||
double a = (double)(box.x2-box.x1) / (double)(box.y2-box.y1);
|
||||
measurement << box.x1+(box.x2-box.x1)/2, box.y1+(box.y2-box.y1)/2, a, box.y2-box.y1;
|
||||
pair<Matrix<double, 4, 1>, Matrix<double, 4, 4> > projected = project(mean, covariances);
|
||||
Matrix<double, 4, 1> projected_mean = projected.first;
|
||||
Matrix<double, 4, 4> projected_cov = projected.second;
|
||||
|
||||
Eigen::LLT<Eigen::MatrixXd> chol_factor(projected_cov);
|
||||
MatrixXd Kalman_gain = (chol_factor.solve((covariances * this->_H.transpose()).transpose())).transpose();
|
||||
|
||||
VectorXd innovation = measurement - projected_mean;
|
||||
Matrix<double, 8, 1> new_mean = mean + Kalman_gain *innovation;
|
||||
Matrix<double, 8, 8> new_covariances = covariances - Kalman_gain * projected_cov * Kalman_gain.transpose();
|
||||
|
||||
return make_pair(new_mean, new_covariances);
|
||||
}
|
||||
|
||||
pair<Matrix<double, 4, 1>, Matrix<double, 4, 4> > KalmanFilter::project(Matrix<double, 8, 1> mean, Matrix<double, 8, 8> covariances)
|
||||
{
|
||||
VectorXd stds(4);
|
||||
stds << this->_std_weight_position * mean(3), this->_std_weight_position * mean(3), 0.1, this->_std_weight_position * mean(3);
|
||||
MatrixXd squared = stds.array().square();
|
||||
MatrixXd R = squared.asDiagonal();
|
||||
|
||||
Matrix<double, 4, 1> pro_mean = this->_H * mean;
|
||||
Matrix<double, 4, 4> pro_covariances = this->_H * covariances * this->_H.transpose() + R;
|
||||
return make_pair(pro_mean, pro_covariances);
|
||||
}
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > KalmanFilter::predict(Matrix<double, 8, 1> mean, Matrix<double, 8, 8> covariances)
|
||||
{
|
||||
VectorXd stds(8);
|
||||
stds << this->_std_weight_position * mean(3), this->_std_weight_position * mean(3), 1e-2, this->_std_weight_position * mean(3), \
|
||||
this->_std_weight_vel * mean(3), this->_std_weight_vel * mean(3), 1e-5, this->_std_weight_vel * mean(3); // a = 0.01
|
||||
MatrixXd squared = stds.array().square();
|
||||
MatrixXd Q = squared.asDiagonal();
|
||||
Matrix<double, 8, 1> pre_mean = this->_F * mean;
|
||||
Matrix<double, 8, 8> pre_cov = this->_F * covariances * this->_F.transpose()+Q;//+Q
|
||||
return make_pair(pre_mean, pre_cov);
|
||||
}
|
||||
|
||||
|
||||
SORT::~SORT()
|
||||
{
|
||||
}
|
||||
|
||||
void SORT::update(TargetsInFrame& tgts)
|
||||
{
|
||||
KalmanFilter kf;
|
||||
if (! this->_tracklets.size() || tgts.targets.size() == 0)
|
||||
{
|
||||
Vector4d bbox;
|
||||
for (int i=0; i<tgts.targets.size(); i++)
|
||||
{
|
||||
sv::Box box;
|
||||
tgts.targets[i].getBox(box);
|
||||
Tracklet tracklet;
|
||||
tracklet.id = ++ this->_next_tracklet_id;
|
||||
tgts.targets[i].tracked_id = this->_next_tracklet_id;
|
||||
tgts.targets[i].has_tid = true;
|
||||
|
||||
tracklet.bbox << box.x1+(box.x2-box.x1)/2, box.y1+(box.y2-box.y1)/2, box.x2-box.x1, box.y2-box.y1; // x,y,w,h; center(x,y)
|
||||
|
||||
tracklet.age = 0;
|
||||
tracklet.hits = 1;
|
||||
//tracklet.misses = 0;
|
||||
tracklet.frame_id = tgts.frame_id;
|
||||
tracklet.category_id = tgts.targets[i].category_id;
|
||||
if (tgts.frame_id == 0)
|
||||
{
|
||||
tracklet.tentative = false;
|
||||
}
|
||||
else
|
||||
{
|
||||
tracklet.tentative = true;
|
||||
}
|
||||
// initate the motion
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > motion = kf.initiate(tracklet.bbox);
|
||||
tracklet.mean = motion.first;
|
||||
tracklet.covariance = motion.second;
|
||||
|
||||
this->_tracklets.push_back(tracklet);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (int i=0; i<tgts.targets.size(); i++)
|
||||
{
|
||||
tgts.targets[i].tracked_id = 0;
|
||||
tgts.targets[i].has_tid = true;
|
||||
}
|
||||
|
||||
vector<int> match_det(tgts.targets.size(), -1);
|
||||
// predict the next state of each tracklet
|
||||
for (auto& tracklet : this->_tracklets)
|
||||
{
|
||||
tracklet.age++;
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > motion = kf.predict(tracklet.mean, tracklet.covariance);
|
||||
tracklet.bbox << motion.first(0), motion.first(1), motion.first(2) * motion.first(3), motion.first(3);
|
||||
tracklet.mean = motion.first;
|
||||
tracklet.covariance = motion.second;
|
||||
}
|
||||
|
||||
// Match the detections to the existing tracklets
|
||||
vector<vector<double> > iouMatrix(this->_tracklets.size(), vector<double> (tgts.targets.size(), 0));
|
||||
for (int i=0; i<this->_tracklets.size(); i++)
|
||||
{
|
||||
for (int j=0; j<tgts.targets.size(); j++)
|
||||
{
|
||||
sv::Box box;
|
||||
tgts.targets[j].getBox(box);
|
||||
iouMatrix[i][j] = this->_iou(this->_tracklets[i], box);
|
||||
}
|
||||
}
|
||||
|
||||
vector<pair<int, int> > matches = this->_hungarian(iouMatrix);
|
||||
for (auto& match : matches)
|
||||
{
|
||||
int trackletIndex = match.first;
|
||||
int detectionIndex = match.second;
|
||||
if (trackletIndex >= 0 && detectionIndex >= 0)
|
||||
{
|
||||
if (iouMatrix[match.first][match.second] <= 1-_iou_threshold) // iou_thrshold
|
||||
{
|
||||
sv::Box box;
|
||||
tgts.targets[detectionIndex].getBox(box);
|
||||
this->_tracklets[trackletIndex].age = 0;
|
||||
this->_tracklets[trackletIndex].hits++;
|
||||
this->_tracklets[trackletIndex].frame_id = tgts.frame_id;
|
||||
this->_tracklets[trackletIndex].bbox << box.x1+(box.x2-box.x1)/2, box.y1+(box.y2-box.y1)/2, box.x2-box.x1, box.y2-box.y1;
|
||||
tgts.targets[detectionIndex].tracked_id = this->_tracklets[trackletIndex].id;
|
||||
match_det[detectionIndex] = trackletIndex;
|
||||
}
|
||||
}
|
||||
}
|
||||
std::vector <vector<double>> ().swap(iouMatrix);
|
||||
for (int i=0; i<tgts.targets.size(); i++)
|
||||
{
|
||||
if (match_det[i] == -1)
|
||||
{
|
||||
sv::Box box;
|
||||
tgts.targets[i].getBox(box);
|
||||
Tracklet tracklet;
|
||||
tracklet.id = ++ this->_next_tracklet_id;
|
||||
tracklet.bbox << box.x1+(box.x2-box.x1)/2, (double)(box.y1+(box.y2-box.y1)/2), box.x2-box.x1, box.y2-box.y1; // c_x, c_y, w, h
|
||||
tracklet.age = 0;
|
||||
tracklet.hits = 1;
|
||||
tracklet.frame_id = tgts.frame_id;
|
||||
tracklet.category_id = tgts.targets[i].category_id;
|
||||
tracklet.tentative = true;
|
||||
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > new_motion = kf.initiate(tracklet.bbox);
|
||||
tracklet.mean = new_motion.first;
|
||||
tracklet.covariance = new_motion.second;
|
||||
|
||||
tgts.targets[i].tracked_id = this->_next_tracklet_id;
|
||||
tgts.targets[i].has_tid = true;
|
||||
this->_tracklets.push_back(tracklet);
|
||||
}
|
||||
else
|
||||
{
|
||||
sv::Box box;
|
||||
int track_id = match_det[i];
|
||||
tgts.targets[i].getBox(box);
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > updated = kf.update(this->_tracklets[track_id].mean, this->_tracklets[track_id].covariance, box);
|
||||
this->_tracklets[track_id].mean = updated.first;
|
||||
this->_tracklets[track_id].covariance = updated.second;
|
||||
}
|
||||
}
|
||||
|
||||
//sift tracklets
|
||||
for (auto& tracklet : this->_tracklets)
|
||||
{
|
||||
if (tracklet.hits >= _min_hits)
|
||||
{
|
||||
tracklet.tentative = false;
|
||||
}
|
||||
if ((tgts.frame_id-tracklet.frame_id <= _max_age) && !(tracklet.tentative && tracklet.frame_id != tgts.frame_id))
|
||||
{
|
||||
_new_tracklets.push_back(tracklet);
|
||||
}
|
||||
}
|
||||
_tracklets = _new_tracklets;
|
||||
std::vector <Tracklet> ().swap(_new_tracklets);
|
||||
}
|
||||
}
|
||||
|
||||
vector<Tracklet> SORT::getTracklets() const
|
||||
{
|
||||
return this->_tracklets;
|
||||
}
|
||||
|
||||
double SORT::_iou(Tracklet& tracklet, sv::Box& box)
|
||||
{
|
||||
double trackletX1 = tracklet.bbox(0)-tracklet.bbox(2)/2;
|
||||
double trackletY1 = tracklet.bbox(1)-tracklet.bbox(3)/2;
|
||||
double trackletX2 = tracklet.bbox(0) + tracklet.bbox(2)/2;
|
||||
double trackletY2 = tracklet.bbox(1) + tracklet.bbox(3)/2;
|
||||
|
||||
double detectionX1 = box.x1;
|
||||
double detectionY1 = box.y1;
|
||||
double detectionX2 = box.x2;
|
||||
double detectionY2 = box.y2;
|
||||
|
||||
double intersectionX1 = max(trackletX1, detectionX1);
|
||||
double intersectionY1 = max(trackletY1, detectionY1);
|
||||
double intersectionX2 = min(trackletX2, detectionX2);
|
||||
double intersectionY2 = min(trackletY2, detectionY2);
|
||||
|
||||
double w = (intersectionX2-intersectionX1 > 0.0) ? (intersectionX2-intersectionX1) : 0.0;
|
||||
double h = (intersectionY2-intersectionY1 > 0.0) ? (intersectionY2-intersectionY1) : 0.0;
|
||||
double intersectionArea = w * h;
|
||||
|
||||
double trackletArea = tracklet.bbox(2) * tracklet.bbox(3);
|
||||
|
||||
double detectionArea = (box.x2-box.x1) * (box.y2-box.y1);
|
||||
double unionArea = trackletArea + detectionArea - intersectionArea;
|
||||
double iou = (1 - intersectionArea / unionArea * 1.0);
|
||||
|
||||
return iou;
|
||||
}
|
||||
|
||||
// Function to find the minimum element in a vector
|
||||
double SORT::_findMin(const std::vector<double>& vec) {
|
||||
double minVal = std::numeric_limits<double>::max();
|
||||
for (double val : vec) {
|
||||
if (val < minVal) {
|
||||
minVal = val;
|
||||
}
|
||||
}
|
||||
return minVal;
|
||||
}
|
||||
|
||||
// Function to subtract the minimum value from each row of the cost matrix
|
||||
void SORT::_subtractMinFromRows(std::vector<std::vector<double>>& costMatrix) {
|
||||
for (auto& row : costMatrix) {
|
||||
double minVal = _findMin(row);
|
||||
for (double& val : row) {
|
||||
val -= minVal;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Function to subtract the minimum value from each column of the cost matrix
|
||||
void SORT::_subtractMinFromCols(std::vector<std::vector<double>>& costMatrix) {
|
||||
for (size_t col = 0; col < costMatrix[0].size(); ++col) {
|
||||
double minVal = std::numeric_limits<double>::max();
|
||||
for (size_t row = 0; row < costMatrix.size(); ++row) {
|
||||
if (costMatrix[row][col] < minVal) {
|
||||
minVal = costMatrix[row][col];
|
||||
}
|
||||
}
|
||||
for (size_t row = 0; row < costMatrix.size(); ++row) {
|
||||
costMatrix[row][col] -= minVal;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Function to find a matching using the Hungarian algorithm
|
||||
vector<pair<int, int> > SORT::_hungarian(vector<vector<double> > costMatrix)
|
||||
{
|
||||
size_t numRows = costMatrix.size();
|
||||
size_t numCols = costMatrix[0].size();
|
||||
|
||||
//transpose the matrix if necessary
|
||||
const bool transposed = numCols > numRows;
|
||||
if (transposed) {
|
||||
vector<vector<double>> transposedMatrix(numCols, vector<double>(numRows));
|
||||
for (int i = 0; i < numRows; i++)
|
||||
{
|
||||
for (int j = 0; j < numCols; j++)
|
||||
{
|
||||
transposedMatrix[j][i] = costMatrix[i][j];
|
||||
}
|
||||
}
|
||||
costMatrix = transposedMatrix;
|
||||
swap(numRows, numCols);
|
||||
}
|
||||
// Determine the larger dimension for matching
|
||||
size_t maxDim = std::max(numRows, numCols);
|
||||
|
||||
// Create a square cost matrix by padding with zeros if necessary
|
||||
std::vector<std::vector<double>> squareMatrix(maxDim, std::vector<double>(maxDim, 0.0));
|
||||
for (size_t row = 0; row < numRows; ++row) {
|
||||
for (size_t col = 0; col < numCols; ++col) {
|
||||
squareMatrix[row][col] = costMatrix[row][col];
|
||||
}
|
||||
}
|
||||
|
||||
// Subtract the minimum value from each row and column
|
||||
_subtractMinFromRows(squareMatrix);
|
||||
_subtractMinFromCols(squareMatrix);
|
||||
|
||||
// Initialize the assignment vectors with -1 values
|
||||
std::vector<int> rowAssignment(maxDim, -1);
|
||||
std::vector<int> colAssignment(maxDim, -1);
|
||||
|
||||
// Perform the matching
|
||||
for (size_t row = 0; row < maxDim; ++row) {
|
||||
std::vector<bool> visitedCols(maxDim, false);
|
||||
for (size_t col = 0; col < maxDim; ++col) {
|
||||
if (squareMatrix[row][col] == 0 && colAssignment[col] == -1) {
|
||||
rowAssignment[row] = col;
|
||||
colAssignment[col] = row;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Convert the assignment vectors to pair<int, int> format
|
||||
std::vector<std::pair<int, int>> assignmentPairs;
|
||||
for (size_t row = 0; row < numRows; ++row) {
|
||||
int col = rowAssignment[row];
|
||||
if (col != -1) {
|
||||
if (col >= numCols) {
|
||||
col = -1;
|
||||
}
|
||||
assignmentPairs.emplace_back(row, col);
|
||||
}
|
||||
}
|
||||
if (transposed) {
|
||||
for (auto& assignment : assignmentPairs)
|
||||
{
|
||||
swap(assignment.first, assignment.second);
|
||||
}
|
||||
}
|
||||
return assignmentPairs;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,77 @@
|
|||
#ifndef __SV_SORT__
|
||||
#define __SV_SORT__
|
||||
|
||||
#include "sv_core.h"
|
||||
#include <opencv2/opencv.hpp>
|
||||
#include <opencv2/aruco.hpp>
|
||||
#include <opencv2/tracking.hpp>
|
||||
#include <string>
|
||||
#include <chrono>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
|
||||
namespace sv {
|
||||
|
||||
|
||||
|
||||
// define the tracklet struct to store the tracked objects.
|
||||
struct Tracklet
|
||||
{
|
||||
/* data */
|
||||
public:
|
||||
Eigen::Vector4d bbox; // double x, y, w, h;
|
||||
int id = 0;
|
||||
int age;
|
||||
int hits;
|
||||
int misses;
|
||||
int frame_id = 0;
|
||||
int category_id;
|
||||
bool tentative;
|
||||
std::vector<double> features;
|
||||
Eigen::Matrix<double, 8, 1> mean;
|
||||
Eigen::Matrix<double, 8, 8> covariance;
|
||||
};
|
||||
|
||||
|
||||
class KalmanFilter {
|
||||
public:
|
||||
KalmanFilter();
|
||||
~KalmanFilter();
|
||||
std::pair<Eigen::Matrix<double, 8, 1>, Eigen::Matrix<double, 8, 8> > initiate(Eigen::Vector4d &bbox);
|
||||
std::pair<Eigen::Matrix<double, 8, 1>, Eigen::Matrix<double, 8, 8> > update(Eigen::Matrix<double, 8, 1> mean, Eigen::Matrix<double, 8, 8> covariances, Box &box);
|
||||
std::pair<Eigen::Matrix<double, 8, 1>, Eigen::Matrix<double, 8, 8> > predict(Eigen::Matrix<double, 8, 1> mean, Eigen::Matrix<double, 8, 8> covariances);
|
||||
std::pair<Eigen::Matrix<double, 4, 1>, Eigen::Matrix<double, 4, 4> > project(Eigen::Matrix<double, 8, 1> mean, Eigen::Matrix<double, 8, 8> covariances);
|
||||
private:
|
||||
Eigen::Matrix<double, 8, 8> _F;
|
||||
Eigen::Matrix<double, 4, 8> _H;
|
||||
Eigen::Matrix<double, 9, 1> _chi2inv95;
|
||||
double _std_weight_position;
|
||||
double _std_weight_vel;
|
||||
};
|
||||
|
||||
|
||||
class SORT {
|
||||
public:
|
||||
SORT(double iou_threshold, int max_age, int min_hits): _iou_threshold(iou_threshold), _max_age(max_age), _min_hits(min_hits), _next_tracklet_id(0) {};
|
||||
~SORT();
|
||||
void update(TargetsInFrame &tgts);
|
||||
std::vector<Tracklet> getTracklets() const;
|
||||
private:
|
||||
double _iou(Tracklet &tracklet, Box &box);
|
||||
std::vector<std::pair<int,int>> _hungarian(std::vector<std::vector<double>> costMatrix);
|
||||
double _findMin(const std::vector<double>& vec);
|
||||
void _subtractMinFromRows(std::vector<std::vector<double>>& costMatrix);
|
||||
void _subtractMinFromCols(std::vector<std::vector<double>>& costMatrix);
|
||||
//bool _augment(const std::vector<std::vector<double>>& costMatrix, int row, std::vector<int>& rowMatch, std::vector<int>& colMatch, std::vector<bool>& visited);
|
||||
|
||||
double _iou_threshold;
|
||||
int _max_age;
|
||||
int _min_hits;
|
||||
int _next_tracklet_id;
|
||||
std::vector <Tracklet> _tracklets;
|
||||
std::vector <Tracklet> _new_tracklets;
|
||||
};
|
||||
|
||||
|
||||
}
|
||||
#endif
|
|
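The KalmanFilter declared above tracks an 8-dimensional state (center x, center y, aspect ratio, height, plus their velocities), as the initiate() implementation later in this diff shows. A small standalone Eigen sketch of how such a mean and diagonal covariance can be built from a box; the 1/20 and 1/160 weights mirror the constants used in the implementation:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    // Box given as center x, center y, width, height.
    double cx = 320.0, cy = 240.0, w = 60.0, h = 120.0;

    // State: [cx, cy, aspect ratio, height, vx, vy, va, vh], velocities start at zero.
    Eigen::Matrix<double, 8, 1> mean;
    mean << cx, cy, w / h, h, 0, 0, 0, 0;

    // Per-component standard deviations scaled by the box height.
    double wp = 1.0 / 20, wv = 1.0 / 160;
    Eigen::Matrix<double, 8, 1> stds;
    stds << 2 * wp * h, 2 * wp * h, 1e-2, 2 * wp * h,
            10 * wv * h, 10 * wv * h, 1e-5, 10 * wv * h;

    // Diagonal covariance from the squared standard deviations.
    Eigen::Matrix<double, 8, 1> sq = stds.array().square().matrix();
    Eigen::Matrix<double, 8, 8> cov = sq.asDiagonal();

    std::cout << "mean: " << mean.transpose() << "\n";
    std::cout << "cov diagonal: " << cov.diagonal().transpose() << std::endl;
    return 0;
}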
@ -5,6 +5,8 @@
|
|||
#include <vector>
|
||||
#include "gason.h"
|
||||
#include "sv_util.h"
|
||||
#include "sort.h"
|
||||
#include "BYTETracker.h"
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
@ -16,15 +18,22 @@ MultipleObjectTracker::MultipleObjectTracker()
|
|||
{
|
||||
this->_params_loaded = false;
|
||||
this->_sort_impl = NULL;
|
||||
this->_bytetrack_impl = NULL;
|
||||
}
|
||||
MultipleObjectTracker::~MultipleObjectTracker()
|
||||
{
|
||||
if (this->_sort_impl)
|
||||
delete this->_sort_impl;
|
||||
else if (this->_bytetrack_impl)
|
||||
delete this->_bytetrack_impl;
|
||||
}
|
||||
|
||||
void MultipleObjectTracker::track(cv::Mat img_, TargetsInFrame& tgts_)
|
||||
sv::TargetsInFrame MultipleObjectTracker::track(cv::Mat img_, TargetsInFrame& tgts_)
|
||||
{
|
||||
sv::TargetsInFrame person_tgts(tgts_.frame_id);
|
||||
person_tgts.width = img_.size().width;
|
||||
person_tgts.height = img_.size().height;
|
||||
|
||||
if (!this->_params_loaded)
|
||||
{
|
||||
this->_load();
|
||||
|
@ -33,8 +42,28 @@ void MultipleObjectTracker::track(cv::Mat img_, TargetsInFrame& tgts_)
|
|||
if ("sort" == this->_algorithm && this->_sort_impl)
|
||||
{
|
||||
this->_detector->detect(img_, tgts_);
|
||||
this->_sort_impl->update(tgts_);
|
||||
for (auto target : tgts_.targets)
|
||||
{
|
||||
if (target.category_id == 0)
|
||||
{
|
||||
person_tgts.targets.push_back(target);
|
||||
}
|
||||
}
|
||||
this->_sort_impl->update(person_tgts);
|
||||
}
|
||||
else if ("bytetrack" == this->_algorithm && this->_bytetrack_impl)
|
||||
{
|
||||
this->_detector->detect(img_, tgts_);
|
||||
for (auto target : tgts_.targets)
|
||||
{
|
||||
if (target.category_id == 0)
|
||||
{
|
||||
person_tgts.targets.push_back(target);
|
||||
}
|
||||
}
|
||||
this->_bytetrack_impl->update(person_tgts);
|
||||
}
|
||||
return person_tgts;
|
||||
}
|
||||
|
||||
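track() above forwards only the person detections (category_id == 0) to the underlying SORT or ByteTrack instance. A standalone sketch of that filtering step with std::copy_if, using a plain stand-in struct instead of sv::Target:

#include <algorithm>
#include <iostream>
#include <iterator>
#include <vector>

// Minimal stand-in for a detection; sv::Target carries more fields.
struct Detection {
    int category_id;
    float score;
};

int main()
{
    std::vector<Detection> all = {{0, 0.9f}, {2, 0.8f}, {0, 0.6f}, {7, 0.5f}};

    // Keep only the "person" class (category_id == 0) before tracking.
    std::vector<Detection> persons;
    std::copy_if(all.begin(), all.end(), std::back_inserter(persons),
                 [](const Detection& d) { return d.category_id == 0; });

    std::cout << "persons kept: " << persons.size() << std::endl;  // prints 2
    return 0;
}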
void MultipleObjectTracker::init(CommonObjectDetector* detector_)
|
||||
|
@ -48,6 +77,10 @@ void MultipleObjectTracker::init(CommonObjectDetector* detector_)
|
|||
{
|
||||
this->_sort_impl = new SORT(this->_iou_thres, this->_max_age, this->_min_hits);
|
||||
}
|
||||
else if("bytetrack" == this->_algorithm)
|
||||
{
|
||||
this->_bytetrack_impl = new BYTETracker(this->_frame_rate, this->_track_buffer);
|
||||
}
|
||||
this->_detector = detector_;
|
||||
}
|
||||
|
||||
|
@ -71,7 +104,7 @@ void MultipleObjectTracker::_load()
|
|||
else
|
||||
this->_with_reid = false;
|
||||
|
||||
std::cout << "with_reid: " << this->_with_reid << std::endl;
|
||||
//std::cout << "with_reid: " << this->_with_reid << std::endl;
|
||||
}
|
||||
else if ("reid_input_size" == std::string(i->key) && i->value.getTag() == JSON_ARRAY) {
|
||||
int jcnt = 0;
|
||||
|
@ -84,395 +117,39 @@ void MultipleObjectTracker::_load()
|
|||
}
|
||||
jcnt += 1;
|
||||
}
|
||||
std::cout << "reid_input_w: " << this->_reid_input_w << std::endl;
|
||||
std::cout << "reid_input_h: " << this->_reid_input_h << std::endl;
|
||||
//std::cout << "reid_input_w: " << this->_reid_input_w << std::endl;
|
||||
//std::cout << "reid_input_h: " << this->_reid_input_h << std::endl;
|
||||
}
|
||||
else if ("reid_num_samples" == std::string(i->key)) {
|
||||
this->_reid_num_samples = i->value.toNumber();
|
||||
std::cout << "reid_num_samples: " << this->_reid_num_samples << std::endl;
|
||||
//std::cout << "reid_num_samples: " << this->_reid_num_samples << std::endl;
|
||||
}
|
||||
else if ("reid_match_thres" == std::string(i->key)) {
|
||||
this->_reid_match_thres = i->value.toNumber();
|
||||
std::cout << "reid_match_thres: " << this->_reid_match_thres << std::endl;
|
||||
//std::cout << "reid_match_thres: " << this->_reid_match_thres << std::endl;
|
||||
}
|
||||
else if ("iou_thres" == std::string(i->key)) {
|
||||
this->_iou_thres = i->value.toNumber();
|
||||
std::cout << "iou_thres: " << this->_iou_thres << std::endl;
|
||||
//std::cout << "iou_thres: " << this->_iou_thres << std::endl;
|
||||
}
|
||||
else if ("max_age" == std::string(i->key)) {
|
||||
this->_max_age = i->value.toNumber();
|
||||
std::cout << "max_age: " << this->_max_age << std::endl;
|
||||
//std::cout << "max_age: " << this->_max_age << std::endl;
|
||||
}
|
||||
else if ("min_hits" == std::string(i->key)) {
|
||||
this->_min_hits = i->value.toNumber();
|
||||
std::cout << "min_hits: " << this->_min_hits << std::endl;
|
||||
//std::cout << "min_hits: " << this->_min_hits << std::endl;
|
||||
}
|
||||
else if ("frame_rate" == std::string(i->key)) {
|
||||
this->_frame_rate = i->value.toNumber();
|
||||
//std::cout << "max_age: " << this->_max_age << std::endl;
|
||||
}
|
||||
else if ("track_buffer" == std::string(i->key)) {
|
||||
this->_track_buffer = i->value.toNumber();
|
||||
//std::cout << "min_hits: " << this->_min_hits << std::endl;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
KalmanFilter::KalmanFilter()
|
||||
{
|
||||
this->_chi2inv95 << 3.8415, 5.9915, 7.8147, 9.4877, 11.070, 12.592, 14.067, 15.507, 16.919;
|
||||
this->_F = MatrixXd::Identity(8, 8);
|
||||
for (int i=0; i<4; i++)
|
||||
{
|
||||
this->_F(i,i+4) = 1.; //1
|
||||
}
|
||||
this->_H = MatrixXd::Identity(4, 8);
|
||||
this->_std_weight_position = 1. / 20; //1./20
|
||||
this->_std_weight_vel = 1. / 160; //1./160
|
||||
}
|
||||
|
||||
KalmanFilter::~KalmanFilter()
|
||||
{
|
||||
}
|
||||
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > KalmanFilter::initiate(Vector4d &bbox)
|
||||
{
|
||||
Matrix<double,8,1> mean;
|
||||
Matrix<double,4,1> zero_vector;
|
||||
zero_vector.setZero();
|
||||
mean << bbox(0), bbox(1), (double)bbox(2) / (double)bbox(3), bbox(3), zero_vector;
|
||||
VectorXd stds(8);
|
||||
stds << 2 * this->_std_weight_position * mean(3), 2 * this->_std_weight_position * mean(3), 0.01, 2 * this->_std_weight_position * mean(3), \
|
||||
10 * this->_std_weight_vel * mean(3), 10 * this->_std_weight_vel * mean(3), 1e-5, 10 * this->_std_weight_vel * mean(3);
|
||||
MatrixXd squared = stds.array().square();
|
||||
Matrix<double, 8, 8> covariances;
|
||||
covariances = squared.asDiagonal();
|
||||
return make_pair(mean, covariances);
|
||||
}
|
||||
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > KalmanFilter::update(Matrix<double, 8, 1> mean, Matrix<double, 8, 8> covariances, sv::Box &box)
|
||||
{
|
||||
VectorXd measurement(4);
|
||||
double a = (double)(box.x2-box.x1) / (double)(box.y2-box.y1);
|
||||
measurement << box.x1+(box.x2-box.x1)/2, box.y1+(box.y2-box.y1)/2, a, box.y2-box.y1;
|
||||
pair<Matrix<double, 4, 1>, Matrix<double, 4, 4> > projected = project(mean, covariances);
|
||||
Matrix<double, 4, 1> projected_mean = projected.first;
|
||||
Matrix<double, 4, 4> projected_cov = projected.second;
|
||||
|
||||
Eigen::LLT<Eigen::MatrixXd> chol_factor(projected_cov);
|
||||
MatrixXd Kalman_gain = (chol_factor.solve((covariances * this->_H.transpose()).transpose())).transpose();
|
||||
|
||||
VectorXd innovation = measurement - projected_mean;
|
||||
Matrix<double, 8, 1> new_mean = mean + Kalman_gain *innovation;
|
||||
Matrix<double, 8, 8> new_covariances = covariances - Kalman_gain * projected_cov * Kalman_gain.transpose();
|
||||
|
||||
return make_pair(new_mean, new_covariances);
|
||||
}
|
||||
|
||||
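update() above avoids an explicit matrix inverse: it Cholesky-factorizes the projected (innovation) covariance S and solves for the Kalman gain K = P Hᵀ S⁻¹. A reduced standalone Eigen sketch of the same computation with a 2-dimensional state and 1-dimensional measurement:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    // State covariance P (2x2), measurement matrix H (1x2), measurement noise R (1x1).
    Eigen::Matrix2d P;
    P << 4.0, 1.0,
         1.0, 2.0;
    Eigen::Matrix<double, 1, 2> H;
    H << 1.0, 0.0;
    Eigen::Matrix<double, 1, 1> R;
    R << 0.5;

    // Innovation covariance S = H P H^T + R.
    Eigen::Matrix<double, 1, 1> S = H * P * H.transpose() + R;

    // Kalman gain K = P H^T S^-1, computed by solving S X = (P H^T)^T and
    // transposing the result, as KalmanFilter::update does above.
    Eigen::LLT<Eigen::MatrixXd> chol(S);
    Eigen::MatrixXd K = chol.solve((P * H.transpose()).transpose()).transpose();

    std::cout << "K =\n" << K << std::endl;  // 2x1 gain
    return 0;
}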
pair<Matrix<double, 4, 1>, Matrix<double, 4, 4> > KalmanFilter::project(Matrix<double, 8, 1> mean, Matrix<double, 8, 8> covariances)
|
||||
{
|
||||
VectorXd stds(4);
|
||||
stds << this->_std_weight_position * mean(3), this->_std_weight_position * mean(3), 0.1, this->_std_weight_position * mean(3);
|
||||
MatrixXd squared = stds.array().square();
|
||||
MatrixXd R = squared.asDiagonal();
|
||||
|
||||
Matrix<double, 4, 1> pro_mean = this->_H * mean;
|
||||
Matrix<double, 4, 4> pro_covariances = this->_H * covariances * this->_H.transpose() + R;
|
||||
return make_pair(pro_mean, pro_covariances);
|
||||
}
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > KalmanFilter::predict(Matrix<double, 8, 1> mean, Matrix<double, 8, 8> covariances)
|
||||
{
|
||||
VectorXd stds(8);
|
||||
stds << this->_std_weight_position * mean(3), this->_std_weight_position * mean(3), 1e-2, this->_std_weight_position * mean(3), \
|
||||
this->_std_weight_vel * mean(3), this->_std_weight_vel * mean(3), 1e-5, this->_std_weight_vel * mean(3); // a = 0.01
|
||||
MatrixXd squared = stds.array().square();
|
||||
MatrixXd Q = squared.asDiagonal();
|
||||
Matrix<double, 8, 1> pre_mean = this->_F * mean;
|
||||
Matrix<double, 8, 8> pre_cov = this->_F * covariances * this->_F.transpose()+Q;//+Q
|
||||
return make_pair(pre_mean, pre_cov);
|
||||
}
|
||||
|
||||
|
||||
SORT::~SORT()
|
||||
{
|
||||
}
|
||||
|
||||
void SORT::update(TargetsInFrame& tgts)
|
||||
{
|
||||
sv::KalmanFilter kf;
|
||||
if (! this->_tracklets.size() || tgts.targets.size() == 0)
|
||||
{
|
||||
Vector4d bbox;
|
||||
for (int i=0; i<tgts.targets.size(); i++)
|
||||
{
|
||||
sv::Box box;
|
||||
tgts.targets[i].getBox(box);
|
||||
Tracklet tracklet;
|
||||
tracklet.id = ++ this->_next_tracklet_id;
|
||||
tgts.targets[i].tracked_id = this->_next_tracklet_id;
|
||||
tgts.targets[i].has_tid = true;
|
||||
|
||||
tracklet.bbox << box.x1+(box.x2-box.x1)/2, box.y1+(box.y2-box.y1)/2, box.x2-box.x1, box.y2-box.y1; // x,y,w,h; center(x,y)
|
||||
|
||||
tracklet.age = 0;
|
||||
tracklet.hits = 1;
|
||||
tracklet.misses = 0;
|
||||
tracklet.frame_id = tgts.frame_id;
|
||||
tracklet.category_id = tgts.targets[i].category_id;
|
||||
tracklet.tentative = true;
|
||||
|
||||
// initiate the motion
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > motion = kf.initiate(tracklet.bbox);
|
||||
tracklet.mean = motion.first;
|
||||
tracklet.covariance = motion.second;
|
||||
|
||||
this->_tracklets.push_back(tracklet);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// cout << "frame id:" << tgts.frame_id << endl;
|
||||
for (int i=0; i<tgts.targets.size(); i++)
|
||||
{
|
||||
tgts.targets[i].tracked_id = 0;
|
||||
tgts.targets[i].has_tid = true;
|
||||
}
|
||||
|
||||
vector<int> match_det(tgts.targets.size(), -1);
|
||||
// predict the next state of each tracklet
|
||||
for (auto& tracklet : this->_tracklets)
|
||||
{
|
||||
tracklet.age++;
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > motion = kf.predict(tracklet.mean, tracklet.covariance);
|
||||
tracklet.bbox << motion.first(0), motion.first(1), motion.first(2) * motion.first(3), motion.first(3);
|
||||
tracklet.mean = motion.first;
|
||||
tracklet.covariance = motion.second;
|
||||
}
|
||||
|
||||
// Match the detections to the existing tracklets
|
||||
vector<vector<double> > iouMatrix(this->_tracklets.size(), vector<double> (tgts.targets.size(), 0));
|
||||
for (int i=0; i<this->_tracklets.size(); i++)
|
||||
{
|
||||
for (int j=0; j<tgts.targets.size(); j++)
|
||||
{
|
||||
sv::Box box;
|
||||
tgts.targets[j].getBox(box);
|
||||
iouMatrix[i][j] = this->_iou(this->_tracklets[i], box);
|
||||
}
|
||||
}
|
||||
|
||||
vector<pair<int, int> > matches = this->_hungarian(iouMatrix);
|
||||
for (auto& match : matches)
|
||||
{
|
||||
int trackletIndex = match.first;
|
||||
int detectionIndex = match.second;
|
||||
if (trackletIndex >= 0 && detectionIndex >= 0)
|
||||
{
|
||||
if (iouMatrix[match.first][match.second] <= 1 - _iou_threshold) // cost is 1 - IoU, so this keeps matches with IoU >= _iou_threshold
|
||||
{
|
||||
sv::Box box;
|
||||
tgts.targets[detectionIndex].getBox(box);
|
||||
this->_tracklets[trackletIndex].age = 0;
|
||||
this->_tracklets[trackletIndex].hits++;
|
||||
this->_tracklets[trackletIndex].frame_id = tgts.frame_id;
|
||||
this->_tracklets[trackletIndex].bbox << box.x1+(box.x2-box.x1)/2, box.y1+(box.y2-box.y1)/2, box.x2-box.x1, box.y2-box.y1;
|
||||
tgts.targets[detectionIndex].tracked_id = this->_tracklets[trackletIndex].id;
|
||||
match_det[detectionIndex] = trackletIndex;
|
||||
}
|
||||
}
|
||||
}
|
||||
std::vector <vector<double>> ().swap(iouMatrix);
|
||||
for (int i=0; i<tgts.targets.size(); i++)
|
||||
{
|
||||
// cout << "match_det: index: " << i << " value: " << match_det[i] << endl;
|
||||
if (match_det[i] == -1)
|
||||
{
|
||||
// cout << "create new tracklet." << endl;
|
||||
sv::Box box;
|
||||
tgts.targets[i].getBox(box);
|
||||
Tracklet tracklet;
|
||||
tracklet.id = ++ this->_next_tracklet_id;
|
||||
tracklet.bbox << box.x1+(box.x2-box.x1)/2, (double)(box.y1+(box.y2-box.y1)/2), box.x2-box.x1, box.y2-box.y1;
|
||||
tracklet.age = 0;
|
||||
tracklet.hits = 1;
|
||||
tracklet.misses = 0;
|
||||
tracklet.frame_id = tgts.frame_id;
|
||||
tracklet.category_id = tgts.targets[i].category_id;
|
||||
tracklet.tentative = true;
|
||||
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > new_motion = kf.initiate(tracklet.bbox);
|
||||
tracklet.mean = new_motion.first;
|
||||
tracklet.covariance = new_motion.second;
|
||||
|
||||
tgts.targets[i].tracked_id = this->_next_tracklet_id;
|
||||
tgts.targets[i].has_tid = true;
|
||||
this->_tracklets.push_back(tracklet);
|
||||
}
|
||||
else
|
||||
{
|
||||
sv::Box box;
|
||||
int track_id = match_det[i];
|
||||
tgts.targets[i].getBox(box);
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > updated = kf.update(this->_tracklets[track_id].mean, this->_tracklets[track_id].covariance, box);
|
||||
this->_tracklets[track_id].mean = updated.first;
|
||||
this->_tracklets[track_id].covariance = updated.second;
|
||||
}
|
||||
}
|
||||
|
||||
// sift tracklets: confirm those with enough hits and keep only recently updated ones
|
||||
for (auto& tracklet : this->_tracklets)
|
||||
{
|
||||
if (tracklet.hits >= _min_hits)
|
||||
{
|
||||
tracklet.tentative = false;
|
||||
}
|
||||
if ((tgts.frame_id-tracklet.frame_id <= _max_age) || (!tracklet.tentative && tracklet.frame_id == tgts.frame_id))
|
||||
{
|
||||
_new_tracklets.push_back(tracklet);
|
||||
}
|
||||
}
|
||||
_tracklets = _new_tracklets;
|
||||
std::vector <Tracklet> ().swap(_new_tracklets);
|
||||
}
|
||||
}
|
||||
|
||||
vector<Tracklet> SORT::getTracklets() const
|
||||
{
|
||||
return this->_tracklets;
|
||||
}
|
||||
|
||||
double SORT::_iou(Tracklet& tracklet, sv::Box& box)
|
||||
{
|
||||
double trackletX1 = tracklet.bbox(0)-tracklet.bbox(2)/2;
|
||||
double trackletY1 = tracklet.bbox(1)-tracklet.bbox(3)/2;
|
||||
double trackletX2 = tracklet.bbox(0) + tracklet.bbox(2)/2;
|
||||
double trackletY2 = tracklet.bbox(1) + tracklet.bbox(3)/2;
|
||||
|
||||
double detectionX1 = box.x1;
|
||||
double detectionY1 = box.y1;
|
||||
double detectionX2 = box.x2;
|
||||
double detectionY2 = box.y2;
|
||||
|
||||
double intersectionX1 = max(trackletX1, detectionX1);
|
||||
double intersectionY1 = max(trackletY1, detectionY1);
|
||||
double intersectionX2 = min(trackletX2, detectionX2);
|
||||
double intersectionY2 = min(trackletY2, detectionY2);
|
||||
|
||||
double w = (intersectionX2-intersectionX1 > 0.0) ? (intersectionX2-intersectionX1) : 0.0;
|
||||
double h = (intersectionY2-intersectionY1 > 0.0) ? (intersectionY2-intersectionY1) : 0.0;
|
||||
double intersectionArea = w * h;
|
||||
|
||||
double trackletArea = tracklet.bbox(2) * tracklet.bbox(3);
|
||||
|
||||
double detectionArea = (box.x2-box.x1) * (box.y2-box.y1);
|
||||
double unionArea = trackletArea + detectionArea - intersectionArea;
|
||||
double iou = 1.0 - intersectionArea / unionArea; // note: returns 1 - IoU, i.e. a matching cost rather than the raw IoU
|
||||
|
||||
return iou;
|
||||
}
|
||||
|
||||
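_iou() above therefore returns 1 − IoU, a cost that is small for well-overlapping boxes. A standalone sketch of the same computation for two corner-format boxes:

#include <algorithm>
#include <iostream>

struct Box { double x1, y1, x2, y2; };

// Returns 1 - IoU, matching the cost convention used by SORT::_iou above.
double iouCost(const Box& a, const Box& b)
{
    double ix1 = std::max(a.x1, b.x1), iy1 = std::max(a.y1, b.y1);
    double ix2 = std::min(a.x2, b.x2), iy2 = std::min(a.y2, b.y2);
    double w = std::max(0.0, ix2 - ix1);
    double h = std::max(0.0, iy2 - iy1);
    double inter = w * h;
    double uni = (a.x2 - a.x1) * (a.y2 - a.y1) + (b.x2 - b.x1) * (b.y2 - b.y1) - inter;
    return 1.0 - inter / uni;
}

int main()
{
    Box a{0, 0, 10, 10}, b{5, 5, 15, 15};
    // Intersection 25, union 175, IoU ~= 0.143, cost ~= 0.857.
    std::cout << "cost = " << iouCost(a, b) << std::endl;
    return 0;
}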
// Function to find the minimum element in a vector
|
||||
double SORT::_findMin(const std::vector<double>& vec) {
|
||||
double minVal = std::numeric_limits<double>::max();
|
||||
for (double val : vec) {
|
||||
if (val < minVal) {
|
||||
minVal = val;
|
||||
}
|
||||
}
|
||||
return minVal;
|
||||
}
|
||||
|
||||
// Function to subtract the minimum value from each row of the cost matrix
|
||||
void SORT::_subtractMinFromRows(std::vector<std::vector<double>>& costMatrix) {
|
||||
for (auto& row : costMatrix) {
|
||||
double minVal = _findMin(row);
|
||||
for (double& val : row) {
|
||||
val -= minVal;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Function to subtract the minimum value from each column of the cost matrix
|
||||
void SORT::_subtractMinFromCols(std::vector<std::vector<double>>& costMatrix) {
|
||||
for (size_t col = 0; col < costMatrix[0].size(); ++col) {
|
||||
double minVal = std::numeric_limits<double>::max();
|
||||
for (size_t row = 0; row < costMatrix.size(); ++row) {
|
||||
if (costMatrix[row][col] < minVal) {
|
||||
minVal = costMatrix[row][col];
|
||||
}
|
||||
}
|
||||
for (size_t row = 0; row < costMatrix.size(); ++row) {
|
||||
costMatrix[row][col] -= minVal;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Function to find a matching using the Hungarian algorithm
|
||||
vector<pair<int, int> > SORT::_hungarian(vector<vector<double> > costMatrix)
|
||||
{
|
||||
size_t numRows = costMatrix.size();
|
||||
size_t numCols = costMatrix[0].size();
|
||||
|
||||
//transpose the matrix if necessary
|
||||
const bool transposed = numCols > numRows;
|
||||
if (transposed) {
|
||||
vector<vector<double>> transposedMatrix(numCols, vector<double>(numRows));
|
||||
for (int i = 0; i < numRows; i++)
|
||||
{
|
||||
for (int j = 0; j < numCols; j++)
|
||||
{
|
||||
transposedMatrix[j][i] = costMatrix[i][j];
|
||||
}
|
||||
}
|
||||
costMatrix = transposedMatrix;
|
||||
swap(numRows, numCols);
|
||||
}
|
||||
// Determine the larger dimension for matching
|
||||
size_t maxDim = std::max(numRows, numCols);
|
||||
|
||||
// Create a square cost matrix by padding with zeros if necessary
|
||||
std::vector<std::vector<double>> squareMatrix(maxDim, std::vector<double>(maxDim, 0.0));
|
||||
for (size_t row = 0; row < numRows; ++row) {
|
||||
for (size_t col = 0; col < numCols; ++col) {
|
||||
squareMatrix[row][col] = costMatrix[row][col];
|
||||
}
|
||||
}
|
||||
|
||||
// Subtract the minimum value from each row and column
|
||||
_subtractMinFromRows(squareMatrix);
|
||||
_subtractMinFromCols(squareMatrix);
|
||||
|
||||
// Initialize the assignment vectors with -1 values
|
||||
std::vector<int> rowAssignment(maxDim, -1);
|
||||
std::vector<int> colAssignment(maxDim, -1);
|
||||
|
||||
// Perform the matching
|
||||
for (size_t row = 0; row < maxDim; ++row) {
|
||||
std::vector<bool> visitedCols(maxDim, false);
|
||||
for (size_t col = 0; col < maxDim; ++col) {
|
||||
if (squareMatrix[row][col] == 0 && colAssignment[col] == -1) {
|
||||
rowAssignment[row] = col;
|
||||
colAssignment[col] = row;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Convert the assignment vectors to pair<int, int> format
|
||||
std::vector<std::pair<int, int>> assignmentPairs;
|
||||
for (size_t row = 0; row < numRows; ++row) {
|
||||
int col = rowAssignment[row];
|
||||
//if (col != -1) {
|
||||
// assignmentPairs.emplace_back(row, col);
|
||||
// }
|
||||
if (col != -1) {
|
||||
if (col >= numCols) {
|
||||
col = -1;
|
||||
}
|
||||
assignmentPairs.emplace_back(row, col);
|
||||
}
|
||||
}
|
||||
if (transposed) {
|
||||
for (auto& assignment : assignmentPairs)
|
||||
{
|
||||
swap(assignment.first, assignment.second);
|
||||
}
|
||||
}
|
||||
return assignmentPairs;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
#include "sot_ocv470_impl.h"
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include "sv_util.h"
|
||||
|
||||
#define SV_MODEL_DIR "/SpireCV/models/"
|
||||
#define SV_ROOT_DIR "/SpireCV/"
|
||||
|
@ -30,17 +31,47 @@ bool SingleObjectTrackerOCV470Impl::ocv470Setup(SingleObjectTrackerBase* base_)
|
|||
std::string net = get_home() + SV_MODEL_DIR + "dasiamrpn_model.onnx";
|
||||
std::string kernel_cls1 = get_home() + SV_MODEL_DIR + "dasiamrpn_kernel_cls1.onnx";
|
||||
std::string kernel_r1 = get_home() + SV_MODEL_DIR + "dasiamrpn_kernel_r1.onnx";
|
||||
|
||||
std::vector<std::string> files1, files2, files3;
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files1, ".onnx", "Ocv-DaSiamRPN-Model-");
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files2, ".onnx", "Ocv-DaSiamRPN-Kernel-CLS1-");
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files3, ".onnx", "Ocv-DaSiamRPN-Kernel-R1-");
|
||||
if (files1.size() > 0 && files2.size() > 0 && files3.size() > 0)
|
||||
{
|
||||
std::sort(files1.rbegin(), files1.rend(), _comp_str_lesser);
|
||||
std::sort(files2.rbegin(), files2.rend(), _comp_str_lesser);
|
||||
std::sort(files3.rbegin(), files3.rend(), _comp_str_lesser);
|
||||
net = get_home() + SV_MODEL_DIR + files1[0];
|
||||
kernel_cls1 = get_home() + SV_MODEL_DIR + files2[0];
|
||||
kernel_r1 = get_home() + SV_MODEL_DIR + files3[0];
|
||||
}
|
||||
std::cout << "Load: " << net << std::endl;
|
||||
std::cout << "Load: " << kernel_cls1 << std::endl;
|
||||
std::cout << "Load: " << kernel_r1 << std::endl;
|
||||
|
||||
std::string backbone = get_home() + SV_MODEL_DIR + "nanotrack_backbone_sim.onnx";
|
||||
std::string neckhead = get_home() + SV_MODEL_DIR + "nanotrack_head_sim.onnx";
|
||||
|
||||
std::vector<std::string> files4, files5;
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files4, ".onnx", "Ocv-NanoTrack-Backbone-SIM-");
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files5, ".onnx", "Ocv-NanoTrack-Head-SIM-");
|
||||
if (files4.size() > 0 && files5.size() > 0)
|
||||
{
|
||||
std::sort(files4.rbegin(), files4.rend(), _comp_str_lesser);
|
||||
std::sort(files5.rbegin(), files5.rend(), _comp_str_lesser);
|
||||
backbone = get_home() + SV_MODEL_DIR + files4[0];
|
||||
neckhead = get_home() + SV_MODEL_DIR + files5[0];
|
||||
}
|
||||
std::cout << "Load: " << backbone << std::endl;
|
||||
std::cout << "Load: " << neckhead << std::endl;
|
||||
|
||||
try
|
||||
{
|
||||
TrackerNano::Params nano_params;
|
||||
nano_params.backbone = samples::findFile(backbone);
|
||||
nano_params.neckhead = samples::findFile(neckhead);
|
||||
nano_params.backend = cv::dnn::DNN_BACKEND_CUDA;
|
||||
nano_params.target = cv::dnn::DNN_TARGET_CUDA;
|
||||
nano_params.backend = this->_backend;
|
||||
nano_params.target = this->_target;
|
||||
|
||||
_nano = TrackerNano::create(nano_params);
|
||||
}
|
||||
|
|
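ocv470Setup() above looks for versioned model files by prefix and picks the lexicographically largest name, so a newer "Ocv-NanoTrack-Backbone-SIM-xxx.onnx" wins over an older one. A standalone C++17 sketch of that pattern with std::filesystem; the directory and prefix below are placeholders:

#include <algorithm>
#include <filesystem>
#include <iostream>
#include <string>
#include <vector>

namespace fs = std::filesystem;

int main()
{
    // Placeholder directory and prefix; SpireCV uses its models directory and
    // prefixes such as "Ocv-NanoTrack-Backbone-SIM-".
    std::string dir = "./models";
    std::string prefix = "Ocv-NanoTrack-Backbone-SIM-";

    if (!fs::is_directory(dir))
        return 0;

    std::vector<std::string> candidates;
    for (const auto& entry : fs::directory_iterator(dir)) {
        std::string name = entry.path().filename().string();
        if (entry.path().extension() == ".onnx" && name.rfind(prefix, 0) == 0)
            candidates.push_back(name);
    }

    if (!candidates.empty()) {
        // Reverse-sort so the highest (newest) version string comes first.
        std::sort(candidates.rbegin(), candidates.rend());
        std::cout << "Load: " << dir << "/" << candidates.front() << std::endl;
    }
    return 0;
}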
File diff suppressed because it is too large
|
@ -1,6 +1,7 @@
|
|||
#include "veri_det_cuda_impl.h"
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include "sv_util.h"
|
||||
|
||||
#define SV_MODEL_DIR "/SpireCV/models/"
|
||||
#define SV_ROOT_DIR "/SpireCV/"
|
||||
|
@ -74,10 +75,20 @@ namespace sv
|
|||
bool VeriDetectorCUDAImpl::cudaSetup()
|
||||
{
|
||||
#ifdef WITH_CUDA
|
||||
std::string trt_model_fn = get_home() + SV_MODEL_DIR + "model.engine";
|
||||
std::string trt_model_fn = get_home() + SV_MODEL_DIR + "veri.engine";
|
||||
|
||||
std::vector<std::string> files;
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.engine", "Nv-VERI-mobilenet_v3");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
trt_model_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
std::cout << "Load: " << trt_model_fn << std::endl;
|
||||
|
||||
if (!is_file_exist(trt_model_fn))
|
||||
{
|
||||
throw std::runtime_error("SpireCV (104) Error loading the VERI TensorRT model (File Not Exist)");
|
||||
throw std::runtime_error("SpireCV (104) Error loading the VeriDetector TensorRT model (File Not Exist)");
|
||||
}
|
||||
char *trt_model_stream{nullptr};
|
||||
size_t trt_model_size{0};
|
||||
|
@ -107,7 +118,7 @@ namespace sv
|
|||
|
||||
delete[] trt_model_stream;
|
||||
const ICudaEngine &cu_engine = this->_trt_context->getEngine();
|
||||
assert(cu_engine.getNbBindings() == 2);
|
||||
assert(cu_engine.getNbBindings() == 3);
|
||||
|
||||
this->_input_index = cu_engine.getBindingIndex("input");
|
||||
this->_output_index1 = cu_engine.getBindingIndex("output");
|
||||
|
@ -123,7 +134,6 @@ namespace sv
|
|||
this->_p_data = new float[2 * 3 * 224 * 224];
|
||||
this->_p_prob1 = new float[2 * 576];
|
||||
this->_p_prob2 = new float[2 * 1280];
|
||||
this->_p_prob3 = new float[2 * 1280];
|
||||
// Input
|
||||
TRTCHECK(cudaMemcpyAsync(_p_buffers[this->_input_index], this->_p_data, 2 * 3 * 224 * 224 * sizeof(float), cudaMemcpyHostToDevice, this->_cu_stream));
|
||||
// this->_trt_context->enqueue(1, _p_buffers, this->_cu_stream, nullptr);
|
||||
|
@ -139,11 +149,12 @@ namespace sv
|
|||
|
||||
void VeriDetectorCUDAImpl::cudaRoiCNN(
|
||||
std::vector<cv::Mat> &input_rois_,
|
||||
std::vector<int> &output_labels_)
|
||||
std::vector<float> &output_labels_)
|
||||
{
|
||||
#ifdef WITH_CUDA
|
||||
|
||||
for (int i = 0; i < 2; ++i)
|
||||
|
||||
for (int i = 0; i < 2; i++)
|
||||
{
|
||||
for (int row = 0; row < 224; ++row)
|
||||
{
|
||||
|
@ -151,14 +162,15 @@ namespace sv
|
|||
for (int col = 0; col < 224; ++col)
|
||||
{
|
||||
// mean=[136.20, 141.50, 145.41], std=[44.77, 44.20, 44.30]
|
||||
this->_p_data[224 * 224 * 3 * i + col + row * 224] = ((float)uc_pixel[0] - 136.20f) / 44.77f;
|
||||
this->_p_data[224 * 224 * 3 * i + col + row * 224 + 224 * 224] = ((float)uc_pixel[1] - 141.50f) / 44.20f;
|
||||
this->_p_data[224 * 224 * 3 * i + col + row * 224 + 224 * 224 * 2] = ((float)uc_pixel[2] - 145.41f) / 44.30f;
|
||||
this->_p_data[col + row * 224 + 224 * 224 * 3 * i] = ((float)uc_pixel[0] - 136.20f) / 44.77f;
|
||||
this->_p_data[col + row * 224 + 224 * 224 + 224 * 224 * 3 * i] = ((float)uc_pixel[1] - 141.50f) / 44.20f;
|
||||
this->_p_data[col + row * 224 + 224 * 224 * 2 + 224 * 224 * 3 * i] = ((float)uc_pixel[2] - 145.41f) / 44.30f;
|
||||
uc_pixel += 3;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Input
|
||||
TRTCHECK(cudaMemcpyAsync(_p_buffers[this->_input_index], this->_p_data, 2 * 3 * 224 * 224 * sizeof(float), cudaMemcpyHostToDevice, this->_cu_stream));
|
||||
// this->_trt_context->enqueue(1, _p_buffers, this->_cu_stream, nullptr);
|
||||
|
@ -180,10 +192,9 @@ namespace sv
|
|||
}
|
||||
}
|
||||
|
||||
// Compute the cosine similarity of the two feature vectors
|
||||
float similarity = cosineSimilarity(_p_prob2, _p_prob2 + 1280, 1280);
|
||||
std::cout << "余弦相似性: " << similarity << std::endl;
|
||||
std::cout << "VERI LABEL: " << label << std::endl;
|
||||
float similarity = cosineSimilarity(this->_p_prob2, this->_p_prob2 + 1280, 1280);
|
||||
output_labels_.push_back(label);
|
||||
output_labels_.push_back(similarity);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
|
|
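The preprocessing loop in cudaRoiCNN above converts two interleaved 224x224 BGR crops into one planar (NCHW) float buffer, normalizing each channel with the dataset mean and std. A standalone sketch of that layout transform on a tiny 2x2 "image", reusing the same mean/std constants:

#include <cstdio>

int main()
{
    const int W = 2, H = 2, C = 3;               // tiny stand-in for a 224x224 crop
    const float mean[3] = {136.20f, 141.50f, 145.41f};
    const float stdd[3] = {44.77f, 44.20f, 44.30f};

    // Interleaved HWC input (B,G,R per pixel), as OpenCV stores a CV_8UC3 Mat.
    unsigned char hwc[H * W * C] = {
        10, 20, 30,   40, 50, 60,
        70, 80, 90,  100, 110, 120};

    // Planar CHW output: all B values, then all G, then all R.
    float chw[C * H * W];
    for (int row = 0; row < H; ++row) {
        const unsigned char* px = hwc + row * W * C;
        for (int col = 0; col < W; ++col) {
            for (int ch = 0; ch < C; ++ch)
                chw[ch * H * W + row * W + col] = ((float)px[ch] - mean[ch]) / stdd[ch];
            px += C;
        }
    }

    printf("first normalized B value: %f\n", chw[0]);
    return 0;
}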
@ -29,14 +29,13 @@ public:
|
|||
bool cudaSetup();
|
||||
void cudaRoiCNN(
|
||||
std::vector<cv::Mat>& input_rois_,
|
||||
std::vector<int>& output_labels_
|
||||
std::vector<float>& output_labels_
|
||||
);
|
||||
|
||||
#ifdef WITH_CUDA
|
||||
float *_p_data;
|
||||
float *_p_prob1;
|
||||
float *_p_prob2;
|
||||
float *_p_prob3;
|
||||
nvinfer1::IExecutionContext *_trt_context;
|
||||
int _input_index;
|
||||
int _output_index1;
|
||||
|
|
|
@ -0,0 +1,112 @@
|
|||
#include "veri_det_intel_impl.h"
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
|
||||
#define SV_MODEL_DIR "/SpireCV/models/"
|
||||
#define SV_ROOT_DIR "/SpireCV/"
|
||||
|
||||
#include <iostream>
|
||||
#include <cmath>
|
||||
int BAT = 1;
|
||||
float cosineSimilarity(float *vec1, float *vec2, int size)
|
||||
{
|
||||
// Compute the dot product of the two vectors
|
||||
float dotProduct = 0.0f;
|
||||
for (int i = 0; i < size; ++i)
|
||||
{
|
||||
dotProduct += vec1[i] * vec2[i];
|
||||
}
|
||||
|
||||
// Compute the magnitude (L2 norm) of each vector
|
||||
float magnitudeVec1 = 0.0f;
|
||||
float magnitudeVec2 = 0.0f;
|
||||
for (int i = 0; i < size; ++i)
|
||||
{
|
||||
magnitudeVec1 += vec1[i] * vec1[i];
|
||||
magnitudeVec2 += vec2[i] * vec2[i];
|
||||
}
|
||||
magnitudeVec1 = std::sqrt(magnitudeVec1);
|
||||
magnitudeVec2 = std::sqrt(magnitudeVec2);
|
||||
|
||||
// Compute the cosine similarity
|
||||
float similarity = dotProduct / (magnitudeVec1 * magnitudeVec2);
|
||||
|
||||
return similarity;
|
||||
}
|
||||
|
||||
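Both the CUDA and the OpenVINO VeriDetector run the query crop and the reference image through the network as one batch of two, so the two 1280-dimensional embeddings sit back-to-back in a single output buffer and are compared as prob and prob + 1280. A standalone sketch of that call pattern with a short 4-dimensional embedding:

#include <cmath>
#include <cstdio>

// Same signature as the cosineSimilarity helper above.
float cosineSimilarity(float* vec1, float* vec2, int size)
{
    float dot = 0.0f, n1 = 0.0f, n2 = 0.0f;
    for (int i = 0; i < size; ++i) {
        dot += vec1[i] * vec2[i];
        n1 += vec1[i] * vec1[i];
        n2 += vec2[i] * vec2[i];
    }
    return dot / (std::sqrt(n1) * std::sqrt(n2));
}

int main()
{
    const int dim = 4;
    // Two embeddings stored contiguously, as in the batched network output.
    float prob[2 * dim] = {1.0f, 0.0f, 1.0f, 0.0f,    // query embedding
                           1.0f, 0.0f, 0.5f, 0.0f};   // reference embedding
    printf("similarity: %f\n", cosineSimilarity(prob, prob + dim, dim));
    return 0;
}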
namespace sv
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
using namespace dnn;
|
||||
#endif
|
||||
|
||||
VeriDetectorIntelImpl::VeriDetectorIntelImpl()
|
||||
{
|
||||
}
|
||||
|
||||
VeriDetectorIntelImpl::~VeriDetectorIntelImpl()
|
||||
{
|
||||
}
|
||||
|
||||
bool VeriDetectorIntelImpl::intelSetup()
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
std::string onnx_model_fn = get_home() + SV_MODEL_DIR + "veri.onnx";
|
||||
if (!is_file_exist(onnx_model_fn))
|
||||
{
|
||||
throw std::runtime_error("SpireCV (104) Error loading the VeriDetector openVINO model (File Not Exist)");
|
||||
}
|
||||
|
||||
// OpenVINO
|
||||
ov::Core core;
|
||||
this->compiled_model = core.compile_model(onnx_model_fn, "GPU");
|
||||
this->infer_request = compiled_model.create_infer_request();
|
||||
|
||||
return true;
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
void VeriDetectorIntelImpl::intelRoiCNN(
|
||||
std::vector<cv::Mat> &input_rois_,
|
||||
std::vector<float> &output_labels_)
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
|
||||
Mat blobs;
|
||||
blobFromImages(input_rois_, blobs, 1 / 255.0, Size(224, 224), Scalar(0, 0, 0), true, true);
|
||||
|
||||
auto input_port = compiled_model.input();
|
||||
ov::Tensor input_tensor(input_port.get_element_type(), input_port.get_shape(), blobs.ptr(0));
|
||||
|
||||
infer_request.infer();
|
||||
|
||||
const ov::Tensor &label_pre = infer_request.get_output_tensor(0);
|
||||
this->_p_prob1 = label_pre.data<float>();
|
||||
|
||||
const ov::Tensor &proto_pre = infer_request.get_output_tensor(1);
|
||||
this->_p_prob2 = proto_pre.data<float>();
|
||||
|
||||
// Find max index
|
||||
double max = 0;
|
||||
int label = 0;
|
||||
for (int i = 0; i < 576; ++i)
|
||||
{
|
||||
if (max < this->_p_prob1[i])
|
||||
{
|
||||
max = this->_p_prob1[i];
|
||||
label = i;
|
||||
}
|
||||
}
|
||||
|
||||
float similarity = cosineSimilarity(this->_p_prob2, this->_p_prob2 + 1280, 1280);
|
||||
|
||||
output_labels_.push_back(label);
|
||||
output_labels_.push_back(similarity);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
}
|
|
@ -0,0 +1,41 @@
|
|||
#ifndef __SV_VERI_DET_INTEL__
|
||||
#define __SV_VERI_DET_INTEL__
|
||||
|
||||
#include "sv_core.h"
|
||||
#include <opencv2/opencv.hpp>
|
||||
#include <opencv2/aruco.hpp>
|
||||
#include <opencv2/tracking.hpp>
|
||||
#include <string>
|
||||
#include <chrono>
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
#include <openvino/openvino.hpp>
|
||||
#endif
|
||||
|
||||
namespace sv
|
||||
{
|
||||
class VeriDetectorIntelImpl
|
||||
{
|
||||
public:
|
||||
VeriDetectorIntelImpl();
|
||||
~VeriDetectorIntelImpl();
|
||||
|
||||
bool intelSetup();
|
||||
void intelRoiCNN(
|
||||
std::vector<cv::Mat> &input_rois_,
|
||||
std::vector<float> &output_labels_);
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
|
||||
float *_p_data;
|
||||
float *_p_prob1;
|
||||
float *_p_prob2;
|
||||
|
||||
ov::Tensor input_tensor;
|
||||
ov::InferRequest infer_request;
|
||||
ov::CompiledModel compiled_model;
|
||||
#endif
|
||||
};
|
||||
|
||||
}
|
||||
#endif
|
|
@ -1,61 +1,168 @@
|
|||
#include "sv_veri_det.h"
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include "gason.h"
|
||||
#include "sv_util.h"
|
||||
|
||||
#ifdef WITH_CUDA
|
||||
#include <NvInfer.h>
|
||||
#include <cuda_runtime_api.h>
|
||||
#include "veri_det_cuda_impl.h"
|
||||
#endif
|
||||
|
||||
|
||||
namespace sv {
|
||||
|
||||
|
||||
VeriDetector::VeriDetector()
|
||||
{
|
||||
this->_cuda_impl = new VeriDetectorCUDAImpl;
|
||||
}
|
||||
VeriDetector::~VeriDetector()
|
||||
{
|
||||
}
|
||||
|
||||
bool VeriDetector::setupImpl()
|
||||
{
|
||||
#ifdef WITH_CUDA
|
||||
return this->_cuda_impl->cudaSetup();
|
||||
#ifdef WITH_INTEL
|
||||
#include <openvino/openvino.hpp>
|
||||
#include "veri_det_intel_impl.h"
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
void VeriDetector::roiCNN(
|
||||
std::vector<cv::Mat>& input_rois_,
|
||||
std::vector<int>& output_labels_
|
||||
)
|
||||
#define SV_ROOT_DIR "/SpireCV/"
|
||||
|
||||
namespace sv
|
||||
{
|
||||
#ifdef WITH_CUDA
|
||||
this->_cuda_impl->cudaRoiCNN(
|
||||
input_rois_,
|
||||
output_labels_
|
||||
);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
||||
void VeriDetector::detect(cv::Mat img1_, cv::Mat img2_, TargetsInFrame& tgts_)
|
||||
{
|
||||
if (!_params_loaded)
|
||||
VeriDetector::VeriDetector()
|
||||
{
|
||||
#ifdef WITH_CUDA
|
||||
this->_cuda_impl = new VeriDetectorCUDAImpl;
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
this->_intel_impl = new VeriDetectorIntelImpl;
|
||||
#endif
|
||||
}
|
||||
VeriDetector::~VeriDetector()
|
||||
{
|
||||
this->_load();
|
||||
this->_loadLabels();
|
||||
_params_loaded = true;
|
||||
}
|
||||
|
||||
std::vector<cv::Mat> e_roi = {img1_, img2_};
|
||||
void VeriDetector::_load()
|
||||
{
|
||||
JsonValue all_value;
|
||||
JsonAllocator allocator;
|
||||
_load_all_json(this->alg_params_fn, all_value, allocator);
|
||||
|
||||
std::vector<int> output_labels;
|
||||
roiCNN(e_roi, output_labels);
|
||||
}
|
||||
JsonValue veriliner_params_value;
|
||||
_parser_algorithm_params("VeriDetector", all_value, veriliner_params_value);
|
||||
|
||||
for (auto i : veriliner_params_value)
|
||||
{
|
||||
if ("vehicle_ID" == std::string(i->key))
|
||||
{
|
||||
this->vehicle_id = i->value.toString();
|
||||
std::cout << "vehicle_ID Load Sucess!" << std::endl;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool VeriDetector::setupImpl()
|
||||
{
|
||||
#ifdef WITH_CUDA
|
||||
return this->_cuda_impl->cudaSetup();
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
return this->_intel_impl->intelSetup();
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void VeriDetector::roiCNN(
|
||||
std::vector<cv::Mat> &input_rois_,
|
||||
std::vector<float> &output_labels_)
|
||||
{
|
||||
#ifdef WITH_CUDA
|
||||
this->_cuda_impl->cudaRoiCNN(
|
||||
input_rois_,
|
||||
output_labels_);
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
this->_intel_impl->intelRoiCNN(
|
||||
input_rois_,
|
||||
output_labels_);
|
||||
#endif
|
||||
}
|
||||
|
||||
void VeriDetector::detect(cv::Mat img_, const cv::Rect &bounding_box_, sv::Target &tgt)
|
||||
{
|
||||
if (!_params_loaded)
|
||||
{
|
||||
this->_load();
|
||||
this->_loadLabels();
|
||||
_params_loaded = true;
|
||||
}
|
||||
|
||||
// Convert the bounding box from top-left corner to center coordinates.
|
||||
targetPos[0] = float(bounding_box_.x) + float(bounding_box_.width) * 0.5f;
|
||||
targetPos[1] = float(bounding_box_.y) + float(bounding_box_.height) * 0.5f;
|
||||
|
||||
targetSz[0] = float(bounding_box_.width);
|
||||
targetSz[1] = float(bounding_box_.height);
|
||||
|
||||
// Extend the bounding box.
|
||||
float sumSz = targetSz[0] + targetSz[1];
|
||||
float wExtent = targetSz[0] + 0.5 * (sumSz);
|
||||
float hExtent = targetSz[1] + 0.5 * (sumSz);
|
||||
int sz = int(cv::sqrt(wExtent * hExtent));
|
||||
|
||||
cv::Mat crop;
|
||||
getSubwindow(crop, img_, sz, 224);
|
||||
|
||||
std::string img_ground_dir = get_home() + SV_ROOT_DIR + this->vehicle_id;
|
||||
cv::Mat img_ground = cv::imread(img_ground_dir);
|
||||
cv::resize(img_ground, img_ground, cv::Size(224, 224));
|
||||
std::vector<cv::Mat> input_rois_ = {crop, img_ground};
|
||||
|
||||
std::vector<float> output_labels;
|
||||
#ifdef WITH_CUDA
|
||||
roiCNN(input_rois_, output_labels);
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
roiCNN(input_rois_, output_labels);
|
||||
#endif
|
||||
|
||||
if (output_labels.size() > 0)
|
||||
{
|
||||
tgt.sim_score = output_labels[1];
|
||||
}
|
||||
}
|
||||
|
||||
void VeriDetector::getSubwindow(cv::Mat &dstCrop, cv::Mat &srcImg, int originalSz, int resizeSz)
|
||||
{
|
||||
cv::Scalar avgChans = mean(srcImg);
|
||||
cv::Size imgSz = srcImg.size();
|
||||
int c = (originalSz + 1) / 2;
|
||||
|
||||
int context_xmin = (int)(targetPos[0]) - c;
|
||||
int context_xmax = context_xmin + originalSz - 1;
|
||||
int context_ymin = (int)(targetPos[1]) - c;
|
||||
int context_ymax = context_ymin + originalSz - 1;
|
||||
|
||||
int left_pad = std::max(0, -context_xmin);
|
||||
int top_pad = std::max(0, -context_ymin);
|
||||
int right_pad = std::max(0, context_xmax - imgSz.width + 1);
|
||||
int bottom_pad = std::max(0, context_ymax - imgSz.height + 1);
|
||||
|
||||
context_xmin += left_pad;
|
||||
context_xmax += left_pad;
|
||||
context_ymin += top_pad;
|
||||
context_ymax += top_pad;
|
||||
|
||||
cv::Mat cropImg;
|
||||
if (left_pad == 0 && top_pad == 0 && right_pad == 0 && bottom_pad == 0)
|
||||
{
|
||||
// Crop image without padding.
|
||||
cropImg = srcImg(cv::Rect(context_xmin, context_ymin,
|
||||
context_xmax - context_xmin + 1, context_ymax - context_ymin + 1));
|
||||
}
|
||||
else // Crop image with padding, and the padding value is avgChans
|
||||
{
|
||||
cv::Mat tmpMat;
|
||||
cv::copyMakeBorder(srcImg, tmpMat, top_pad, bottom_pad, left_pad, right_pad, cv::BORDER_CONSTANT, avgChans);
|
||||
cropImg = tmpMat(cv::Rect(context_xmin, context_ymin, context_xmax - context_xmin + 1, context_ymax - context_ymin + 1));
|
||||
}
|
||||
resize(cropImg, dstCrop, cv::Size(resizeSz, resizeSz));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
|
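getSubwindow() above crops a square template around the target and, when the window leaves the image, pads with the mean image color before cropping. A standalone OpenCV sketch of that pad-then-crop step for a window that sticks out past the top-left corner:

#include <algorithm>
#include <opencv2/opencv.hpp>

int main()
{
    // Synthetic 100x100 image; the desired 60x60 window starts at (-20,-20),
    // i.e. it sticks out of the image by 20 pixels on the top and the left.
    cv::Mat img(100, 100, CV_8UC3, cv::Scalar(40, 80, 120));
    int x = -20, y = -20, sz = 60;

    int left_pad = std::max(0, -x);
    int top_pad = std::max(0, -y);
    int right_pad = std::max(0, x + sz - img.cols);
    int bottom_pad = std::max(0, y + sz - img.rows);

    // Pad with the mean color, then crop in the padded coordinate frame.
    cv::Mat padded;
    cv::copyMakeBorder(img, padded, top_pad, bottom_pad, left_pad, right_pad,
                       cv::BORDER_CONSTANT, cv::mean(img));
    cv::Mat crop = padded(cv::Rect(x + left_pad, y + top_pad, sz, sz));

    // Resize to the fixed template size used by the verifier (224x224).
    cv::Mat tmpl;
    cv::resize(crop, tmpl, cv::Size(224, 224));
    return 0;
}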
|
@ -1,17 +0,0 @@
|
|||
%YAML:1.0
|
||||
---
|
||||
calibration_time: "2021-01-12 Tuesday 18:08:01"
|
||||
image_width: 1280
|
||||
image_height: 720
|
||||
flags: 0
|
||||
camera_matrix: !!opencv-matrix
|
||||
rows: 3
|
||||
cols: 3
|
||||
dt: d
|
||||
data: [ 919.698486328125, 0.0, 638.8856201171875, 0.0, 920.1767578125, 370.9995422363281, 0.0, 0.0, 1.0]
|
||||
distortion_coefficients: !!opencv-matrix
|
||||
rows: 1
|
||||
cols: 5
|
||||
dt: d
|
||||
data: [ 0, 0, 0, 0, 0]
|
||||
avg_reprojection_error: 0
|
|
@ -1,20 +0,0 @@
|
|||
%YAML:1.0
|
||||
---
|
||||
calibration_time: "2021-01-12 Tuesday 18:08:01"
|
||||
image_width: 1920
|
||||
image_height: 1080
|
||||
flags: 0
|
||||
camera_matrix: !!opencv-matrix
|
||||
rows: 3
|
||||
cols: 3
|
||||
dt: d
|
||||
data: [ 7.9379415710551370e+02, 0., 2.9783879354295328e+02, 0.,
|
||||
7.9491985564466654e+02, 3.0942416136837386e+02, 0., 0., 1. ]
|
||||
distortion_coefficients: !!opencv-matrix
|
||||
rows: 1
|
||||
cols: 5
|
||||
dt: d
|
||||
data: [ 2.0950200339181715e-01, -1.1587468096518483e+00,
|
||||
5.5342063671841328e-03, 2.2214393775334758e-04,
|
||||
1.7127431916651392e+00 ]
|
||||
avg_reprojection_error: 2.8342964851391211e-01
|
|
@ -1,20 +0,0 @@
|
|||
%YAML:1.0
|
||||
---
|
||||
calibration_time: "2021-01-12 Tuesday 18:08:01"
|
||||
image_width: 640
|
||||
image_height: 480
|
||||
flags: 0
|
||||
camera_matrix: !!opencv-matrix
|
||||
rows: 3
|
||||
cols: 3
|
||||
dt: d
|
||||
data: [ 7.9379415710551370e+02, 0., 2.9783879354295328e+02, 0.,
|
||||
7.9491985564466654e+02, 3.0942416136837386e+02, 0., 0., 1. ]
|
||||
distortion_coefficients: !!opencv-matrix
|
||||
rows: 1
|
||||
cols: 5
|
||||
dt: d
|
||||
data: [ 2.0950200339181715e-01, -1.1587468096518483e+00,
|
||||
5.5342063671841328e-03, 2.2214393775334758e-04,
|
||||
1.7127431916651392e+00 ]
|
||||
avg_reprojection_error: 2.8342964851391211e-01
|
|
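The calibration files removed above are plain OpenCV FileStorage YAML, so the intrinsics can be read back with cv::FileStorage. A short sketch; the file name "calib.yaml" is a placeholder for one of the calibration files:

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    // Placeholder path; point this at one of the calibration YAML files above.
    cv::FileStorage fs("calib.yaml", cv::FileStorage::READ);
    if (!fs.isOpened())
        return 1;

    int width = 0, height = 0;
    cv::Mat cameraMatrix, distCoeffs;
    fs["image_width"] >> width;
    fs["image_height"] >> height;
    fs["camera_matrix"] >> cameraMatrix;           // 3x3 intrinsics
    fs["distortion_coefficients"] >> distCoeffs;   // 1x5 distortion
    fs.release();

    std::cout << width << "x" << height << "\n" << cameraMatrix << std::endl;
    return 0;
}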
@ -3,8 +3,8 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-27 18:10:06
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-08-16 21:53:43
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/AT10/AT10_gimbal_crc32.h
|
||||
* @LastEditTime: 2023-12-05 16:30:27
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/AT10/AT10_gimbal_crc32.h
|
||||
*/
|
||||
#ifndef AT10_GIMBAL_CRC32_H
|
||||
#define AT10_GIMBAL_CRC32_H
|
||||
|
|
|
@ -3,8 +3,8 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-27 18:10:06
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-08-25 19:39:56
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/AT10/AT10_gimbal_driver.cpp
|
||||
* @LastEditTime: 2023-12-06 10:27:59
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/AT10/AT10_gimbal_driver.cpp
|
||||
*/
|
||||
#include "AT10_gimbal_driver.h"
|
||||
#include "AT10_gimbal_crc32.h"
|
||||
|
@ -24,13 +24,6 @@ AT10GimbalDriver::AT10GimbalDriver(amovGimbal::IOStreamBase *_IO) : amovGimbal::
|
|||
stdRxQueue = new fifoRing(sizeof(AT10::GIMBAL_STD_FRAME_T), MAX_QUEUE_SIZE);
|
||||
stdTxQueue = new fifoRing(sizeof(AT10::GIMBAL_STD_FRAME_T), MAX_QUEUE_SIZE);
|
||||
parserState = AT10::GIMBAL_SERIAL_STATE_IDLE;
|
||||
|
||||
// Initialize and enable attitude data return (50Hz)
|
||||
// uint8_t cmd = 0XFF;
|
||||
// pack(AT10::GIMBAL_CMD_SET_FEEDBACK_H, &cmd, 1);
|
||||
// pack(AT10::GIMBAL_CMD_SET_FEEDBACK_L, &cmd, 1);
|
||||
// cmd = 0X00;
|
||||
// pack(AT10::GIMBAL_CMD_OPEN_FEEDBACK, &cmd, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -84,21 +77,7 @@ void AT10GimbalDriver::convert(void *buf)
|
|||
switch (temp->head)
|
||||
{
|
||||
case AT10::GIMBAL_CMD_RCV_STATE:
|
||||
std::cout << "Undefined old frame from AT10";
|
||||
// AT10::GIMBAL_RCV_POS_MSG_T *tempPos;
|
||||
// tempPos = reinterpret_cast<AT10::GIMBAL_RCV_POS_MSG_T *>(((uint8_t *)buf) + AT10_EXT_PAYLOAD_OFFSET);
|
||||
// mState.lock();
|
||||
// state.abs.yaw = tempPos->yawIMUAngle * AT10_EXT_SCALE_FACTOR_ANGLE;
|
||||
// state.abs.roll = tempPos->rollIMUAngle * AT10_EXT_SCALE_FACTOR_ANGLE;
|
||||
// state.abs.pitch = tempPos->pitchIMUAngle * AT10_EXT_SCALE_FACTOR_ANGLE;
|
||||
// state.rel.yaw = tempPos->yawStatorRotorAngle * AT10_EXT_SCALE_FACTOR_SPEED;
|
||||
// state.rel.roll = tempPos->rollStatorRotorAngle * AT10_EXT_SCALE_FACTOR_SPEED;
|
||||
// state.rel.pitch = tempPos->pitchStatorRotorAngle * AT10_EXT_SCALE_FACTOR_SPEED;
|
||||
// updateGimbalStateCallback(state.rel.roll, state.rel.pitch, state.rel.yaw,
|
||||
// state.abs.roll, state.abs.pitch, state.abs.yaw,
|
||||
// state.fov.x, state.fov.y);
|
||||
// mState.unlock();
|
||||
|
||||
std::cout << "Undefined old frame from AT10\r\n";
|
||||
break;
|
||||
|
||||
case AT10::GIMBAL_CMD_STD:
|
||||
|
@ -123,15 +102,15 @@ void AT10GimbalDriver::convert(void *buf)
|
|||
state.fov.y = amovGimbalTools::conversionBigLittle(tempRcv->D1.fovY) * 0.1;
|
||||
if ((amovGimbalTools::conversionBigLittle(tempRcv->D1.camera) & 0X0003) == 0X01)
|
||||
{
|
||||
state.video = amovGimbal::AMOV_GIMBAL_VIDEO_TAKE;
|
||||
state.video = AMOV_GIMBAL_VIDEO_TAKE;
|
||||
}
|
||||
else
|
||||
{
|
||||
state.video = amovGimbal::AMOV_GIMBAL_VIDEO_OFF;
|
||||
state.video = AMOV_GIMBAL_VIDEO_OFF;
|
||||
}
|
||||
updateGimbalStateCallback(state.rel.roll, state.rel.pitch, state.rel.yaw,
|
||||
state.abs.roll, state.abs.pitch, state.abs.yaw,
|
||||
state.fov.x, state.fov.y);
|
||||
state.fov.x, state.fov.y, updataCaller);
|
||||
mState.unlock();
|
||||
break;
|
||||
|
||||
|
@ -316,21 +295,9 @@ bool AT10GimbalDriver::parser(IN uint8_t byte)
|
|||
|
||||
void AT10GimbalDriver::sendHeart(void)
|
||||
{
|
||||
// uint8_t tempBuffer[72];
|
||||
uint8_t temp = 0X00;
|
||||
while (1)
|
||||
{
|
||||
// if (!IO->isBusy() && IO->isOpen())
|
||||
// {
|
||||
// bool state = false;
|
||||
|
||||
// state = txQueue->outCell(&tempBuffer);
|
||||
// if (state)
|
||||
// {
|
||||
// IO->outPutBytes((uint8_t *)&tempBuffer,
|
||||
// reinterpret_cast<AT10::GIMBAL_EXTEND_FRAME_T *>(tempBuffer)->len + AT10_EXT_PAYLOAD_OFFSET + sizeof(uint8_t));
|
||||
// }
|
||||
// }
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(100));
|
||||
pack(AT10::GIMBAL_CMD_STD_HEART, &temp, sizeof(temp));
|
||||
}
|
||||
|
@ -363,19 +330,26 @@ void AT10GimbalDriver::stackStart(void)
|
|||
}
|
||||
std::thread sendHeartLoop(&AT10GimbalDriver::sendHeart, this);
|
||||
std::thread sendStdLoop(&AT10GimbalDriver::sendStd, this);
|
||||
|
||||
this->sendThreadHanle = sendStdLoop.native_handle();
|
||||
this->sendHreatThreadHandle = sendHeartLoop.native_handle();
|
||||
|
||||
sendHeartLoop.detach();
|
||||
sendStdLoop.detach();
|
||||
}
|
||||
|
||||
void AT10GimbalDriver::parserLoop(void)
|
||||
{
|
||||
uint8_t temp;
|
||||
uint8_t temp[65536];
|
||||
uint32_t i = 0, getCount = 0;
|
||||
|
||||
while (1)
|
||||
{
|
||||
if (IO->inPutByte(&temp))
|
||||
getCount = IO->inPutBytes(temp);
|
||||
|
||||
for (i = 0; i < getCount; i++)
|
||||
{
|
||||
parser(temp);
|
||||
parser(temp[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
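parserLoop() above was changed from reading one byte at a time (inPutByte) to pulling a whole chunk with inPutBytes and then feeding the bytes to the frame parser one by one. A standalone sketch of that read-then-parse structure, with a trivial parser that just counts frame delimiters; the 65536-byte buffer mirrors the one used above and the stand-in inPutBytes is hypothetical:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Trivial stand-in for the byte-level frame parser: counts 0x55 delimiters.
static int framesSeen = 0;
static void parser(uint8_t byte)
{
    if (byte == 0x55)
        ++framesSeen;
}

// Stand-in for IO->inPutBytes(): copies whatever data is "available" into buf
// and returns the number of bytes copied.
static uint32_t inPutBytes(uint8_t* buf)
{
    static const uint8_t stream[] = {0x55, 0x01, 0x02, 0x55, 0x03, 0x55};
    std::memcpy(buf, stream, sizeof(stream));
    return sizeof(stream);
}

int main()
{
    static uint8_t temp[65536];
    uint32_t getCount = inPutBytes(temp);     // one bulk read...
    for (uint32_t i = 0; i < getCount; i++)   // ...then byte-by-byte parsing
        parser(temp[i]);
    std::printf("frames seen: %d\n", framesSeen);
    return 0;
}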
@ -387,7 +361,7 @@ void AT10GimbalDriver::getStdRxPack(void)
|
|||
{
|
||||
if (stdRxQueue->outCell(tempBuffer))
|
||||
{
|
||||
msgCustomCallback(tempBuffer);
|
||||
msgCustomCallback(tempBuffer, msgCaller);
|
||||
convert(tempBuffer);
|
||||
}
|
||||
}
|
||||
|
@ -400,20 +374,25 @@ void AT10GimbalDriver::getExtRxPack(void)
|
|||
{
|
||||
if (rxQueue->outCell(tempBuffer))
|
||||
{
|
||||
msgCustomCallback(tempBuffer);
|
||||
msgCustomCallback(tempBuffer, msgCaller);
|
||||
convert(tempBuffer);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void AT10GimbalDriver::parserStart(amovGimbal::pStateInvoke callback)
|
||||
void AT10GimbalDriver::parserStart(pAmovGimbalStateInvoke callback, void *caller)
|
||||
{
|
||||
this->updateGimbalStateCallback = callback;
|
||||
this->updataCaller = caller;
|
||||
|
||||
std::thread parser(&AT10GimbalDriver::parserLoop, this);
|
||||
std::thread getStdRxPackLoop(&AT10GimbalDriver::getStdRxPack, this);
|
||||
std::thread getExtRxPackLooP(&AT10GimbalDriver::getExtRxPack, this);
|
||||
|
||||
this->parserThreadHanle = parser.native_handle();
|
||||
this->stackThreadHanle = getStdRxPackLoop.native_handle();
|
||||
this->extStackThreadHandle = getExtRxPackLooP.native_handle();
|
||||
|
||||
parser.detach();
|
||||
getStdRxPackLoop.detach();
|
||||
getExtRxPackLooP.detach();
|
||||
|
|
|
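A recurring change in this diff is that the gimbal state and message callbacks now receive an extra void* caller argument (updataCaller, msgCaller), so a C-style callback can reach back into the object that registered it. A minimal standalone sketch of that pattern; the type and class names here are hypothetical:

#include <iostream>

// Hypothetical callback type: roll/pitch/yaw plus an opaque caller pointer,
// in the spirit of pAmovGimbalStateInvoke.
typedef void (*StateCallback)(double roll, double pitch, double yaw, void* caller);

class GimbalUser {
public:
    void onState(double roll, double pitch, double yaw)
    {
        std::cout << "rpy: " << roll << " " << pitch << " " << yaw << std::endl;
    }
    // Trampoline matching the C-style signature: recover the object from 'caller'.
    static void trampoline(double roll, double pitch, double yaw, void* caller)
    {
        static_cast<GimbalUser*>(caller)->onState(roll, pitch, yaw);
    }
};

// Stand-in for the driver side: it only stores the function pointer and the caller.
struct Driver {
    StateCallback cb = nullptr;
    void* caller = nullptr;
    void parserStart(StateCallback cb_, void* caller_) { cb = cb_; caller = caller_; }
    void emitState() { if (cb) cb(0.1, -0.2, 1.5, caller); }
};

int main()
{
    GimbalUser user;
    Driver drv;
    drv.parserStart(&GimbalUser::trampoline, &user);
    drv.emitState();
    return 0;
}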
@ -3,19 +3,17 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-28 12:24:21
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-08-25 19:28:55
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/AT10/AT10_gimbal_driver.h
|
||||
* @LastEditTime: 2023-12-06 10:27:48
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/AT10/AT10_gimbal_driver.h
|
||||
*/
|
||||
#include "../amov_gimbal.h"
|
||||
#ifndef __AT10_DRIVER_H
|
||||
#define __AT10_DRIVER_H
|
||||
#include "../amov_gimbal_private.h"
|
||||
#include "AT10_gimbal_struct.h"
|
||||
#include <mutex>
|
||||
#include <malloc.h>
|
||||
#include <iostream>
|
||||
|
||||
#ifndef __AT10_DRIVER_H
|
||||
#define __AT10_DRIVER_H
|
||||
|
||||
class AT10GimbalDriver : protected amovGimbal::amovGimbalBase
|
||||
{
|
||||
private:
|
||||
|
@ -32,12 +30,13 @@ private:
|
|||
void sendHeart(void);
|
||||
void sendStd(void);
|
||||
|
||||
void parserStart(amovGimbal::pStateInvoke callback);
|
||||
void parserStart(pAmovGimbalStateInvoke callback, void *caller);
|
||||
void parserLoop(void);
|
||||
void getExtRxPack(void);
|
||||
void getStdRxPack(void);
|
||||
|
||||
// bool getRxPack(OUT void *pack);
|
||||
std::thread::native_handle_type sendHreatThreadHandle;
|
||||
std::thread::native_handle_type extStackThreadHandle;
|
||||
|
||||
bool parser(IN uint8_t byte);
|
||||
void convert(void *buf);
|
||||
|
@ -46,18 +45,18 @@ private:
|
|||
|
||||
public:
|
||||
// functions
|
||||
uint32_t setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &pos);
|
||||
uint32_t setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &speed);
|
||||
uint32_t setGimabalFollowSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &followSpeed);
|
||||
uint32_t setGimabalPos(const AMOV_GIMBAL_POS_T &pos);
|
||||
uint32_t setGimabalSpeed(const AMOV_GIMBAL_POS_T &speed);
|
||||
uint32_t setGimabalFollowSpeed(const AMOV_GIMBAL_POS_T &followSpeed);
|
||||
uint32_t setGimabalHome(void);
|
||||
|
||||
uint32_t setGimbalZoom(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
uint32_t setGimbalFocus(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
uint32_t setGimbalZoom(AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
uint32_t setGimbalFocus(AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
|
||||
uint32_t takePic(void);
|
||||
uint32_t setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState);
|
||||
uint32_t setVideo(const AMOV_GIMBAL_VIDEO_T newState);
|
||||
|
||||
uint32_t extensionFuntions(void* cmd);
|
||||
uint32_t extensionFuntions(void *cmd);
|
||||
|
||||
// builds
|
||||
static amovGimbal::amovGimbalBase *creat(amovGimbal::IOStreamBase *_IO)
|
||||
|
@ -76,6 +75,13 @@ public:
|
|||
{
|
||||
delete stdTxQueue;
|
||||
}
|
||||
// set the cancel type so these threads can be killed at any point
|
||||
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
|
||||
parserThreadHanle = parserThreadHanle == 0 ? 0 : pthread_cancel(parserThreadHanle);
|
||||
sendThreadHanle = sendThreadHanle == 0 ? 0 : pthread_cancel(sendThreadHanle);
|
||||
stackThreadHanle = stackThreadHanle == 0 ? 0 : pthread_cancel(stackThreadHanle);
|
||||
sendHreatThreadHandle = sendHreatThreadHandle == 0 ? 0 : pthread_cancel(sendHreatThreadHandle);
|
||||
extStackThreadHandle = extStackThreadHandle == 0 ? 0 : pthread_cancel(extStackThreadHandle);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2023-03-02 10:00:52
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-09-07 10:56:15
|
||||
* @LastEditTime: 2023-11-27 16:27:18
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/AT10/AT10_gimbal_funtion.cpp
|
||||
*/
|
||||
#include "AT10_gimbal_driver.h"
|
||||
|
@ -17,7 +17,7 @@
|
|||
*
|
||||
* @return The return value is the number of bytes written to the buffer.
|
||||
*/
|
||||
uint32_t AT10GimbalDriver::setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &pos)
|
||||
uint32_t AT10GimbalDriver::setGimabalPos(const AMOV_GIMBAL_POS_T &pos)
|
||||
{
|
||||
int16_t yaw, pitch;
|
||||
AT10::GIMBAL_CMD_A1_MSG_T temp;
|
||||
|
@ -35,14 +35,14 @@ uint32_t AT10GimbalDriver::setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &po
|
|||
}
|
||||
|
||||
/**
|
||||
* It takes a struct of type amovGimbal::AMOV_GIMBAL_POS_T and converts it to a struct of type
|
||||
* It takes a struct of type AMOV_GIMBAL_POS_T and converts it to a struct of type
|
||||
* G1::GIMBAL_SET_POS_MSG_T
|
||||
*
|
||||
* @param speed the speed of the gimbal
|
||||
*
|
||||
* @return The return value is the number of bytes written to the buffer.
|
||||
*/
|
||||
uint32_t AT10GimbalDriver::setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &speed)
|
||||
uint32_t AT10GimbalDriver::setGimabalSpeed(const AMOV_GIMBAL_POS_T &speed)
|
||||
{
|
||||
int16_t speedYaw, speedPitch;
|
||||
AT10::GIMBAL_CMD_A1_MSG_T temp;
|
||||
|
@ -66,7 +66,7 @@ uint32_t AT10GimbalDriver::setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &
|
|||
*
|
||||
* @return The return value is the number of bytes written to the buffer.
|
||||
*/
|
||||
uint32_t AT10GimbalDriver::setGimabalFollowSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &followSpeed)
|
||||
uint32_t AT10GimbalDriver::setGimabalFollowSpeed(const AMOV_GIMBAL_POS_T &followSpeed)
|
||||
{
|
||||
state.maxFollow.pitch = fabs(followSpeed.pitch * 100);
|
||||
state.maxFollow.yaw = fabs(followSpeed.yaw * 100);
|
||||
|
@ -110,10 +110,10 @@ uint32_t AT10GimbalDriver::takePic(void)
|
|||
*
|
||||
* @return The return value is the number of bytes written to the serial port.
|
||||
*/
|
||||
uint32_t AT10GimbalDriver::setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState)
|
||||
uint32_t AT10GimbalDriver::setVideo(const AMOV_GIMBAL_VIDEO_T newState)
|
||||
{
|
||||
uint16_t temp;
|
||||
if (newState == amovGimbal::AMOV_GIMBAL_VIDEO_T::AMOV_GIMBAL_VIDEO_TAKE)
|
||||
if (newState == AMOV_GIMBAL_VIDEO_T::AMOV_GIMBAL_VIDEO_TAKE)
|
||||
{
|
||||
temp = 0x14 << 3;
|
||||
}
|
||||
|
@ -127,20 +127,20 @@ uint32_t AT10GimbalDriver::setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newSta
|
|||
return pack(AT10::GIMBAL_CMD_STD_CAMERA, (uint8_t *)&data, sizeof(uint16_t));
|
||||
}
|
||||
|
||||
uint32_t AT10GimbalDriver::setGimbalZoom(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
uint32_t AT10GimbalDriver::setGimbalZoom(AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
{
|
||||
if (targetRate == 0.0f)
|
||||
{
|
||||
uint16_t temp = 0;
|
||||
switch (zoom)
|
||||
{
|
||||
case amovGimbal::AMOV_GIMBAL_ZOOM_T::AMOV_GIMBAL_ZOOM_IN:
|
||||
case AMOV_GIMBAL_ZOOM_T::AMOV_GIMBAL_ZOOM_IN:
|
||||
temp = 0X08 << 3;
|
||||
break;
|
||||
case amovGimbal::AMOV_GIMBAL_ZOOM_T::AMOV_GIMBAL_ZOOM_OUT:
|
||||
case AMOV_GIMBAL_ZOOM_T::AMOV_GIMBAL_ZOOM_OUT:
|
||||
temp = 0X09 << 3;
|
||||
break;
|
||||
case amovGimbal::AMOV_GIMBAL_ZOOM_T::AMOV_GIMBAL_ZOOM_STOP:
|
||||
case AMOV_GIMBAL_ZOOM_T::AMOV_GIMBAL_ZOOM_STOP:
|
||||
temp = 0X01 << 3;
|
||||
break;
|
||||
default:
|
||||
|
@ -160,18 +160,18 @@ uint32_t AT10GimbalDriver::setGimbalZoom(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, fl
|
|||
}
|
||||
}
|
||||
|
||||
uint32_t AT10GimbalDriver::setGimbalFocus(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
uint32_t AT10GimbalDriver::setGimbalFocus(AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
{
|
||||
uint16_t temp = 0;
|
||||
switch (zoom)
|
||||
{
|
||||
case amovGimbal::AMOV_GIMBAL_ZOOM_T::AMOV_GIMBAL_ZOOM_IN:
|
||||
case AMOV_GIMBAL_ZOOM_T::AMOV_GIMBAL_ZOOM_IN:
|
||||
temp = 0X0B << 3;
|
||||
break;
|
||||
case amovGimbal::AMOV_GIMBAL_ZOOM_T::AMOV_GIMBAL_ZOOM_OUT:
|
||||
case AMOV_GIMBAL_ZOOM_T::AMOV_GIMBAL_ZOOM_OUT:
|
||||
temp = 0X0A << 3;
|
||||
break;
|
||||
case amovGimbal::AMOV_GIMBAL_ZOOM_T::AMOV_GIMBAL_ZOOM_STOP:
|
||||
case AMOV_GIMBAL_ZOOM_T::AMOV_GIMBAL_ZOOM_STOP:
|
||||
temp = 0X01 << 3;
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -1,36 +0,0 @@
|
|||
add_library(AMOV_Gimbal ${LIB_FLAG})
|
||||
|
||||
SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g2 -ggdb -fPIC")
|
||||
SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall -fPIC")
|
||||
|
||||
add_definitions(
|
||||
-DMAX_QUEUE_SIZE=100
|
||||
)
|
||||
|
||||
add_subdirectory(FIFO)
|
||||
########## add types of gimbal ##############
|
||||
add_subdirectory(G1)
|
||||
add_subdirectory(G2)
|
||||
add_subdirectory(Q10f)
|
||||
add_subdirectory(AT10)
|
||||
|
||||
file(GLOB LIB_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
|
||||
|
||||
target_sources(AMOV_Gimbal
|
||||
PRIVATE
|
||||
${LIB_FILES}
|
||||
)
|
||||
|
||||
target_link_libraries(AMOV_Gimbal
|
||||
PRIVATE
|
||||
Gimabl_G1
|
||||
Gimabl_G2
|
||||
Gimabl_Q10f
|
||||
Gimabl_AT10
|
||||
)
|
||||
|
||||
target_include_directories(AMOV_Gimbal
|
||||
PUBLIC
|
||||
${CMAKE_CURRENT_SOURCE_DIR}
|
||||
)
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
add_library(FIFO)
|
||||
|
||||
file(GLOB LIB_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
|
||||
|
||||
target_sources(FIFO
|
||||
PRIVATE
|
||||
${LIB_FILES}
|
||||
)
|
||||
|
||||
target_include_directories(FIFO
|
||||
PUBLIC
|
||||
${CMAKE_CURRENT_SOURCE_DIR}
|
||||
)
|
|
@ -3,8 +3,8 @@
|
|||
* @Author : Aiyangsky
|
||||
* @Date : 2022-08-26 21:42:10
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-07-21 16:10:16
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/FIFO/Ring_Fifo.cpp
|
||||
* @LastEditTime: 2023-11-28 11:47:34
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/FIFO/Ring_Fifo.cpp
|
||||
*/
|
||||
|
||||
#include <string.h>
|
||||
|
|
|
@ -3,8 +3,8 @@
|
|||
* @Author : Aiyangsky
|
||||
* @Date : 2022-08-26 21:42:02
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-08-16 21:22:46
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/FIFO/Ring_Fifo.h
|
||||
* @LastEditTime: 2023-11-28 11:47:39
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/FIFO/Ring_Fifo.h
|
||||
*/
|
||||
|
||||
#ifndef RING_FIFO_H
|
||||
|
|
|
@ -3,8 +3,8 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-27 18:10:06
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2022-10-28 14:10:02
|
||||
* @FilePath: \amov-gimbal-sdk\src\G1\g1_gimbal_crc32.h
|
||||
* @LastEditTime: 2023-12-05 16:30:13
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/G1/g1_gimbal_crc32.h
|
||||
*/
|
||||
#ifndef G1_GIMBAL_CRC32_H
|
||||
#define G1_GIMBAL_CRC32_H
|
||||
|
|
|
@ -3,8 +3,8 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-27 18:10:06
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-09-07 11:01:25
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/G1/g1_gimbal_driver.cpp
|
||||
* @LastEditTime: 2023-12-05 17:22:57
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/G1/g1_gimbal_driver.cpp
|
||||
*/
|
||||
#include "g1_gimbal_driver.h"
|
||||
#include "g1_gimbal_crc32.h"
|
||||
|
@ -75,7 +75,7 @@ void g1GimbalDriver::convert(void *buf)
|
|||
state.rel.pitch = tempPos->HALL_pitch * G1_SCALE_FACTOR;
|
||||
updateGimbalStateCallback(state.rel.roll, state.rel.pitch, state.rel.yaw,
|
||||
state.abs.roll, state.abs.pitch, state.abs.yaw,
|
||||
state.fov.x, state.fov.y);
|
||||
state.fov.x, state.fov.y, updataCaller);
|
||||
mState.unlock();
|
||||
break;
|
||||
|
||||
|
|
|
@ -3,19 +3,18 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-28 12:24:21
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-09-07 02:31:19
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/G1/g1_gimbal_driver.h
|
||||
* @LastEditTime: 2023-12-05 16:29:58
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/G1/g1_gimbal_driver.h
|
||||
*/
|
||||
#include "../amov_gimbal.h"
|
||||
#ifndef __G1_DRIVER_H
|
||||
#define __G1_DRIVER_H
|
||||
|
||||
#include "../amov_gimbal_private.h"
|
||||
#include "g1_gimbal_struct.h"
|
||||
#include <mutex>
|
||||
#include <malloc.h>
|
||||
#include <iostream>
|
||||
|
||||
#ifndef __G1_DRIVER_H
|
||||
#define __G1_DRIVER_H
|
||||
|
||||
class g1GimbalDriver : protected amovGimbal::amovGimbalBase
|
||||
{
|
||||
private:
|
||||
|
@ -29,22 +28,22 @@ private:
|
|||
|
||||
public:
|
||||
// funtions
|
||||
uint32_t setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &pos);
|
||||
uint32_t setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &speed);
|
||||
uint32_t setGimabalFollowSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &followSpeed);
|
||||
uint32_t setGimabalPos(const AMOV_GIMBAL_POS_T &pos);
|
||||
uint32_t setGimabalSpeed(const AMOV_GIMBAL_POS_T &speed);
|
||||
uint32_t setGimabalFollowSpeed(const AMOV_GIMBAL_POS_T &followSpeed);
|
||||
uint32_t setGimabalHome(void);
|
||||
|
||||
uint32_t takePic(void);
|
||||
uint32_t setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState);
|
||||
uint32_t setVideo(const AMOV_GIMBAL_VIDEO_T newState);
|
||||
|
||||
uint32_t attitudeCorrection(const amovGimbal::AMOV_GIMBAL_QUATERNION_T &quaterion,
|
||||
const amovGimbal::AMOV_GIMBAL_VELOCITY_T &speed,
|
||||
const amovGimbal::AMOV_GIMBAL_VELOCITY_T &acc,
|
||||
uint32_t attitudeCorrection(const AMOV_GIMBAL_QUATERNION_T &quaterion,
|
||||
const AMOV_GIMBAL_VELOCITY_T &speed,
|
||||
const AMOV_GIMBAL_VELOCITY_T &acc,
|
||||
void *extenData);
|
||||
|
||||
uint32_t attitudeCorrection(const amovGimbal::AMOV_GIMBAL_POS_T &pos,
|
||||
const amovGimbal::AMOV_GIMBAL_VELOCITY_T &seppd,
|
||||
const amovGimbal::AMOV_GIMBAL_VELOCITY_T &acc,
|
||||
uint32_t attitudeCorrection(const AMOV_GIMBAL_POS_T &pos,
|
||||
const AMOV_GIMBAL_VELOCITY_T &seppd,
|
||||
const AMOV_GIMBAL_VELOCITY_T &acc,
|
||||
void *extenData);
|
||||
uint32_t extensionFuntions(void *cmd);
|
||||
|
||||
|
|
|
@ -3,8 +3,8 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2023-03-02 10:00:52
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-09-07 10:50:30
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/G1/g1_gimbal_funtion.cpp
|
||||
* @LastEditTime: 2023-12-05 16:29:51
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/G1/g1_gimbal_funtion.cpp
|
||||
*/
|
||||
#include "g1_gimbal_driver.h"
|
||||
#include "g1_gimbal_crc32.h"
|
||||
|
@ -18,7 +18,7 @@
|
|||
*
|
||||
* @return The return value is the number of bytes written to the buffer.
|
||||
*/
|
||||
uint32_t g1GimbalDriver::setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &pos)
|
||||
uint32_t g1GimbalDriver::setGimabalPos(const AMOV_GIMBAL_POS_T &pos)
|
||||
{
|
||||
G1::GIMBAL_SET_POS_MSG_T temp;
|
||||
temp.mode = G1::GIMBAL_CMD_POS_MODE_ANGLE;
|
||||
|
@ -32,14 +32,14 @@ uint32_t g1GimbalDriver::setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &pos)
|
|||
}
|
||||
|
||||
/**
|
||||
* It takes a struct of type amovGimbal::AMOV_GIMBAL_POS_T and converts it to a struct of type
|
||||
* It takes a struct of type AMOV_GIMBAL_POS_T and converts it to a struct of type
|
||||
* G1::GIMBAL_SET_POS_MSG_T
|
||||
*
|
||||
* @param speed the speed of the gimbal
|
||||
*
|
||||
* @return The return value is the number of bytes written to the buffer.
|
||||
*/
|
||||
uint32_t g1GimbalDriver::setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &speed)
|
||||
uint32_t g1GimbalDriver::setGimabalSpeed(const AMOV_GIMBAL_POS_T &speed)
|
||||
{
|
||||
G1::GIMBAL_SET_POS_MSG_T temp;
|
||||
temp.mode = G1::GIMBAL_CMD_POS_MODE_SPEED;
|
||||
|
@ -59,7 +59,7 @@ uint32_t g1GimbalDriver::setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &sp
|
|||
*
|
||||
* @return The return value is the number of bytes written to the buffer.
|
||||
*/
|
||||
uint32_t g1GimbalDriver::setGimabalFollowSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &followSpeed)
|
||||
uint32_t g1GimbalDriver::setGimabalFollowSpeed(const AMOV_GIMBAL_POS_T &followSpeed)
|
||||
{
|
||||
state.maxFollow.pitch = followSpeed.pitch / G1_SCALE_FACTOR;
|
||||
state.maxFollow.roll = followSpeed.roll / G1_SCALE_FACTOR;
|
||||
|
@ -100,18 +100,18 @@ uint32_t g1GimbalDriver::takePic(void)
|
|||
*
|
||||
* @return The return value is the number of bytes written to the serial port.
|
||||
*/
|
||||
uint32_t g1GimbalDriver::setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState)
|
||||
uint32_t g1GimbalDriver::setVideo(const AMOV_GIMBAL_VIDEO_T newState)
|
||||
{
|
||||
uint8_t temp = G1::GIMBAL_CMD_CAMERA_REC;
|
||||
|
||||
mState.lock();
|
||||
if (state.video == amovGimbal::AMOV_GIMBAL_VIDEO_TAKE)
|
||||
if (state.video == AMOV_GIMBAL_VIDEO_TAKE)
|
||||
{
|
||||
state.video = amovGimbal::AMOV_GIMBAL_VIDEO_OFF;
|
||||
state.video = AMOV_GIMBAL_VIDEO_OFF;
|
||||
}
|
||||
else
|
||||
{
|
||||
state.video = amovGimbal::AMOV_GIMBAL_VIDEO_TAKE;
|
||||
state.video = AMOV_GIMBAL_VIDEO_TAKE;
|
||||
}
|
||||
mState.unlock();
|
||||
|
||||
|
@ -124,10 +124,10 @@ uint32_t g1GimbalDriver::setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState
|
|||
*
|
||||
* @param quaterion The "quaterion" parameter is a structure of type "AMOV_GIMBAL_QUATERNION_T" which
|
||||
* contains the following fields:
|
||||
* @param speed The "speed" parameter is of type `amovGimbal::AMOV_GIMBAL_VELOCITY_T` and represents
|
||||
* @param speed The "speed" parameter is of type `AMOV_GIMBAL_VELOCITY_T` and represents
|
||||
* the velocity of the gimbal. It contains three components: `x`, `y`, and `z`, which represent the
|
||||
* velocity in the respective axes.
|
||||
* @param acc The "acc" parameter is of type "amovGimbal::AMOV_GIMBAL_VELOCITY_T" and represents the
|
||||
* @param acc The "acc" parameter is of type "AMOV_GIMBAL_VELOCITY_T" and represents the
|
||||
* acceleration of the gimbal in three dimensions (x, y, z).
|
||||
* @param extenData The extenData parameter is a void pointer that can be used to pass additional data
|
||||
* to the attitudeCorrection function. In this case, it is being cast to a float pointer and then
|
||||
|
@ -136,9 +136,9 @@ uint32_t g1GimbalDriver::setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState
|
|||
*
|
||||
* @return a uint32_t value.
|
||||
*/
|
||||
uint32_t g1GimbalDriver::attitudeCorrection(const amovGimbal::AMOV_GIMBAL_QUATERNION_T &quaterion,
|
||||
const amovGimbal::AMOV_GIMBAL_VELOCITY_T &speed,
|
||||
const amovGimbal::AMOV_GIMBAL_VELOCITY_T &acc,
|
||||
uint32_t g1GimbalDriver::attitudeCorrection(const AMOV_GIMBAL_QUATERNION_T &quaterion,
|
||||
const AMOV_GIMBAL_VELOCITY_T &speed,
|
||||
const AMOV_GIMBAL_VELOCITY_T &acc,
|
||||
void *extenData)
|
||||
{
|
||||
G1::GIMBAL_ATT_CORR_MSG_T temp;
|
||||
|
@ -161,12 +161,12 @@ uint32_t g1GimbalDriver::attitudeCorrection(const amovGimbal::AMOV_GIMBAL_QUATER
|
|||
* The function `attitudeCorrection` calculates the attitude correction for a gimbal based on the given
|
||||
* position, velocity, and acceleration values.
|
||||
*
|
||||
* @param pos The "pos" parameter is of type amovGimbal::AMOV_GIMBAL_POS_T and represents the current
|
||||
* @param pos The "pos" parameter is of type AMOV_GIMBAL_POS_T and represents the current
|
||||
* position of the gimbal. It contains the pitch, roll, and yaw angles of the gimbal.
|
||||
* @param seppd seppd stands for "Separate Pointing Device" and it represents the velocity of the
|
||||
* gimbal in terms of pitch, roll, and yaw. It is of type `amovGimbal::AMOV_GIMBAL_VELOCITY_T` which
|
||||
* gimbal in terms of pitch, roll, and yaw. It is of type `AMOV_GIMBAL_VELOCITY_T` which
|
||||
* likely contains three float values for pitch,
|
||||
* @param acc The "acc" parameter is of type "amovGimbal::AMOV_GIMBAL_VELOCITY_T" and represents the
|
||||
* @param acc The "acc" parameter is of type "AMOV_GIMBAL_VELOCITY_T" and represents the
|
||||
* acceleration of the gimbal in three dimensions (x, y, z).
|
||||
* @param extenData The `extenData` parameter is a void pointer that can be used to pass additional
|
||||
* data to the `attitudeCorrection` function. In this code snippet, it is assumed that `extenData` is a
|
||||
|
@ -174,9 +174,9 @@ uint32_t g1GimbalDriver::attitudeCorrection(const amovGimbal::AMOV_GIMBAL_QUATER
|
|||
*
|
||||
* @return a uint32_t value.
|
||||
*/
|
||||
uint32_t g1GimbalDriver::attitudeCorrection(const amovGimbal::AMOV_GIMBAL_POS_T &pos,
|
||||
const amovGimbal::AMOV_GIMBAL_VELOCITY_T &seppd,
|
||||
const amovGimbal::AMOV_GIMBAL_VELOCITY_T &acc,
|
||||
uint32_t g1GimbalDriver::attitudeCorrection(const AMOV_GIMBAL_POS_T &pos,
|
||||
const AMOV_GIMBAL_VELOCITY_T &seppd,
|
||||
const AMOV_GIMBAL_VELOCITY_T &acc,
|
||||
void *extenData)
|
||||
{
|
||||
G1::GIMBAL_ATT_CORR_MSG_T temp;
|
||||
|
|
|
@ -3,8 +3,8 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-27 18:10:07
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-09-07 10:45:13
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/G1/g1_gimbal_struct.h
|
||||
* @LastEditTime: 2023-12-05 16:29:48
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/G1/g1_gimbal_struct.h
|
||||
*/
|
||||
#ifndef G1_GIMBAL_STRUCT_H
|
||||
#define G1_GIMBAL_STRUCT_H
|
||||
|
|
|
@ -0,0 +1,41 @@
|
|||
/*
|
||||
* @Description:
|
||||
* @Author: L LC @amov
|
||||
* @Date: 2023-10-20 16:33:07
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-12-05 16:29:39
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/GX40/GX40_gimbal_crc16.h
|
||||
*/
|
||||
#ifndef GX40_GIMBAL_CRC16_H
|
||||
#define GX40_GIMBAL_CRC16_H
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
namespace GX40
|
||||
{
|
||||
const static uint16_t crc16Tab[16] = {
|
||||
0x0000, 0x1021, 0x2042, 0x3063,
|
||||
0x4084, 0x50a5, 0x60c6, 0x70e7,
|
||||
0x8108, 0x9129, 0xa14a, 0xb16b,
|
||||
0xc18c, 0xd1ad, 0xe1ce, 0xf1ef};
|
||||
|
||||
static inline uint16_t CalculateCrc16(const uint8_t *ptr, uint8_t len)
|
||||
{
|
||||
uint16_t crc = 0;
|
||||
uint8_t temp;
|
||||
|
||||
while (len-- != 0)
|
||||
{
|
||||
temp = crc >> 12;
|
||||
crc <<= 4;
|
||||
crc ^= crc16Tab[temp ^ (*ptr >> 4)];
|
||||
temp = crc >> 12;
|
||||
crc <<= 4;
|
||||
crc ^= crc16Tab[temp ^ (*ptr & 0x0F)];
|
||||
ptr++;
|
||||
}
|
||||
crc = (crc >> 8) | (crc << 8);
|
||||
return (crc);
|
||||
}
|
||||
}
|
||||
#endif
|
|
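For reference, a minimal sketch (buffer contents are made up) of how CalculateCrc16 above is used to protect a frame: the CRC is computed over every byte except the trailing two-byte CRC field, stored in the host's native uint16_t order as pack() does, and re-checked the same way on receive.

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "GX40_gimbal_crc16.h"

int main()
{
    uint8_t frame[16] = {0};   // hypothetical frame buffer
    uint8_t bodyLen = 14;      // everything before the 2-byte CRC field

    uint16_t crc = GX40::CalculateCrc16(frame, bodyLen);
    memcpy(frame + bodyLen, &crc, sizeof(crc));          // appended the way pack() stores it

    uint16_t stored;
    memcpy(&stored, frame + bodyLen, sizeof(stored));    // re-read the way parser() compares it
    printf("frame %s\n", stored == GX40::CalculateCrc16(frame, bodyLen) ? "ok" : "corrupt");
    return 0;
}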
@ -0,0 +1,304 @@
|
|||
/*
|
||||
* @Description:
|
||||
* @Author: L LC @amov
|
||||
* @Date: 2023-10-20 16:08:17
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-12-06 10:27:28
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/GX40/GX40_gimbal_driver.cpp
|
||||
*/
|
||||
#include <string.h>
|
||||
#include "GX40_gimbal_driver.h"
|
||||
#include "GX40_gimbal_crc16.h"
|
||||
#include <math.h>
|
||||
|
||||
/**
|
||||
* This is the constructor of the GX40GimbalDriver class in C++; it initializes member
|
||||
* variables and sets the parser state to idle.
|
||||
*
|
||||
* @param _IO _IO is a pointer to an object of type amovGimbal::IOStreamBase. It is used to communicate
|
||||
* with the gimbal device.
|
||||
*/
|
||||
GX40GimbalDriver::GX40GimbalDriver(amovGimbal::IOStreamBase *_IO) : amovGimbal::amovGimbalBase(_IO)
|
||||
{
|
||||
rxQueue = new fifoRing(sizeof(GX40::GIMBAL_FRAME_T), MAX_QUEUE_SIZE);
|
||||
txQueue = new fifoRing(sizeof(GX40::GIMBAL_FRAME_T), MAX_QUEUE_SIZE);
|
||||
|
||||
targetPos[0] = 0;
|
||||
targetPos[1] = 0;
|
||||
targetPos[2] = 0;
|
||||
|
||||
parserState = GX40::GIMBAL_FRAME_PARSER_STATE_IDLE;
|
||||
upDataTs = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()) - std::chrono::milliseconds(2000);
|
||||
}
|
||||
|
||||
/**
|
||||
* The function `nopSend` continuously sends a "no operation" command to a GX40 gimbal driver.
|
||||
*/
|
||||
void GX40GimbalDriver::nopSend(void)
|
||||
{
|
||||
while (1)
|
||||
{
|
||||
// 50Hz
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(20));
|
||||
pack(GX40::GIMBAL_CMD_NOP, nullptr, 0);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The function `parserStart` initializes the gimbal driver by setting the callback function, creating
|
||||
* two threads for the main loop and sending NOP commands, and detaching the threads.
|
||||
*
|
||||
* @param callback The parameter "callback" is of type "pAmovGimbalStateInvoke", which is a function
|
||||
* pointer type. It is used to specify a callback function that will be invoked when the gimbal state
|
||||
* is updated.
|
||||
*/
|
||||
void GX40GimbalDriver::parserStart(pAmovGimbalStateInvoke callback, void *caller)
|
||||
{
|
||||
this->updateGimbalStateCallback = callback;
|
||||
this->updataCaller = caller;
|
||||
|
||||
std::thread mainLoop(&GX40GimbalDriver::mainLoop, this);
|
||||
std::thread sendNop(&GX40GimbalDriver::nopSend, this);
|
||||
|
||||
this->stackThreadHanle = mainLoop.native_handle();
|
||||
this->nopSendThreadHandle = sendNop.native_handle();
|
||||
|
||||
mainLoop.detach();
|
||||
sendNop.detach();
|
||||
}
|
||||
|
||||
/**
|
||||
* The function `pack` in the `GX40GimbalDriver` class is responsible for packing data into a frame for
|
||||
* transmission.
|
||||
*
|
||||
* @param uint32_t The parameter `cmd` is an unsigned 32-bit integer representing the command.
|
||||
* @param pPayload The `pPayload` parameter is a pointer to the payload data that needs to be packed.
|
||||
* It is of type `uint8_t*`, which means it is a pointer to an array of unsigned 8-bit integers. The
|
||||
* payload data is stored in this array.
|
||||
* @param payloadSize The parameter `payloadSize` represents the size of the payload data in bytes. It
|
||||
* is used to determine the size of the payload data that needs to be packed into the `temp` structure.
|
||||
*
|
||||
* @return a uint32_t value, which is stored in the variable "ret".
|
||||
*/
|
||||
uint32_t GX40GimbalDriver::pack(IN uint32_t cmd, uint8_t *pPayload, uint8_t payloadSize)
|
||||
{
|
||||
uint32_t ret = 0;
|
||||
GX40::GIMBAL_FRAME_T temp;
|
||||
memset(&temp, 0, sizeof(GX40::GIMBAL_FRAME_T));
|
||||
GX40::GIMBAL_PRIMARY_MASTER_FRAME_T *primary = (GX40::GIMBAL_PRIMARY_MASTER_FRAME_T *)temp.primaryData;
|
||||
carrierStateMutex.lock();
|
||||
|
||||
primary->state = 0X00;
|
||||
// Fill in attitude and command data
|
||||
primary->roll = targetPos[0];
|
||||
primary->pitch = targetPos[1];
|
||||
primary->yaw = targetPos[2];
|
||||
|
||||
primary->state |= (0X01 << 2);
|
||||
|
||||
temp.otherData[0] = cmd;
|
||||
memcpy(temp.otherData + 1, pPayload, payloadSize);
|
||||
|
||||
// Fill in the fixed header bytes
|
||||
temp.head.u16 = XF_SEND_HEAD;
|
||||
temp.version = 0X01;
|
||||
primary->secondaryFlag = 0X01;
|
||||
temp.lenght.u16 = 69 + payloadSize + 1 + 2; // 69-byte fixed block (head, length, version, primary, secondary) + cmd byte + payload + 2-byte CRC
|
||||
|
||||
// Fill in inertial navigation (INS) data
|
||||
std::chrono::milliseconds nowTs = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch());
|
||||
// GNSS data is treated as lost if it has not been updated within 1.5 s
|
||||
if ((nowTs.count() - upDataTs.count()) < std::chrono::milliseconds(1500).count() &&
|
||||
cmd == GX40::GIMBAL_CMD_NOP)
|
||||
{
|
||||
primary->selfRoll = (int16_t)(-(carrierPos.roll / 0.01f));
|
||||
primary->selfPitch = (int16_t)(-(carrierPos.pitch / 0.01f));
|
||||
primary->selfYaw = (int16_t)(carrierPos.yaw / 0.01f);
|
||||
primary->accE = (int16_t)(carrierAcc.y / 0.01f);
|
||||
primary->accN = (int16_t)(carrierAcc.x / 0.01f);
|
||||
primary->accUp = (int16_t)(carrierAcc.z / 0.01f);
|
||||
primary->speedE = (int16_t)(carrierSpeed.y / 0.01f);
|
||||
primary->speedN = (int16_t)(carrierSpeed.x / 0.01f);
|
||||
primary->speedUp = (int16_t)(carrierSpeed.z / 0.01f);
|
||||
|
||||
carrierGNSS.GPSweeks = ((nowTs.count() / 1000) - 315964800) / 604800;
|
||||
carrierGNSS.GPSms = nowTs.count() - (carrierGNSS.GPSweeks * 604800000);
|
||||
|
||||
memcpy(temp.secondaryData, &carrierGNSS, sizeof(GX40::GIMBAL_SECONDARY_MASTER_FRAME_T));
|
||||
|
||||
primary->state |= (0X01 << 0);
|
||||
}
|
||||
else
|
||||
{
|
||||
primary->state &= (~(0X01 << 0));
|
||||
}
|
||||
|
||||
carrierStateMutex.unlock();
|
||||
|
||||
// Checksum
|
||||
*(uint16_t *)(&temp.otherData[payloadSize + 1]) = GX40::CalculateCrc16((uint8_t *)&temp, 69 + 1 + payloadSize);
|
||||
|
||||
// Append to the transmit queue
|
||||
if (txQueue->inCell(&temp))
|
||||
{
|
||||
ret = temp.lenght.u16;
|
||||
}
|
||||
return ret;
|
||||
}
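The GPSweeks/GPSms fields filled in above come from the Unix timestamp: 315964800 s is the offset from the Unix epoch (1970-01-01) to the GPS epoch (1980-01-06) and 604800 s is one week. A standalone sketch of the usual GPS-week / milliseconds-into-week arithmetic with the same constants (64-bit intermediates, leap seconds not applied):

#include <chrono>
#include <cstdint>
#include <cstdio>

int main()
{
    using namespace std::chrono;
    int64_t unixMs = duration_cast<milliseconds>(
                         system_clock::now().time_since_epoch()).count();

    int64_t gpsMsTotal = unixMs - 315964800LL * 1000LL;            // ms since the GPS epoch
    int32_t gpsWeek = (int32_t)(gpsMsTotal / (604800LL * 1000LL));
    int32_t msIntoWeek = (int32_t)(gpsMsTotal % (604800LL * 1000LL));

    printf("GPS week %d, %d ms into the week\n", gpsWeek, msIntoWeek);
    return 0;
}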
|
||||
|
||||
/**
|
||||
* The function `convert` takes a buffer and extracts data from it to update the state of a gimbal
|
||||
* driver.
|
||||
*
|
||||
* @param buf The `buf` parameter is a void pointer that points to a buffer containing data that needs
|
||||
* to be converted.
|
||||
*/
|
||||
void GX40GimbalDriver::convert(void *buf)
|
||||
{
|
||||
GX40::GIMBAL_FRAME_T *temp;
|
||||
GX40::GIMBAL_PRIMARY_SLAVE_FRAME_T *primary;
|
||||
GX40::GIMBAL_SECONDARY_SLAVE_FRAME_T *secondary;
|
||||
temp = reinterpret_cast<GX40::GIMBAL_FRAME_T *>(buf);
|
||||
primary = (GX40::GIMBAL_PRIMARY_SLAVE_FRAME_T *)temp->primaryData;
|
||||
secondary = (GX40::GIMBAL_SECONDARY_SLAVE_FRAME_T *)temp->secondaryData;
|
||||
|
||||
mState.lock();
|
||||
this->state.workMode = (AMOV_GIMBAL_SERVO_MODE_T)primary->workMode;
|
||||
this->state.cameraFlag = (AMOV_GIMBAL_CAMERA_FLAG_T)primary->state;
|
||||
// Probably needs a further transform to recover the actual frame angles
|
||||
this->state.rel.yaw = -(primary->motorYaw * XF_ANGLE_DPI);
|
||||
this->state.rel.yaw = this->state.rel.yaw < -180.0f ? this->state.rel.yaw + 360.0f : this->state.rel.yaw;
|
||||
this->state.rel.pitch = -(primary->motorPitch * XF_ANGLE_DPI);
|
||||
this->state.rel.roll = -(primary->motorRoll * XF_ANGLE_DPI);
|
||||
|
||||
this->state.abs.yaw = -(primary->yaw * XF_ANGLE_DPI);
|
||||
this->state.abs.yaw = this->state.abs.yaw < -180.0f ? this->state.abs.yaw + 360.0f : this->state.abs.yaw;
|
||||
|
||||
this->state.abs.pitch = -(primary->pitch * XF_ANGLE_DPI);
|
||||
this->state.abs.roll = -(primary->roll * XF_ANGLE_DPI);
|
||||
|
||||
this->state.relSpeed.yaw = -(primary->speedYaw * XF_ANGLE_DPI);
|
||||
this->state.relSpeed.pitch = -(primary->speedPitch * XF_ANGLE_DPI);
|
||||
this->state.relSpeed.roll = -(primary->speedRoll * XF_ANGLE_DPI);
|
||||
|
||||
// Approximate values, not accurate
|
||||
this->state.fov.x = secondary->camera1Zoom * 0.1f;
|
||||
this->state.fov.x = 60.2f / this->state.fov.x;
|
||||
this->state.fov.y = secondary->camera1Zoom * 0.1f;
|
||||
this->state.fov.y = 36.1f / this->state.fov.y;
|
||||
|
||||
updateGimbalStateCallback(state.rel.roll, state.rel.pitch, state.rel.yaw,
|
||||
state.abs.roll, state.abs.pitch, state.abs.yaw,
|
||||
state.fov.x, state.fov.y, updataCaller);
|
||||
|
||||
mState.unlock();
|
||||
}
|
||||
|
||||
/**
|
||||
* The function returns the total length of a data packet as stored in the frame's length field
* (header, payload and CRC included).
*
* @param pack The parameter "pack" is a void pointer, which means it can point to any type of data. In
* this case, it is expected to point to a structure of type "GX40::GIMBAL_FRAME_T".
*
* @return the frame length recorded in the "lenght" field of the gimbal frame.
|
||||
*/
|
||||
uint32_t GX40GimbalDriver::calPackLen(void *pack)
|
||||
{
|
||||
return ((GX40::GIMBAL_FRAME_T *)pack)->lenght.u16;
|
||||
}
|
||||
/**
|
||||
* The function `parser` is used to parse incoming data frames in a specific format and returns a
|
||||
* boolean value indicating whether the parsing was successful or not.
|
||||
*
|
||||
* @param uint8_t The parameter `byte` is of type `uint8_t`, which is an unsigned 8-bit integer. It is
|
||||
* used to store a single byte of data that is being parsed by the `GX40GimbalDriver::parser` function.
|
||||
*
|
||||
* @return a boolean value, either true or false.
|
||||
*/
|
||||
bool GX40GimbalDriver::parser(IN uint8_t byte)
|
||||
{
|
||||
bool state = false;
|
||||
static uint8_t payloadLenght = 0;
|
||||
static uint8_t *pRx = nullptr;
|
||||
|
||||
switch (parserState)
|
||||
{
|
||||
case GX40::GIMBAL_FRAME_PARSER_STATE_IDLE:
|
||||
if (byte == ((XF_RCV_HEAD >> 8) & 0XFF))
|
||||
{
|
||||
rx.head.u8[0] = byte;
|
||||
parserState = GX40::GIMBAL_FRAME_PARSER_STATE_HEAD;
|
||||
}
|
||||
break;
|
||||
|
||||
case GX40::GIMBAL_FRAME_PARSER_STATE_HEAD:
|
||||
if (byte == ((XF_RCV_HEAD >> 0) & 0XFF))
|
||||
{
|
||||
rx.head.u8[1] = byte;
|
||||
parserState = GX40::GIMBAL_FRAME_PARSER_STATE_LEN1;
|
||||
}
|
||||
else
|
||||
{
|
||||
parserState = GX40::GIMBAL_FRAME_PARSER_STATE_IDLE;
|
||||
rx.head.u16 = 0;
|
||||
}
|
||||
break;
|
||||
|
||||
case GX40::GIMBAL_FRAME_PARSER_STATE_LEN1:
|
||||
rx.lenght.u8[0] = byte;
|
||||
parserState = GX40::GIMBAL_FRAME_PARSER_STATE_LEN2;
|
||||
break;
|
||||
|
||||
case GX40::GIMBAL_FRAME_PARSER_STATE_LEN2:
|
||||
rx.lenght.u8[1] = byte;
|
||||
parserState = GX40::GIMBAL_FRAME_PARSER_STATE_VERSION;
|
||||
break;
|
||||
|
||||
case GX40::GIMBAL_FRAME_PARSER_STATE_VERSION:
|
||||
if (byte == XF_VERSION)
|
||||
{
|
||||
rx.version = byte;
|
||||
parserState = GX40::GIMBAL_FRAME_PARSER_STATE_PAYLOAD;
|
||||
pRx = rx.primaryData;
|
||||
payloadLenght = rx.lenght.u16 - 5;
|
||||
}
|
||||
else
|
||||
{
|
||||
parserState = GX40::GIMBAL_FRAME_PARSER_STATE_IDLE;
|
||||
rx.head.u16 = 0;
|
||||
rx.lenght.u16 = 0;
|
||||
}
|
||||
break;
|
||||
|
||||
case GX40::GIMBAL_FRAME_PARSER_STATE_PAYLOAD:
|
||||
*pRx = byte;
|
||||
payloadLenght--;
|
||||
pRx++;
|
||||
if (payloadLenght <= 0)
|
||||
{
|
||||
if (*(uint16_t *)(pRx - sizeof(uint16_t)) == GX40::CalculateCrc16((uint8_t *)&rx, rx.lenght.u16 - 2))
|
||||
{
|
||||
state = true;
|
||||
rxQueue->inCell(&rx);
|
||||
}
|
||||
else
|
||||
{
|
||||
memset(&rx, 0, sizeof(GX40::GIMBAL_FRAME_T));
|
||||
}
|
||||
|
||||
parserState = GX40::GIMBAL_FRAME_PARSER_STATE_IDLE;
|
||||
pRx = nullptr;
|
||||
payloadLenght = 0;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
parserState = GX40::GIMBAL_FRAME_PARSER_STATE_IDLE;
|
||||
pRx = nullptr;
|
||||
payloadLenght = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
return state;
|
||||
}
|
|
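To exercise the state machine above, a hedged sketch (not part of the SDK) of building a frame it should accept on a little-endian host; the field meanings are inferred from the parser: 0x8A 0x5E head, little-endian total length, version 0x00, payload, then a CRC-16 over everything before the CRC itself. The payload bytes are placeholders.

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "GX40_gimbal_crc16.h"

static uint16_t buildTestFrame(uint8_t *out, const uint8_t *payload, uint8_t payloadLen)
{
    uint16_t total = 2 + 2 + 1 + payloadLen + 2;   // head + length + version + payload + CRC
    out[0] = 0x8A;                                 // XF_RCV_HEAD, high byte first on the wire
    out[1] = 0x5E;
    out[2] = (uint8_t)(total & 0xFF);              // length, low byte first (little-endian host assumed)
    out[3] = (uint8_t)(total >> 8);
    out[4] = 0x00;                                 // XF_VERSION
    memcpy(out + 5, payload, payloadLen);
    uint16_t crc = GX40::CalculateCrc16(out, total - 2);
    memcpy(out + total - 2, &crc, sizeof(crc));    // native order, matching the parser's comparison
    return total;
}

int main()
{
    uint8_t payload[8] = {0};                      // placeholder payload
    uint8_t frame[128];
    printf("built a %u-byte test frame\n", buildTestFrame(frame, payload, sizeof(payload)));
    return 0;
}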
@ -0,0 +1,86 @@
|
|||
|
||||
/*
|
||||
* @Description:
|
||||
* @Author: L LC @amov
|
||||
* @Date: 2023-10-20 16:08:13
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-12-06 10:27:05
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/GX40/GX40_gimbal_driver.h
|
||||
*/
|
||||
#ifndef __GX40_DRIVER_H
|
||||
#define __GX40_DRIVER_H
|
||||
#include "../amov_gimbal_private.h"
|
||||
#include "GX40_gimbal_struct.h"
|
||||
#include <mutex>
|
||||
#include <malloc.h>
|
||||
#include <iostream>
|
||||
#include <chrono>
|
||||
#include <time.h>
|
||||
|
||||
class GX40GimbalDriver : public amovGimbal::amovGimbalBase
|
||||
{
|
||||
GX40::GIMBAL_FRAME_PARSER_STATE_T parserState;
|
||||
GX40::GIMBAL_FRAME_T rx;
|
||||
|
||||
std::chrono::milliseconds upDataTs;
|
||||
std::mutex carrierStateMutex;
|
||||
|
||||
int16_t targetPos[3];
|
||||
|
||||
AMOV_GIMBAL_POS_T carrierPos;
|
||||
AMOV_GIMBAL_VELOCITY_T carrierSpeed;
|
||||
AMOV_GIMBAL_VELOCITY_T carrierAcc;
|
||||
GX40::GIMBAL_SECONDARY_MASTER_FRAME_T carrierGNSS;
|
||||
|
||||
std::thread::native_handle_type nopSendThreadHandle;
|
||||
void nopSend(void);
|
||||
void parserStart(pAmovGimbalStateInvoke callback, void *caller);
|
||||
|
||||
public:
|
||||
uint32_t pack(IN uint32_t cmd, uint8_t *pPayload, uint8_t payloadSize);
|
||||
bool parser(IN uint8_t byte);
|
||||
void convert(void *buf);
|
||||
uint32_t calPackLen(void *pack);
|
||||
|
||||
// functions
|
||||
uint32_t setGimabalPos(const AMOV_GIMBAL_POS_T &pos);
|
||||
uint32_t setGimabalSpeed(const AMOV_GIMBAL_POS_T &speed);
|
||||
uint32_t setGimabalHome(void);
|
||||
|
||||
uint32_t setGimbalZoom(AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
uint32_t setGimbalFocus(AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
|
||||
uint32_t takePic(void);
|
||||
uint32_t setVideo(const AMOV_GIMBAL_VIDEO_T newState);
|
||||
uint32_t attitudeCorrection(const AMOV_GIMBAL_POS_T &pos,
|
||||
const AMOV_GIMBAL_VELOCITY_T &seppd,
|
||||
const AMOV_GIMBAL_VELOCITY_T &acc,
|
||||
void *extenData);
|
||||
|
||||
uint32_t setGNSSInfo(float lng, float lat, float alt, uint32_t nState, float relAlt);
|
||||
uint32_t extensionFuntions(void *cmd);
|
||||
static amovGimbal::amovGimbalBase *creat(amovGimbal::IOStreamBase *_IO)
|
||||
{
|
||||
return new GX40GimbalDriver(_IO);
|
||||
}
|
||||
GX40GimbalDriver(amovGimbal::IOStreamBase *_IO);
|
||||
~GX40GimbalDriver()
|
||||
{
|
||||
if (txQueue != nullptr)
|
||||
{
|
||||
delete txQueue;
|
||||
}
|
||||
if (rxQueue != nullptr)
|
||||
{
|
||||
delete rxQueue;
|
||||
}
|
||||
|
||||
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
|
||||
parserThreadHanle = parserThreadHanle == 0 ? 0 : pthread_cancel(parserThreadHanle);
|
||||
sendThreadHanle = sendThreadHanle == 0 ? 0 : pthread_cancel(sendThreadHanle);
|
||||
stackThreadHanle = stackThreadHanle == 0 ? 0 : pthread_cancel(stackThreadHanle);
|
||||
nopSendThreadHandle = nopSendThreadHandle == 0 ? 0 : pthread_cancel(nopSendThreadHandle);
|
||||
}
|
||||
};
|
||||
|
||||
#endif
|
|
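A minimal sketch of constructing the driver through the creat() factory declared above. DummySerial is a placeholder IOStreamBase (a real one would wrap the serial port), and teardown/ownership is left out; in normal use the amovGimbal::gimbal wrapper manages the instance.

#include "amov_gimbal.h"
#include "GX40_gimbal_driver.h"

class DummySerial : public amovGimbal::IOStreamBase
{
public:
    bool open() override { return true; }
    bool close() override { return true; }
    bool isOpen() override { return true; }
    bool isBusy() override { return false; }
    uint32_t inPutBytes(uint8_t *byte) override { return 0; }                        // no data in this stub
    uint32_t outPutBytes(uint8_t *byte, uint32_t lenght) override { return lenght; } // pretend all bytes were sent
};

int main()
{
    DummySerial io;
    amovGimbal::amovGimbalBase *dev = GX40GimbalDriver::creat(&io);
    (void)dev;   // commands are normally issued via the amovGimbal::gimbal wrapper
    return 0;
}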
@ -0,0 +1,251 @@
|
|||
/*
|
||||
* @Description:
|
||||
* @Author: L LC @amov
|
||||
* @Date: 2023-11-02 17:50:26
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-12-05 16:29:13
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/GX40/GX40_gimbal_funtion.cpp
|
||||
*/
|
||||
#include <string.h>
|
||||
#include "GX40_gimbal_driver.h"
|
||||
|
||||
/**
|
||||
* The function sets the target position of a gimbal based on the input roll, pitch, and yaw values.
|
||||
*
|
||||
* @param pos The parameter "pos" is of type "AMOV_GIMBAL_POS_T". It is a structure that
|
||||
* contains the roll, pitch, and yaw values of the gimbal position.
|
||||
*
|
||||
* @return a packed value of type uint32_t.
|
||||
*/
|
||||
uint32_t GX40GimbalDriver::setGimabalPos(const AMOV_GIMBAL_POS_T &pos)
|
||||
{
|
||||
carrierStateMutex.lock();
|
||||
targetPos[0] = (int16_t)(pos.roll / 0.01f);
|
||||
targetPos[1] = (int16_t)(-pos.pitch / 0.01f);
|
||||
targetPos[2] = (int16_t)(pos.yaw / 0.01f);
|
||||
carrierStateMutex.unlock();
|
||||
return pack(GX40::GIMBAL_CMD_MODE_EULER, nullptr, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* The function sets the gimbal speed based on the provided roll, pitch, and yaw values.
|
||||
*
|
||||
* @param speed The parameter "speed" is of type "AMOV_GIMBAL_POS_T". It is a structure
|
||||
* that contains the roll, pitch, and yaw values of the gimbal speed.
|
||||
*
|
||||
* @return the result of the pack() function, which is of type uint32_t.
|
||||
*/
|
||||
uint32_t GX40GimbalDriver::setGimabalSpeed(const AMOV_GIMBAL_POS_T &speed)
|
||||
{
|
||||
carrierStateMutex.lock();
|
||||
targetPos[0] = (int16_t)(speed.roll / 0.1f);
|
||||
targetPos[1] = (int16_t)(-speed.pitch / 0.1f);
|
||||
targetPos[2] = (int16_t)(speed.yaw / 0.1f);
|
||||
carrierStateMutex.unlock();
|
||||
|
||||
return pack(GX40::GIMBAL_CMD_MODE_FOLLOW, nullptr, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* The function sets the gimbal's home position to (0, 0, 0) and sends a command to the gimbal to go to
|
||||
* the home position.
|
||||
*
|
||||
* @return the result of the pack() function call with the arguments GX40::GIMBAL_CMD_HOME, nullptr, and
|
||||
* 0.
|
||||
*/
|
||||
uint32_t GX40GimbalDriver::setGimabalHome(void)
|
||||
{
|
||||
carrierStateMutex.lock();
|
||||
targetPos[0] = 0;
|
||||
targetPos[1] = 0;
|
||||
targetPos[2] = 0;
|
||||
carrierStateMutex.unlock();
|
||||
|
||||
pack(GX40::GIMBAL_CMD_MODE_FOLLOW, nullptr, 0);
|
||||
return pack(GX40::GIMBAL_CMD_HOME, nullptr, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* The function `takePic` in the `GX40GimbalDriver` class takes a picture using the GX40 gimbal and
|
||||
* returns the packed command.
|
||||
*
|
||||
* @return a uint32_t value.
|
||||
*/
|
||||
uint32_t GX40GimbalDriver::takePic(void)
|
||||
{
|
||||
uint8_t temp = 0X01;
|
||||
|
||||
return pack(GX40::GIMBAL_CMD_TAKEPIC, &temp, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* The function `setVideo` toggles the video state of a gimbal driver and returns a packed command.
|
||||
*
|
||||
* @param newState The parameter `newState` is of type `AMOV_GIMBAL_VIDEO_T`, which is an
|
||||
* enumeration representing the state of the video in the gimbal. It can have two possible values:
|
||||
*
|
||||
* @return the result of the `pack` function, which is a `uint32_t` value.
|
||||
*/
|
||||
uint32_t GX40GimbalDriver::setVideo(const AMOV_GIMBAL_VIDEO_T newState)
|
||||
{
|
||||
uint8_t temp = 0X01;
|
||||
|
||||
mState.lock();
|
||||
if (state.video == AMOV_GIMBAL_VIDEO_TAKE)
|
||||
{
|
||||
state.video = AMOV_GIMBAL_VIDEO_OFF;
|
||||
}
|
||||
else
|
||||
{
|
||||
state.video = AMOV_GIMBAL_VIDEO_TAKE;
|
||||
}
|
||||
mState.unlock();
|
||||
|
||||
return pack(GX40::GIMBAL_CMD_TAKEPIC, &temp, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* The function `attitudeCorrection` updates the state of a gimbal driver with position, velocity, and
|
||||
* acceleration data.
|
||||
*
|
||||
* @param pos The "pos" parameter is of type "AMOV_GIMBAL_POS_T" and represents the current
|
||||
* position of the gimbal. It likely contains information such as the pitch, yaw, and roll angles of
|
||||
* the gimbal.
|
||||
* @param seppd The parameter `seppd` stands for "Separate Pointing Device" and represents the velocity
|
||||
* of the gimbal in separate axes (e.g., pitch, yaw, roll). It is of type
|
||||
* `AMOV_GIMBAL_VELOCITY_T`.
|
||||
* @param acc The "acc" parameter is of type "AMOV_GIMBAL_VELOCITY_T" and represents the
|
||||
* acceleration of the gimbal.
|
||||
* @param extenData The extenData parameter is a pointer to additional data that can be passed to the
|
||||
* attitudeCorrection function. It can be used to provide any extra information or context that may be
|
||||
* needed for the attitude correction calculation. The specific type and structure of the extenData is
|
||||
* not provided in the code snippet,
|
||||
*
|
||||
* @return the size of the data being passed as arguments. The size is calculated by adding the sizes
|
||||
* of the three types: sizeof(AMOV_GIMBAL_POS_T),
|
||||
* sizeof(AMOV_GIMBAL_VELOCITY_T), and sizeof(AMOV_GIMBAL_VELOCITY_T).
|
||||
*/
|
||||
uint32_t GX40GimbalDriver::attitudeCorrection(const AMOV_GIMBAL_POS_T &pos,
|
||||
const AMOV_GIMBAL_VELOCITY_T &seppd,
|
||||
const AMOV_GIMBAL_VELOCITY_T &acc,
|
||||
void *extenData)
|
||||
{
|
||||
carrierStateMutex.lock();
|
||||
carrierPos = pos;
|
||||
carrierSpeed = seppd;
|
||||
carrierAcc = acc;
|
||||
upDataTs = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch());
|
||||
carrierStateMutex.unlock();
|
||||
return sizeof(AMOV_GIMBAL_POS_T) + sizeof(AMOV_GIMBAL_VELOCITY_T) + sizeof(AMOV_GIMBAL_VELOCITY_T);
|
||||
}
|
||||
|
||||
/**
|
||||
* The function `extensionFuntions` takes a command as input, converts it to a specific format, and
|
||||
* returns a 32-bit unsigned integer.
|
||||
*
|
||||
* @param cmd The parameter "cmd" is a void pointer, which means it can point to any type of data. In
|
||||
* this case, it is being cast to a uint8_t pointer, which means it is expected to point to an array of
|
||||
* uint8_t (8-bit unsigned integers).
|
||||
*
|
||||
* @return a value of type uint32_t.
|
||||
*/
|
||||
uint32_t GX40GimbalDriver::extensionFuntions(void *cmd)
|
||||
{
|
||||
uint8_t *temp = (uint8_t *)cmd;
|
||||
return pack(temp[0], &temp[2], temp[1]);
|
||||
}
|
||||
|
||||
/**
|
||||
* The function `setGimbalZoom` in the `GX40GimbalDriver` class sets the zoom level of a gimbal based on
|
||||
* the specified zoom type and target rate.
|
||||
*
|
||||
* @param zoom The "zoom" parameter is of type AMOV_GIMBAL_ZOOM_T, which is an enumeration
|
||||
* type. It represents the zoom action to be performed on the gimbal. The possible values for this
|
||||
* parameter are:
|
||||
* @param targetRate The targetRate parameter is a float value representing the desired zoom rate for
|
||||
* the gimbal. It is used to control the zoom functionality of the gimbal.
|
||||
*
|
||||
* @return a value of type uint32_t.
|
||||
*/
|
||||
uint32_t GX40GimbalDriver::setGimbalZoom(AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
{
|
||||
uint8_t temp[4];
|
||||
uint8_t len = 0;
|
||||
temp[1] = 0X01;
|
||||
if (targetRate == 0.0f)
|
||||
{
|
||||
len = 1;
|
||||
switch (zoom)
|
||||
{
|
||||
case AMOV_GIMBAL_ZOOM_IN:
|
||||
temp[0] = GX40::GIMBAL_CMD_ZOMM_IN;
|
||||
break;
|
||||
case AMOV_GIMBAL_ZOOM_OUT:
|
||||
temp[0] = GX40::GIMBAL_CMD_ZOOM_OUT;
|
||||
break;
|
||||
case AMOV_GIMBAL_ZOOM_STOP:
|
||||
temp[0] = GX40::GIMBAL_CMD_ZOOM_STOP;
|
||||
break;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
len = 3;
|
||||
temp[0] = GX40::GIMBAL_CMD_ZOOM;
|
||||
int16_t targetTemp = (int16_t)(-targetRate / 0.1f);
|
||||
temp[2] = (targetTemp >> 0) & 0XFF;
|
||||
temp[3] = (targetTemp >> 8) & 0XFF;
|
||||
}
|
||||
|
||||
return pack(temp[0], &temp[1], len);
|
||||
}
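For a non-zero targetRate the payload sent above is the 0x01 marker followed by the scaled rate as a little-endian int16 (sign inverted, 0.1 steps); a quick way to see the raw bytes, with an illustrative rate value:

#include <cstdint>
#include <cstdio>

int main()
{
    float targetRate = 2.5f;                      // illustrative value
    int16_t raw = (int16_t)(-targetRate / 0.1f);  // same scaling as setGimbalZoom()
    uint8_t payload[3] = {0x01,
                          (uint8_t)((raw >> 0) & 0xFF),
                          (uint8_t)((raw >> 8) & 0xFF)};
    printf("raw=%d payload: %02X %02X %02X\n", raw, payload[0], payload[1], payload[2]);
    return 0;
}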
|
||||
|
||||
/**
|
||||
* The function "setGimbalFocus" sets the focus of a gimbal by specifying the zoom level and target
|
||||
* rate.
|
||||
*
|
||||
* @param zoom The zoom parameter is of type AMOV_GIMBAL_ZOOM_T, which is an enumeration
|
||||
* type representing different zoom levels for the gimbal. It is used to specify the desired zoom level
|
||||
* for the gimbal focus.
|
||||
* @param targetRate The targetRate parameter is a float value representing the desired zoom rate for
|
||||
* the gimbal.
|
||||
*
|
||||
* @return the result of the pack() function, which is of type uint32_t.
|
||||
*/
|
||||
uint32_t GX40GimbalDriver::setGimbalFocus(AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
{
|
||||
uint8_t temp = 0X01;
|
||||
|
||||
return pack(GX40::GIMBAL_CMD_FOCUE, &temp, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* The function sets the GNSS information in the carrierGNSS struct and returns the size of the struct.
|
||||
*
|
||||
* @param lng The "lng" parameter represents the longitude value of the GNSS (Global Navigation
|
||||
* Satellite System) information.
|
||||
* @param lat The "lat" parameter represents the latitude value of the GNSS (Global Navigation
|
||||
* Satellite System) information.
|
||||
* @param alt The "alt" parameter represents the altitude value in meters.
|
||||
* @param nState The parameter "nState" represents the state of the GNSS (Global Navigation Satellite
|
||||
* System) information. It is of type uint32_t, which means it is an unsigned 32-bit integer. The
|
||||
* specific values and their meanings for the "nState" parameter are not provided in the code snippet
|
||||
* @param relAlt Relative altitude of the carrier (in meters)
|
||||
*
|
||||
* @return the size of the structure GX40::GIMBAL_SECONDARY_MASTER_FRAME_T.
|
||||
*/
|
||||
uint32_t GX40GimbalDriver::setGNSSInfo(float lng, float lat, float alt, uint32_t nState, float relAlt)
|
||||
{
|
||||
carrierStateMutex.lock();
|
||||
carrierGNSS.head = 0X01;
|
||||
|
||||
carrierGNSS.lng = lng / 1E-7;
|
||||
carrierGNSS.lat = lat / 1E-7;
|
||||
carrierGNSS.alt = alt / 1E-3;
|
||||
|
||||
carrierGNSS.relAlt = relAlt / 1E-3;
|
||||
carrierGNSS.nState = nState;
|
||||
|
||||
carrierStateMutex.unlock();
|
||||
return sizeof(GX40::GIMBAL_SECONDARY_MASTER_FRAME_T);
|
||||
}
|
|
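A quick sanity check of the unit scaling setGNSSInfo() applies: longitude/latitude are stored in 1e-7 degree units and altitudes in millimetres. The coordinates below are illustrative, and note that single-precision floats only carry about seven significant digits.

#include <cstdint>
#include <cstdio>

int main()
{
    float lng = 116.3913f, lat = 39.9075f, alt = 43.5f, relAlt = 120.0f;   // illustrative values
    int32_t lngRaw = (int32_t)(lng / 1E-7);      // ~1.1639e9, in 1e-7 degree units
    int32_t latRaw = (int32_t)(lat / 1E-7);
    int32_t altRaw = (int32_t)(alt / 1E-3);      // 43500 mm
    int32_t relRaw = (int32_t)(relAlt / 1E-3);   // 120000 mm
    printf("%d %d %d %d\n", lngRaw, latRaw, altRaw, relRaw);
    return 0;
}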
@ -0,0 +1,154 @@
|
|||
/*
|
||||
* @Description:
|
||||
* @Author: L LC @amov
|
||||
* @Date: 2023-10-20 16:08:13
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-12-05 16:28:54
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/GX40/GX40_gimbal_struct.h
|
||||
*/
|
||||
#ifndef GX40_GIMBAL_STRUCT_H
|
||||
#define GX40_GIMBAL_STRUCT_H
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
namespace GX40
|
||||
{
|
||||
#define XF_SEND_HEAD 0XE5A8
|
||||
#define XF_RCV_HEAD 0X8A5E
|
||||
#define XF_VERSION 0X00
|
||||
#define XF_ANGLE_DPI 0.01f
|
||||
|
||||
typedef enum
|
||||
{
|
||||
GIMBAL_FRAME_PARSER_STATE_IDLE,
|
||||
GIMBAL_FRAME_PARSER_STATE_HEAD,
|
||||
GIMBAL_FRAME_PARSER_STATE_LEN1,
|
||||
GIMBAL_FRAME_PARSER_STATE_LEN2,
|
||||
GIMBAL_FRAME_PARSER_STATE_VERSION,
|
||||
GIMBAL_FRAME_PARSER_STATE_PAYLOAD,
|
||||
} GIMBAL_FRAME_PARSER_STATE_T;
|
||||
|
||||
typedef enum
|
||||
{
|
||||
GIMBAL_CMD_NOP = 0X00,
|
||||
GIMBAL_CMD_CAL = 0X01,
|
||||
GIMBAL_CMD_HOME = 0X03,
|
||||
GIMBAL_CMD_MODE_FPV = 0X10,
|
||||
GIMBAL_CMD_MODE_LOCK = 0X11,
|
||||
GIMBAL_CMD_MODE_FOLLOW = 0X12,
|
||||
GIMBAL_CMD_MODE_OVERLOCK = 0X13,
|
||||
GIMBAL_CMD_MODE_EULER = 0X14,
|
||||
GIMBAL_CMD_MODE_WATCH_POS = 0X15,
|
||||
GIMBAL_CMD_MODE_WATCH = 0X16,
|
||||
GIMBAL_CMD_MODE_TRACK = 0X17,
|
||||
GIMBAL_CMD_MODE_MOVE = 0X1A,
|
||||
GIMBAL_CMD_MODE_MOVE_TRACK = 0X1B,
|
||||
GIMBAL_CMD_TAKEPIC = 0X20,
|
||||
GIMBAL_CMD_TAKEVIDEO = 0X21,
|
||||
GIMBAL_CMD_ZOOM_OUT = 0X22,
|
||||
GIMBAL_CMD_ZOMM_IN = 0X23,
|
||||
GIMBAL_CMD_ZOOM_STOP = 0X24,
|
||||
GIMBAL_CMD_ZOOM = 0X25,
|
||||
GIMBAL_CMD_FOCUE = 0X26,
|
||||
GIMBAL_CMD_VIDEO_MODE = 0X2A,
|
||||
GIMBAL_CMD_NIGHT = 0X2B,
|
||||
GIMBAL_CMD_OSD = 0X73,
|
||||
GIMBAL_CMD_FIX_MODE = 0X74,
|
||||
GIMBAL_CMD_LIGHT = 0X80,
|
||||
GIMBAL_CMD_TAKE_DISTANCE = 0X81,
|
||||
} GIMBAL_CMD_T;
|
||||
|
||||
#pragma pack(1)
|
||||
|
||||
typedef struct
|
||||
{
|
||||
union
|
||||
{
|
||||
uint8_t u8[2];
|
||||
uint16_t u16;
|
||||
} head;
|
||||
union
|
||||
{
|
||||
uint8_t u8[2];
|
||||
uint16_t u16;
|
||||
} lenght;
|
||||
uint8_t version;
|
||||
uint8_t primaryData[32];
|
||||
uint8_t secondaryData[32];
|
||||
uint8_t otherData[32];
|
||||
union
|
||||
{
|
||||
uint8_t u8[2];
|
||||
uint16_t u16;
|
||||
} crc16;
|
||||
} GIMBAL_FRAME_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
int16_t roll;
|
||||
int16_t pitch;
|
||||
int16_t yaw;
|
||||
uint8_t state;
|
||||
int16_t selfRoll;
|
||||
int16_t selfPitch;
|
||||
uint16_t selfYaw;
|
||||
int16_t accN;
|
||||
int16_t accE;
|
||||
int16_t accUp;
|
||||
int16_t speedN;
|
||||
int16_t speedE;
|
||||
int16_t speedUp;
|
||||
uint8_t secondaryFlag;
|
||||
uint8_t reserve[6];
|
||||
} GIMBAL_PRIMARY_MASTER_FRAME_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
uint8_t workMode;
|
||||
uint16_t state;
|
||||
int16_t offsetX;
|
||||
int16_t offsetY;
|
||||
uint16_t motorRoll;
|
||||
uint16_t motorPitch;
|
||||
uint16_t motorYaw;
|
||||
int16_t roll;
|
||||
int16_t pitch;
|
||||
uint16_t yaw;
|
||||
int16_t speedRoll;
|
||||
int16_t speedPitch;
|
||||
int16_t speedYaw;
|
||||
uint8_t reserve[7];
|
||||
} GIMBAL_PRIMARY_SLAVE_FRAME_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
uint8_t head;
|
||||
int32_t lng;
|
||||
int32_t lat;
|
||||
int32_t alt;
|
||||
uint8_t nState;
|
||||
uint32_t GPSms;
|
||||
int32_t GPSweeks;
|
||||
int32_t relAlt;
|
||||
uint8_t reserve[8];
|
||||
} GIMBAL_SECONDARY_MASTER_FRAME_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
uint8_t head;
|
||||
uint8_t versionHW;
|
||||
uint8_t versionSoft;
|
||||
uint8_t type;
|
||||
uint16_t error;
|
||||
int32_t targetDistance;
|
||||
int32_t targetLng;
|
||||
int32_t targetLat;
|
||||
int32_t targetAlt;
|
||||
uint16_t camera1Zoom;
|
||||
uint16_t camera2Zoom;
|
||||
uint8_t reserve[6];
|
||||
} GIMBAL_SECONDARY_SLAVE_FRAME_T;
|
||||
#pragma pack()
|
||||
}
|
||||
|
||||
#endif
|
|
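Because pack() and parser() copy these structures byte-for-byte, the packed sizes define the wire layout. A tiny sketch that prints them for comparison against the 32-byte primaryData/secondaryData/otherData slots of GIMBAL_FRAME_T (nothing is asserted here):

#include <cstdio>
#include "GX40_gimbal_struct.h"

int main()
{
    printf("GIMBAL_FRAME_T                  %zu\n", sizeof(GX40::GIMBAL_FRAME_T));
    printf("GIMBAL_PRIMARY_MASTER_FRAME_T   %zu\n", sizeof(GX40::GIMBAL_PRIMARY_MASTER_FRAME_T));
    printf("GIMBAL_PRIMARY_SLAVE_FRAME_T    %zu\n", sizeof(GX40::GIMBAL_PRIMARY_SLAVE_FRAME_T));
    printf("GIMBAL_SECONDARY_MASTER_FRAME_T %zu\n", sizeof(GX40::GIMBAL_SECONDARY_MASTER_FRAME_T));
    printf("GIMBAL_SECONDARY_SLAVE_FRAME_T  %zu\n", sizeof(GX40::GIMBAL_SECONDARY_SLAVE_FRAME_T));
    return 0;
}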
@ -3,12 +3,11 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-27 18:10:06
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-03-23 17:24:23
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/Q10f/Q10f_gimbal_crc32.h
|
||||
* @LastEditTime: 2023-12-05 16:28:29
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/Q10f/Q10f_gimbal_crc32.h
|
||||
*/
|
||||
#ifndef Q10F_GIMBAL_CRC32_H
|
||||
#define Q10F_GIMBAL_CRC32_H
|
||||
|
||||
namespace Q10f
|
||||
{
|
||||
static inline unsigned char CheckSum(unsigned char *pData, unsigned short Lenght)
|
||||
|
|
|
@ -3,8 +3,8 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-27 18:10:06
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-07-24 12:03:44
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/Q10f/Q10f_gimbal_driver.cpp
|
||||
* @LastEditTime: 2023-12-05 17:23:15
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/Q10f/Q10f_gimbal_driver.cpp
|
||||
*/
|
||||
#include "Q10f_gimbal_driver.h"
|
||||
#include "Q10f_gimbal_crc32.h"
|
||||
|
@ -86,7 +86,7 @@ void Q10fGimbalDriver::convert(void *buf)
|
|||
state.rel.pitch = tempPos->pitchStatorRotorAngle * Q10F_SCALE_FACTOR_SPEED;
|
||||
updateGimbalStateCallback(state.rel.roll, state.rel.pitch, state.rel.yaw,
|
||||
state.abs.roll, state.abs.pitch, state.abs.yaw,
|
||||
state.fov.x, state.fov.y);
|
||||
state.fov.x, state.fov.y, updataCaller);
|
||||
mState.unlock();
|
||||
|
||||
break;
|
||||
|
|
|
@ -3,19 +3,18 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-28 12:24:21
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-03-28 17:01:00
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/Q10f/Q10f_gimbal_driver.h
|
||||
* @LastEditTime: 2023-12-05 16:27:45
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/Q10f/Q10f_gimbal_driver.h
|
||||
*/
|
||||
#include "../amov_gimbal.h"
|
||||
#ifndef __Q10F_DRIVER_H
|
||||
#define __Q10F_DRIVER_H
|
||||
|
||||
#include "../amov_gimbal_private.h"
|
||||
#include "Q10f_gimbal_struct.h"
|
||||
#include <mutex>
|
||||
#include <malloc.h>
|
||||
#include <iostream>
|
||||
|
||||
#ifndef __Q10F_DRIVER_H
|
||||
#define __Q10F_DRIVER_H
|
||||
|
||||
class Q10fGimbalDriver : protected amovGimbal::amovGimbalBase
|
||||
{
|
||||
private:
|
||||
|
@ -29,16 +28,16 @@ private:
|
|||
|
||||
public:
|
||||
// funtions
|
||||
uint32_t setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &pos);
|
||||
uint32_t setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &speed);
|
||||
uint32_t setGimabalFollowSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &followSpeed);
|
||||
uint32_t setGimabalPos(const AMOV_GIMBAL_POS_T &pos);
|
||||
uint32_t setGimabalSpeed(const AMOV_GIMBAL_POS_T &speed);
|
||||
uint32_t setGimabalFollowSpeed(const AMOV_GIMBAL_POS_T &followSpeed);
|
||||
uint32_t setGimabalHome(void);
|
||||
|
||||
uint32_t setGimbalZoom(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
uint32_t setGimbalFocus(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
uint32_t setGimbalZoom(AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
uint32_t setGimbalFocus(AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
|
||||
uint32_t takePic(void);
|
||||
uint32_t setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState);
|
||||
uint32_t setVideo(const AMOV_GIMBAL_VIDEO_T newState);
|
||||
|
||||
// builds
|
||||
static amovGimbal::amovGimbalBase *creat(amovGimbal::IOStreamBase *_IO)
|
||||
|
|
|
@ -3,8 +3,8 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2023-03-02 10:00:52
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-07-24 14:22:57
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/Q10f/Q10f_gimbal_funtion.cpp
|
||||
* @LastEditTime: 2023-12-05 16:27:39
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/Q10f/Q10f_gimbal_funtion.cpp
|
||||
*/
|
||||
#include "Q10f_gimbal_driver.h"
|
||||
#include "Q10f_gimbal_crc32.h"
|
||||
|
@ -17,7 +17,7 @@
|
|||
*
|
||||
* @return The return value is the number of bytes written to the buffer.
|
||||
*/
|
||||
uint32_t Q10fGimbalDriver::setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &pos)
|
||||
uint32_t Q10fGimbalDriver::setGimabalPos(const AMOV_GIMBAL_POS_T &pos)
|
||||
{
|
||||
Q10f::GIMBAL_SET_POS_MSG_T temp;
|
||||
temp.modeR = Q10f::GIMBAL_CMD_POS_MODE_ANGLE_SPEED;
|
||||
|
@ -41,7 +41,7 @@ uint32_t Q10fGimbalDriver::setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &po
|
|||
*
|
||||
* @return The return value is the number of bytes written to the buffer.
|
||||
*/
|
||||
uint32_t Q10fGimbalDriver::setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &speed)
|
||||
uint32_t Q10fGimbalDriver::setGimabalSpeed(const AMOV_GIMBAL_POS_T &speed)
|
||||
{
|
||||
Q10f::GIMBAL_SET_POS_MSG_T temp;
|
||||
temp.modeR = Q10f::GIMBAL_CMD_POS_MODE_SPEED;
|
||||
|
@ -64,7 +64,7 @@ uint32_t Q10fGimbalDriver::setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &
|
|||
*
|
||||
* @return The return value is the number of bytes written to the buffer.
|
||||
*/
|
||||
uint32_t Q10fGimbalDriver::setGimabalFollowSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &followSpeed)
|
||||
uint32_t Q10fGimbalDriver::setGimabalFollowSpeed(const AMOV_GIMBAL_POS_T &followSpeed)
|
||||
{
|
||||
state.maxFollow.pitch = followSpeed.pitch / 0.1220740379f;
|
||||
state.maxFollow.roll = followSpeed.roll / 0.1220740379f;
|
||||
|
@ -108,25 +108,25 @@ uint32_t Q10fGimbalDriver::takePic(void)
|
|||
*
|
||||
* @return The return value is the number of bytes written to the serial port.
|
||||
*/
|
||||
uint32_t Q10fGimbalDriver::setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState)
|
||||
uint32_t Q10fGimbalDriver::setVideo(const AMOV_GIMBAL_VIDEO_T newState)
|
||||
{
|
||||
uint8_t cmd[2] = {0X01, 0XFF};
|
||||
|
||||
if (newState == amovGimbal::AMOV_GIMBAL_VIDEO_TAKE)
|
||||
if (newState == AMOV_GIMBAL_VIDEO_TAKE)
|
||||
{
|
||||
cmd[0] = 0X02;
|
||||
state.video = amovGimbal::AMOV_GIMBAL_VIDEO_TAKE;
|
||||
state.video = AMOV_GIMBAL_VIDEO_TAKE;
|
||||
}
|
||||
else
|
||||
{
|
||||
cmd[0] = 0X03;
|
||||
state.video = amovGimbal::AMOV_GIMBAL_VIDEO_OFF;
|
||||
state.video = AMOV_GIMBAL_VIDEO_OFF;
|
||||
}
|
||||
|
||||
return pack(Q10f::GIMBAL_CMD_CAMERA, (uint8_t *)cmd, sizeof(cmd));
|
||||
}
|
||||
|
||||
uint32_t Q10fGimbalDriver::setGimbalZoom(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
uint32_t Q10fGimbalDriver::setGimbalZoom(AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
{
|
||||
uint8_t cmd[5] = {0X00, 0X00, 0X00, 0X00, 0XFF};
|
||||
if (targetRate == 0.0f)
|
||||
|
@ -134,13 +134,13 @@ uint32_t Q10fGimbalDriver::setGimbalZoom(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, fl
|
|||
cmd[1] = 0XFF;
|
||||
switch (zoom)
|
||||
{
|
||||
case amovGimbal::AMOV_GIMBAL_ZOOM_IN:
|
||||
case AMOV_GIMBAL_ZOOM_IN:
|
||||
cmd[0] = Q10f::GIMBAL_CMD_ZOOM_IN;
|
||||
break;
|
||||
case amovGimbal::AMOV_GIMBAL_ZOOM_OUT:
|
||||
case AMOV_GIMBAL_ZOOM_OUT:
|
||||
cmd[0] = Q10f::GIMBAL_CMD_ZOOM_OUT;
|
||||
break;
|
||||
case amovGimbal::AMOV_GIMBAL_ZOOM_STOP:
|
||||
case AMOV_GIMBAL_ZOOM_STOP:
|
||||
cmd[0] = Q10f::GIMBAL_CMD_ZOOM_STOP;
|
||||
break;
|
||||
default:
|
||||
|
@ -159,18 +159,18 @@ uint32_t Q10fGimbalDriver::setGimbalZoom(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, fl
|
|||
}
|
||||
}
|
||||
|
||||
uint32_t Q10fGimbalDriver::setGimbalFocus(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
uint32_t Q10fGimbalDriver::setGimbalFocus(AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
{
|
||||
uint8_t cmd[2] = {0X00, 0XFF};
|
||||
switch (zoom)
|
||||
{
|
||||
case amovGimbal::AMOV_GIMBAL_ZOOM_IN:
|
||||
case AMOV_GIMBAL_ZOOM_IN:
|
||||
cmd[0] = Q10f::GIMBAL_CMD_ZOOM_IN;
|
||||
break;
|
||||
case amovGimbal::AMOV_GIMBAL_ZOOM_OUT:
|
||||
case AMOV_GIMBAL_ZOOM_OUT:
|
||||
cmd[0] = Q10f::GIMBAL_CMD_ZOOM_OUT;
|
||||
break;
|
||||
case amovGimbal::AMOV_GIMBAL_ZOOM_STOP:
|
||||
case AMOV_GIMBAL_ZOOM_STOP:
|
||||
cmd[0] = Q10f::GIMBAL_CMD_ZOOM_STOP;
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -3,8 +3,8 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-27 18:10:07
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-07-24 11:51:59
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/Q10f/Q10f_gimbal_struct.h
|
||||
* @LastEditTime: 2023-12-05 16:27:27
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/Q10f/Q10f_gimbal_struct.h
|
||||
*/
|
||||
#ifndef Q10F_GIMBAL_STRUCT_H
|
||||
#define Q10F_GIMBAL_STRUCT_H
|
||||
|
|
|
@ -0,0 +1,111 @@
|
|||
/*
|
||||
* @Description: External interface of amov gimbals
|
||||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-27 18:34:26
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-12-05 17:37:09
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/amovGimbal/amov_gimbal.h
|
||||
*/
|
||||
|
||||
#ifndef AMOV_GIMBAL_H
|
||||
#define AMOV_GIMBAL_H
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <iostream>
|
||||
|
||||
#include "amov_gimbal_struct.h"
|
||||
|
||||
namespace amovGimbal
|
||||
{
|
||||
#define IN
|
||||
#define OUT
|
||||
#define SET
|
||||
|
||||
#ifndef MAX_QUEUE_SIZE
|
||||
#define MAX_QUEUE_SIZE 100
|
||||
#endif
|
||||
|
||||
static inline void idleCallback(double frameAngleRoll, double frameAnglePitch, double frameAngleYaw,
|
||||
double imuAngleRoll, double imuAnglePitch, double imuAngleYaw,
|
||||
double fovX, double fovY, void *caller)
|
||||
{
|
||||
}
|
||||
static inline void idleMsgCallback(void *msg, void *caller)
|
||||
{
|
||||
}
|
||||
|
||||
// Control data input and output
|
||||
class IOStreamBase
|
||||
{
|
||||
public:
|
||||
IOStreamBase() {}
|
||||
virtual ~IOStreamBase() {}
|
||||
|
||||
virtual bool open() = 0;
|
||||
virtual bool close() = 0;
|
||||
virtual bool isOpen() = 0;
|
||||
virtual bool isBusy() = 0;
|
||||
// These two functions need to be thread-safe
|
||||
virtual uint32_t inPutBytes(IN uint8_t *byte) = 0;
|
||||
virtual uint32_t outPutBytes(IN uint8_t *byte, uint32_t lenght) = 0;
|
||||
};
|
||||
|
||||
class gimbal
|
||||
{
|
||||
private:
|
||||
std::string typeName;
|
||||
// Instantiated device handle
|
||||
void *devHandle;
|
||||
|
||||
public:
|
||||
static void inBytesCallback(uint8_t *pData, uint32_t len, gimbal *handle);
|
||||
// Protocol stack function items
|
||||
void startStack(void);
|
||||
void parserAuto(pAmovGimbalStateInvoke callback = idleCallback, void *caller = nullptr);
|
||||
void setParserCallback(pAmovGimbalStateInvoke callback, void *caller = nullptr);
|
||||
void setMsgCallback(pAmovGimbalMsgInvoke callback, void *caller = nullptr);
|
||||
void setRcvBytes(pAmovGimbalInputBytesInvoke callbaclk, void *caller = nullptr);
|
||||
void setSendBytes(pAmovGimbalOutputBytesInvoke callbaclk, void *caller = nullptr);
|
||||
AMOV_GIMBAL_STATE_T getGimabalState(void);
|
||||
|
||||
// non-block functions
|
||||
uint32_t setGimabalPos(const AMOV_GIMBAL_POS_T &pos);
|
||||
uint32_t setGimabalSpeed(const AMOV_GIMBAL_POS_T &speed);
|
||||
uint32_t setGimabalFollowSpeed(const AMOV_GIMBAL_POS_T &followSpeed);
|
||||
|
||||
uint32_t setGimabalHome(void);
|
||||
uint32_t setGimbalZoom(AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
uint32_t setGimbalFocus(AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
uint32_t setGimbalROI(const AMOV_GIMBAL_ROI_T area);
|
||||
uint32_t takePic(void);
|
||||
uint32_t setVideo(const AMOV_GIMBAL_VIDEO_T newState);
|
||||
uint32_t attitudeCorrection(const AMOV_GIMBAL_QUATERNION_T &quaterion,
|
||||
const AMOV_GIMBAL_VELOCITY_T &speed,
|
||||
const AMOV_GIMBAL_VELOCITY_T &acc, void *extenData);
|
||||
uint32_t attitudeCorrection(const AMOV_GIMBAL_POS_T &pos,
|
||||
const AMOV_GIMBAL_VELOCITY_T &seppd,
|
||||
const AMOV_GIMBAL_VELOCITY_T &acc, void *extenData);
|
||||
uint32_t setGNSSInfo(float lng, float lat, float alt, uint32_t nState, float relAlt);
|
||||
uint32_t extensionFuntions(void *cmd);
|
||||
|
||||
// block functions
|
||||
bool setGimbalPosBlock(const AMOV_GIMBAL_POS_T &pos);
|
||||
bool setGimabalHomeBlock(void);
|
||||
bool setGimbalZoomBlock(float targetRate);
|
||||
bool takePicBlock(void);
|
||||
bool calibrationBlock(void);
|
||||
|
||||
std::string name()
|
||||
{
|
||||
return typeName;
|
||||
}
|
||||
|
||||
gimbal(const std::string &type, IOStreamBase *_IO,
|
||||
uint32_t _self = 0x02, uint32_t _remote = 0X80);
|
||||
|
||||
~gimbal();
|
||||
};
|
||||
|
||||
}
|
||||
#endif
|
|
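A hedged usage sketch of the public gimbal wrapper declared above. SerialPort is a stub (a real implementation would read/write the UART), the "GX40" type string is an assumption based on the drivers in this tree, and angles are assumed to be in degrees.

#include "amov_gimbal.h"

class SerialPort : public amovGimbal::IOStreamBase
{
public:
    bool open() override { return true; }
    bool close() override { return true; }
    bool isOpen() override { return true; }
    bool isBusy() override { return false; }
    uint32_t inPutBytes(uint8_t *byte) override { return 0; }                        // read one byte from the UART
    uint32_t outPutBytes(uint8_t *byte, uint32_t lenght) override { return lenght; } // write lenght bytes to the UART
};

static void onState(double frameRoll, double framePitch, double frameYaw,
                    double imuRoll, double imuPitch, double imuYaw,
                    double fovX, double fovY, void *caller)
{
    // gimbal state arrives here once parserAuto() is running
}

int main()
{
    SerialPort io;
    amovGimbal::gimbal dev("GX40", &io);
    dev.startStack();
    dev.parserAuto(onState);

    AMOV_GIMBAL_POS_T pos = {0.0, 0.0, -30.0};   // yaw, roll, pitch (struct field order)
    dev.setGimabalPos(pos);
    return 0;
}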
@ -0,0 +1,55 @@
|
|||
/*
|
||||
* @Description:
|
||||
* @Author: L LC @amov
|
||||
* @Date: 2023-11-24 16:01:22
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-12-06 11:35:58
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/amovGimbal/amov_gimbal_c.h
|
||||
*/
|
||||
#ifndef AMOV_GIMBAL_C_H
|
||||
#define AMOV_GIMBAL_C_H
|
||||
|
||||
#include <stdint.h>
|
||||
#include "amov_gimbal_struct.h"
|
||||
|
||||
extern "C"
|
||||
{
|
||||
// initialization functions
|
||||
void amovGimbalCreat(char *type, uint32_t selfId, uint32_t gimbalId, void *handle);
|
||||
void amovGimbalInBytesCallback(uint8_t *pData, uint32_t len, void *handle);
|
||||
void amovGimbalSetRcvBytes(pAmovGimbalInputBytesInvoke callbaclk, void *handle, void *caller);
|
||||
void amovGimbalSetSendBytes(pAmovGimbalOutputBytesInvoke callbaclk, void *handle, void *caller);
|
||||
void amovGimbalChangeStateCallback(pAmovGimbalStateInvoke callback, void *handle, void *caller);
|
||||
void amovGimbalSetMsgCallback(pAmovGimbalMsgInvoke callback, void *handle, void *caller);
|
||||
void amovGimbalStart(pAmovGimbalStateInvoke callback, void *handle, void *caller);
|
||||
|
||||
// non-block functions
|
||||
uint32_t amovGimbalSetGimabalPos(AMOV_GIMBAL_POS_T *pos, void *handle);
|
||||
uint32_t amovGimbalSetGimabalSpeed(AMOV_GIMBAL_POS_T *speed, void *handle);
|
||||
uint32_t amovGimbalSetGimabalFollowSpeed(AMOV_GIMBAL_POS_T *followSpeed, void *handle);
|
||||
uint32_t amovGimbalSetGimabalHome(void *handle);
|
||||
uint32_t amovGimbalSetGimbalZoom(AMOV_GIMBAL_ZOOM_T zoom, float targetRate, void *handle);
|
||||
uint32_t amovGimbalSetGimbalFocus(AMOV_GIMBAL_ZOOM_T zoom, float targetRate, void *handle);
|
||||
uint32_t amovGimbalSetGimbalROI(AMOV_GIMBAL_ROI_T *area, void *handle);
|
||||
uint32_t amovGimbalTakePic(void *handle);
|
||||
uint32_t amovGimbalSetVideo(AMOV_GIMBAL_VIDEO_T newState, void *handle);
|
||||
uint32_t amovGimbalAttitudeCorrectionQ(AMOV_GIMBAL_QUATERNION_T *quaterion,
|
||||
AMOV_GIMBAL_VELOCITY_T *speed,
|
||||
AMOV_GIMBAL_VELOCITY_T *acc, void *extenData, void *handle);
|
||||
uint32_t amovGimbalAttitudeCorrectionE(AMOV_GIMBAL_POS_T *pos,
|
||||
AMOV_GIMBAL_VELOCITY_T *speed,
|
||||
AMOV_GIMBAL_VELOCITY_T *acc, void *extenData, void *handle);
|
||||
uint32_t amovGimbalSetGNSSInfo(float lng, float lat, float alt, uint32_t nState, float relAlt, void *handle);
|
||||
uint32_t amovGimbalExtensionFuntions(void *cmd, void *handle);
|
||||
void getGimabalState(AMOV_GIMBAL_STATE_T *state, void *handle);
|
||||
void getGimbalType(char *type, void *handle);
|
||||
|
||||
// block functions
|
||||
bool amovGimbalSetGimbalPosBlock(AMOV_GIMBAL_POS_T *pos, void *handle);
|
||||
bool amovGimbalSetGimabalHomeBlock(void *handle);
|
||||
bool amovGimbalSetGimbalZoomBlock(float targetRate, void *handle);
|
||||
bool amovGimbalTakePicBlock(void *handle);
|
||||
bool amovGimbalCalibrationBlock(void *handle);
|
||||
}
|
||||
|
||||
#endif
|
|
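A hedged sketch of driving the gimbal through the C interface above. The exact contract of amovGimbalCreat (how the handle is allocated and owned) is not documented here, so this helper simply assumes a valid handle is passed in.

#include "amov_gimbal_c.h"

void pointDown30(void *gimbalHandle)
{
    AMOV_GIMBAL_POS_T pos;
    pos.yaw = 0.0;
    pos.roll = 0.0;
    pos.pitch = -30.0;                       // degrees assumed
    amovGimbalSetGimabalPos(&pos, gimbalHandle);

    AMOV_GIMBAL_STATE_T state;
    getGimabalState(&state, gimbalHandle);   // snapshot of the last parsed state
}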
@ -0,0 +1,102 @@
|
|||
/*
|
||||
* @Description: Common Data Structures of gimbal
|
||||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-31 11:56:43
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-12-05 17:03:02
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/amovGimbal/amov_gimbal_struct.h
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#ifndef __AMOV_GIMABL_STRUCT_H
|
||||
#define __AMOV_GIMABL_STRUCT_H
|
||||
|
||||
typedef void (*pAmovGimbalStateInvoke)(double frameAngleRoll, double frameAnglePitch, double frameAngleYaw,
|
||||
double imuAngleRoll, double imuAnglePitch, double imuAngleYaw,
|
||||
double fovX, double fovY, void *caller);
|
||||
typedef void (*pAmovGimbalMsgInvoke)(void *msg, void *caller);
|
||||
typedef uint32_t (*pAmovGimbalInputBytesInvoke)(uint8_t *pData, void *caller);
|
||||
typedef uint32_t (*pAmovGimbalOutputBytesInvoke)(uint8_t *pData, uint32_t len, void *caller);
|
||||
|
||||
typedef enum
|
||||
{
|
||||
AMOV_GIMBAL_SERVO_MODE_FPV = 0X10,
|
||||
AMOV_GIMBAL_SERVO_MODE_LOCK = 0X11,
|
||||
AMOV_GIMBAL_SERVO_MODE_FOLLOW = 0X12,
|
||||
AMOV_GIMBAL_SERVO_MODE_OVERLOOK = 0X13,
|
||||
AMOV_GIMBAL_SERVO_MODE_EULER = 0X14,
|
||||
AMOV_GIMBAL_SERVO_MODE_WATCH = 0X16,
|
||||
AMOV_GIMBAL_SERVO_MODE_TRACK = 0X17,
|
||||
} AMOV_GIMBAL_SERVO_MODE_T;
|
||||
|
||||
typedef enum
|
||||
{
|
||||
AMOV_GIMBAL_CAMERA_FLAG_INVERSION = 0X1000,
|
||||
AMOV_GIMBAL_CAMERA_FLAG_IR = 0X0200,
|
||||
AMOV_GIMBAL_CAMERA_FLAG_RF = 0X0100,
|
||||
AMOV_GIMBAL_CAMERA_FLAG_LOCK = 0X0001,
|
||||
} AMOV_GIMBAL_CAMERA_FLAG_T;
|
||||
|
||||
typedef enum
|
||||
{
|
||||
AMOV_GIMBAL_VIDEO_TAKE,
|
||||
AMOV_GIMBAL_VIDEO_OFF
|
||||
} AMOV_GIMBAL_VIDEO_T;
|
||||
|
||||
typedef enum
|
||||
{
|
||||
AMOV_GIMBAL_ZOOM_IN,
|
||||
AMOV_GIMBAL_ZOOM_OUT,
|
||||
AMOV_GIMBAL_ZOOM_STOP
|
||||
} AMOV_GIMBAL_ZOOM_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
double yaw;
|
||||
double roll;
|
||||
double pitch;
|
||||
} AMOV_GIMBAL_POS_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
double x;
|
||||
double y;
|
||||
} AMOV_GIMBAL_FOV_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
AMOV_GIMBAL_SERVO_MODE_T workMode;
|
||||
AMOV_GIMBAL_CAMERA_FLAG_T cameraFlag;
|
||||
AMOV_GIMBAL_VIDEO_T video;
|
||||
AMOV_GIMBAL_POS_T abs;
|
||||
AMOV_GIMBAL_POS_T rel;
|
||||
AMOV_GIMBAL_POS_T relSpeed;
|
||||
AMOV_GIMBAL_POS_T maxFollow;
|
||||
AMOV_GIMBAL_FOV_T fov;
|
||||
} AMOV_GIMBAL_STATE_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
uint32_t centreX;
|
||||
uint32_t centreY;
|
||||
uint32_t hight;
|
||||
uint32_t width;
|
||||
} AMOV_GIMBAL_ROI_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
double q0;
|
||||
double q1;
|
||||
double q2;
|
||||
double q3;
|
||||
} AMOV_GIMBAL_QUATERNION_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
double x; // or N
|
||||
double y; // or E
|
||||
double z; // or UP
|
||||
} AMOV_GIMBAL_VELOCITY_T;
|
||||
|
||||
#endif
|
|
@ -1,328 +0,0 @@
|
|||
/*
|
||||
* @Description:
|
||||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-28 11:54:11
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-08-17 11:57:11
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/amov_gimbal.cpp
|
||||
*/
|
||||
|
||||
// #include "amov_gimbal.h"
|
||||
#include "amov_gimbal_private.h"
|
||||
#include "g1_gimbal_driver.h"
|
||||
#include "Q10f_gimbal_driver.h"
|
||||
#include "AT10_gimbal_driver.h"
|
||||
|
||||
#include <iostream>
|
||||
#include <thread>
|
||||
#include <map>
|
||||
#include <iterator>
|
||||
|
||||
#define MAX_PACK_SIZE 280
|
||||
typedef enum
|
||||
{
|
||||
AMOV_GIMBAL_TYPE_NULL,
|
||||
AMOV_GIMBAL_TYPE_G1 = 1,
|
||||
AMOV_GIMBAL_TYPE_G2,
|
||||
AMOV_GIMBAL_TYPE_Q10,
|
||||
AMOV_GIMBAL_TYPE_AT10,
|
||||
} AMOV_GIMBAL_TYPE_T;
|
||||
|
||||
namespace amovGimbal
|
||||
{
|
||||
typedef amovGimbal::amovGimbalBase *(*createCallback)(amovGimbal::IOStreamBase *_IO);
|
||||
typedef std::map<std::string, createCallback> callbackMap;
|
||||
std::map<std::string, AMOV_GIMBAL_TYPE_T> amovGimbalTypeList =
|
||||
{
|
||||
{"G1", AMOV_GIMBAL_TYPE_G1},
|
||||
{"Q10f", AMOV_GIMBAL_TYPE_Q10},
|
||||
{"AT10", AMOV_GIMBAL_TYPE_AT10}};
|
||||
|
||||
callbackMap amovGimbals =
|
||||
{
|
||||
{"G1", g1GimbalDriver::creat},
|
||||
{"Q10f", Q10fGimbalDriver::creat},
|
||||
{"AT10", AT10GimbalDriver::creat}};
|
||||
}
|
||||
|
||||
/* The amovGimbalCreator class is a factory class that creates an instance of the amovGimbal class */
|
||||
// Factory used to create the gimbal instance
|
||||
class amovGimbalCreator
|
||||
{
|
||||
public:
|
||||
static amovGimbal::amovGimbalBase *createAmovGimbal(const std::string &type, amovGimbal::IOStreamBase *_IO)
|
||||
{
|
||||
amovGimbal::callbackMap::iterator temp = amovGimbal::amovGimbals.find(type);
|
||||
|
||||
if (temp != amovGimbal::amovGimbals.end())
|
||||
{
|
||||
return (temp->second)(_IO);
|
||||
}
|
||||
std::cout << type << " is Unsupported device type!" << std::endl;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
private:
|
||||
amovGimbalCreator()
|
||||
{
|
||||
}
|
||||
static amovGimbalCreator *pInstance;
|
||||
static amovGimbalCreator *getInstance()
|
||||
{
|
||||
if (pInstance == NULL)
|
||||
{
|
||||
pInstance = new amovGimbalCreator();
|
||||
}
|
||||
return pInstance;
|
||||
}
|
||||
|
||||
~amovGimbalCreator();
|
||||
};
|
||||
|
||||
/**
|
||||
* This is a constructor for the amovGimbalBase class that initializes its parent classes with an
|
||||
* IOStreamBase object.
|
||||
*
|
||||
* @param _IO _IO is a pointer to an object of type amovGimbal::IOStreamBase, which is the base class
|
||||
* for input/output streams used by the amovGimbal class. This parameter is passed to the constructor
|
||||
* of amovGimbalBase, which is a derived class of I
|
||||
*/
|
||||
amovGimbal::amovGimbalBase::amovGimbalBase(amovGimbal::IOStreamBase *_IO) : amovGimbal::IamovGimbalBase(), amovGimbal::PamovGimbalBase(_IO)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* The function is a destructor that sleeps for 50 milliseconds and closes an IO object.
|
||||
*/
|
||||
amovGimbal::amovGimbalBase::~amovGimbalBase()
|
||||
{
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(50));
|
||||
IO->close();
|
||||
}
|
||||
|
||||
/**
|
||||
* This function retrieves a packet from a ring buffer queue and returns a boolean value indicating
|
||||
* whether the operation was successful or not.
|
||||
*
|
||||
* @param void void is a keyword in C++ that represents the absence of a type. In this function, it is
|
||||
* used to indicate that the function does not return any value.
|
||||
*
|
||||
* @return a boolean value, which indicates whether or not a data packet was successfully retrieved
|
||||
* from a ring buffer queue.
|
||||
*/
|
||||
bool amovGimbal::amovGimbalBase::getRxPack(OUT void *pack)
|
||||
{
|
||||
bool state = false;
|
||||
state = rxQueue->outCell(pack);
|
||||
return state;
|
||||
}
|
||||
|
||||
/**
|
||||
* This function sends data from a buffer to an output device if it is not busy and open.
|
||||
*/
|
||||
void amovGimbal::amovGimbalBase::send(void)
|
||||
{
|
||||
uint8_t tempBuffer[MAX_PACK_SIZE];
|
||||
|
||||
if (!IO->isBusy() && IO->isOpen())
|
||||
{
|
||||
bool state = false;
|
||||
|
||||
state = txQueue->outCell(&tempBuffer);
|
||||
|
||||
if (state)
|
||||
{
|
||||
IO->outPutBytes((uint8_t *)&tempBuffer, calPackLen(tempBuffer));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* "If the input byte is available, then parse it."
|
||||
*
|
||||
* The function is a loop that runs forever. It calls the IO->inPutByte() function to get a byte from
|
||||
* the serial port. If the byte is available, then it calls the parser() function to parse the byte
|
||||
*/
|
||||
void amovGimbal::amovGimbalBase::parserLoop(void)
|
||||
{
|
||||
uint8_t temp;
|
||||
|
||||
while (1)
|
||||
{
|
||||
if (IO->inPutByte(&temp))
|
||||
{
|
||||
parser(temp);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void amovGimbal::amovGimbalBase::sendLoop(void)
|
||||
{
|
||||
while (1)
|
||||
{
|
||||
send();
|
||||
}
|
||||
}
|
||||
|
||||
void amovGimbal::amovGimbalBase::mainLoop(void)
|
||||
{
|
||||
uint8_t tempBuffer[MAX_PACK_SIZE];
|
||||
|
||||
while (1)
|
||||
{
|
||||
if (getRxPack(tempBuffer))
|
||||
{
|
||||
msgCustomCallback(tempBuffer);
|
||||
convert(tempBuffer);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void amovGimbal::amovGimbalBase::stackStart(void)
|
||||
{
|
||||
if (!this->IO->isOpen())
|
||||
{
|
||||
this->IO->open();
|
||||
}
|
||||
std::thread parserLoop(&amovGimbalBase::parserLoop, this);
|
||||
std::thread sendLoop(&amovGimbalBase::sendLoop, this);
|
||||
parserLoop.detach();
|
||||
sendLoop.detach();
|
||||
}
|
||||
/**
|
||||
* It starts two threads, one for reading data from the serial port and one for sending data to the
|
||||
* serial port
|
||||
*/
|
||||
void amovGimbal::gimbal::startStack(void)
|
||||
{
|
||||
((amovGimbalBase *)(this->dev))->stackStart();
|
||||
}
|
||||
|
||||
void amovGimbal::gimbal::setParserCallback(amovGimbal::pStateInvoke callback)
|
||||
{
|
||||
((amovGimbalBase *)(this->dev))->updateGimbalStateCallback = callback;
|
||||
}
|
||||
|
||||
void amovGimbal::amovGimbalBase::parserStart(pStateInvoke callback)
|
||||
{
|
||||
this->updateGimbalStateCallback = callback;
|
||||
|
||||
std::thread mainLoop(&amovGimbalBase::mainLoop, this);
|
||||
|
||||
mainLoop.detach();
|
||||
}
|
||||
/**
|
||||
* The function creates a thread that runs the mainLoop function
|
||||
*/
|
||||
void amovGimbal::gimbal::parserAuto(pStateInvoke callback)
|
||||
{
|
||||
((amovGimbalBase *)(this->dev))->parserStart(callback);
|
||||
}
|
||||
|
||||
void amovGimbal::gimbal::setMsgCallback(pMsgInvoke callback)
|
||||
{
|
||||
((amovGimbalBase *)(this->dev))->msgCustomCallback = callback;
|
||||
}
|
||||
|
||||
amovGimbal::AMOV_GIMBAL_STATE_T amovGimbal::gimbal::getGimabalState(void)
|
||||
{
|
||||
((amovGimbalBase *)(this->dev))->mState.lock();
|
||||
AMOV_GIMBAL_STATE_T temp = ((amovGimbalBase *)(this->dev))->state;
|
||||
((amovGimbalBase *)(this->dev))->mState.unlock();
|
||||
return temp;
|
||||
}
|
||||
|
||||
/**
|
||||
* Default implementation of interface functions, not pure virtual functions for ease of extension.
|
||||
*/
|
||||
void amovGimbal::IamovGimbalBase::nodeSet(SET uint32_t _self, SET uint32_t _remote)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &pos)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &speed)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setGimabalFollowSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &followSpeed)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setGimabalHome(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setGimbalZoom(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setGimbalFocus(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setGimbalROI(const amovGimbal::AMOV_GIMBAL_ROI_T area)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::takePic(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::attitudeCorrection(const AMOV_GIMBAL_QUATERNION_T &quaterion, const AMOV_GIMBAL_VELOCITY_T &speed, const AMOV_GIMBAL_VELOCITY_T &acc, void *extenData)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::attitudeCorrection(const AMOV_GIMBAL_POS_T &pos, const AMOV_GIMBAL_VELOCITY_T &seppd, const AMOV_GIMBAL_VELOCITY_T &acc, void *extenData)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::extensionFuntions(void *cmd)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* The function creates a new gimbal object, which is a pointer to a new amovGimbal object, which is a
|
||||
* pointer to a new Gimbal object, which is a pointer to a new IOStreamBase object
|
||||
*
|
||||
* @param type the type of the device, which is the same as the name of the class
|
||||
* @param _IO The IOStreamBase object that is used to communicate with the device.
|
||||
* @param _self the node ID of the device
|
||||
* @param _remote the node ID of the remote device
|
||||
*/
|
||||
amovGimbal::gimbal::gimbal(const std::string &type, IOStreamBase *_IO,
|
||||
uint32_t _self, uint32_t _remote)
|
||||
{
|
||||
typeName = type;
|
||||
IO = _IO;
|
||||
|
||||
dev = amovGimbalCreator::createAmovGimbal(typeName, IO);
|
||||
|
||||
dev->nodeSet(_self, _remote);
|
||||
}
|
||||
|
||||
amovGimbal::gimbal::~gimbal()
|
||||
{
|
||||
// 先干掉请求线程
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(50));
|
||||
delete dev;
|
||||
}
|
|
@ -1,101 +0,0 @@
|
|||
/*
|
||||
* @Description: External interface of amov gimbals
|
||||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-27 18:34:26
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-08-16 22:21:28
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/amov_gimbal.h
|
||||
*/
|
||||
|
||||
#ifndef AMOV_GIMBAL_H
|
||||
#define AMOV_GIMBAL_H
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <iostream>
|
||||
|
||||
#include "amov_gimbal_struct.h"
|
||||
|
||||
#define MAX_QUEUE_SIZE 100
|
||||
namespace amovGimbal
|
||||
{
|
||||
#define IN
|
||||
#define OUT
|
||||
#define SET
|
||||
|
||||
static inline void idleCallback(double &frameAngleRoll, double &frameAnglePitch, double &frameAngleYaw,
|
||||
double &imuAngleRoll, double &imuAnglePitch, double &imuAngleYaw,
|
||||
double &fovX, double &fovY)
|
||||
{
|
||||
}
|
||||
static inline void idleMsgCallback(void *)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
// Control data input and output
|
||||
class IOStreamBase
|
||||
{
|
||||
public:
|
||||
IOStreamBase() {}
|
||||
virtual ~IOStreamBase() {}
|
||||
|
||||
virtual bool open() = 0;
|
||||
virtual bool close() = 0;
|
||||
virtual bool isOpen() = 0;
|
||||
virtual bool isBusy() = 0;
|
||||
// These two functions need to be thread-safe
|
||||
virtual bool inPutByte(IN uint8_t *byte) = 0;
|
||||
virtual uint32_t outPutBytes(IN uint8_t *byte, uint32_t lenght) = 0;
|
||||
};
|
||||
|
||||
// Device interface
|
||||
class IamovGimbalBase
|
||||
{
|
||||
public:
|
||||
IamovGimbalBase() {}
|
||||
virtual ~IamovGimbalBase() {}
|
||||
// functions
|
||||
virtual void nodeSet(SET uint32_t _self, SET uint32_t _remote);
|
||||
virtual uint32_t setGimabalPos(const AMOV_GIMBAL_POS_T &pos);
|
||||
virtual uint32_t setGimabalSpeed(const AMOV_GIMBAL_POS_T &speed);
|
||||
virtual uint32_t setGimabalFollowSpeed(const AMOV_GIMBAL_POS_T &followSpeed);
|
||||
virtual uint32_t setGimabalHome(void);
|
||||
virtual uint32_t setGimbalZoom(AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
virtual uint32_t setGimbalFocus(AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
virtual uint32_t setGimbalROI(const AMOV_GIMBAL_ROI_T area);
|
||||
virtual uint32_t takePic(void);
|
||||
virtual uint32_t setVideo(const AMOV_GIMBAL_VIDEO_T newState);
|
||||
virtual uint32_t attitudeCorrection(const AMOV_GIMBAL_QUATERNION_T &quaterion, const AMOV_GIMBAL_VELOCITY_T &speed, const AMOV_GIMBAL_VELOCITY_T &acc, void *extenData);
|
||||
virtual uint32_t attitudeCorrection(const AMOV_GIMBAL_POS_T &pos, const AMOV_GIMBAL_VELOCITY_T &seppd, const AMOV_GIMBAL_VELOCITY_T &acc, void *extenData);
|
||||
virtual uint32_t extensionFuntions(void *cmd);
|
||||
};
|
||||
|
||||
class gimbal
|
||||
{
|
||||
private:
|
||||
std::string typeName;
|
||||
IOStreamBase *IO;
|
||||
|
||||
public:
|
||||
// Instantiated device handle
|
||||
IamovGimbalBase *dev;
|
||||
|
||||
// Protocol stack function items
|
||||
void startStack(void);
|
||||
void parserAuto(pStateInvoke callback = idleCallback);
|
||||
void setParserCallback(pStateInvoke callback);
|
||||
void setMsgCallback(pMsgInvoke callback);
|
||||
|
||||
AMOV_GIMBAL_STATE_T getGimabalState(void);
|
||||
|
||||
std::string name()
|
||||
{
|
||||
return typeName;
|
||||
}
|
||||
gimbal(const std::string &type, IOStreamBase *_IO,
|
||||
uint32_t _self = 0x02, uint32_t _remote = 0X80);
|
||||
~gimbal();
|
||||
};
|
||||
}
|
||||
#endif
|
|
@ -0,0 +1,90 @@
|
|||
/*
|
||||
* @Description:
|
||||
* @Author: L LC @amov
|
||||
* @Date: 2023-11-24 15:48:47
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-12-05 16:27:10
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/amov_gimbal_factory.cpp
|
||||
*/
|
||||
|
||||
#include "amov_gimbal_private.h"
|
||||
#include "g1_gimbal_driver.h"
|
||||
#include "Q10f_gimbal_driver.h"
|
||||
#include "AT10_gimbal_driver.h"
|
||||
#include "GX40_gimbal_driver.h"
|
||||
|
||||
#include <map>
|
||||
#include <iterator>
|
||||
|
||||
namespace amovGimbalFactory
|
||||
{
|
||||
typedef amovGimbal::amovGimbalBase *(*createCallback)(amovGimbal::IOStreamBase *_IO);
|
||||
typedef std::map<std::string, createCallback> callbackMap;
|
||||
|
||||
callbackMap amovGimbals =
|
||||
{
|
||||
{"G1", g1GimbalDriver::creat},
|
||||
{"Q10f", Q10fGimbalDriver::creat},
|
||||
{"AT10", AT10GimbalDriver::creat},
|
||||
{"GX40", GX40GimbalDriver::creat}};
|
||||
|
||||
/* The amovGimbalCreator class is a factory class that creates an instance of the amovGimbal class */
|
||||
// Factory used to create the gimbal instance
|
||||
class amovGimbalCreator
|
||||
{
|
||||
public:
|
||||
static amovGimbal::amovGimbalBase *createAmovGimbal(const std::string &type, amovGimbal::IOStreamBase *_IO)
|
||||
{
|
||||
callbackMap::iterator temp = amovGimbals.find(type);
|
||||
|
||||
if (temp != amovGimbals.end())
|
||||
{
|
||||
return (temp->second)(_IO);
|
||||
}
|
||||
std::cout << type << " is Unsupported device type!" << std::endl;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
private:
|
||||
amovGimbalCreator()
|
||||
{
|
||||
}
|
||||
static amovGimbalCreator *pInstance;
|
||||
static amovGimbalCreator *getInstance()
|
||||
{
|
||||
if (pInstance == NULL)
|
||||
{
|
||||
pInstance = new amovGimbalCreator();
|
||||
}
|
||||
return pInstance;
|
||||
}
|
||||
|
||||
~amovGimbalCreator();
|
||||
};
|
||||
} // namespace amovGimbalFactory
|
||||
|
||||
/**
|
||||
* The function creates a new gimbal object, which is a pointer to a new amovGimbal object, which is a
|
||||
* pointer to a new Gimbal object, which is a pointer to a new IOStreamBase object
|
||||
*
|
||||
* @param type the type of the device, which is the same as the name of the class
|
||||
* @param _IO The IOStreamBase object that is used to communicate with the device.
|
||||
* @param _self the node ID of the device
|
||||
* @param _remote the node ID of the remote device
|
||||
*/
|
||||
amovGimbal::gimbal::gimbal(const std::string &type, IOStreamBase *_IO,
|
||||
uint32_t _self, uint32_t _remote)
|
||||
{
|
||||
typeName = type;
|
||||
|
||||
devHandle = amovGimbalFactory::amovGimbalCreator::createAmovGimbal(typeName, _IO);
|
||||
|
||||
((amovGimbalBase *)(devHandle))->nodeSet(_self, _remote);
|
||||
}
|
||||
|
||||
amovGimbal::gimbal::~gimbal()
|
||||
{
|
||||
// 先干掉请求线程
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(50));
|
||||
delete ((amovGimbalBase *)(devHandle));
|
||||
}
|
|
@ -0,0 +1,229 @@
|
|||
/*
|
||||
* @Description:
|
||||
* @Author: L LC @amov
|
||||
* @Date: 2023-11-24 16:00:28
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-12-05 17:18:34
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/amov_gimbal_interface.cpp
|
||||
*/
|
||||
#include "amov_gimbal_private.h"
|
||||
|
||||
// must realize
|
||||
void amovGimbal::gimbal::startStack(void)
|
||||
{
|
||||
((amovGimbalBase *)(this->devHandle))->stackStart();
|
||||
}
|
||||
|
||||
void amovGimbal::gimbal::parserAuto(pAmovGimbalStateInvoke callback, void *caller)
|
||||
{
|
||||
((amovGimbalBase *)(this->devHandle))->parserStart(callback, caller);
|
||||
}
|
||||
|
||||
void amovGimbal::gimbal::setParserCallback(pAmovGimbalStateInvoke callback, void *caller)
|
||||
{
|
||||
((amovGimbalBase *)(this->devHandle))->updateGimbalStateCallback = callback;
|
||||
((amovGimbalBase *)(this->devHandle))->updataCaller = caller;
|
||||
}
|
||||
|
||||
void amovGimbal::gimbal::setMsgCallback(pAmovGimbalMsgInvoke callback, void *caller)
|
||||
{
|
||||
((amovGimbalBase *)(this->devHandle))->msgCustomCallback = callback;
|
||||
((amovGimbalBase *)(this->devHandle))->msgCaller = caller;
|
||||
}
|
||||
|
||||
AMOV_GIMBAL_STATE_T amovGimbal::gimbal::getGimabalState(void)
|
||||
{
|
||||
((amovGimbalBase *)(this->devHandle))->mState.lock();
|
||||
AMOV_GIMBAL_STATE_T temp = ((amovGimbalBase *)(this->devHandle))->state;
|
||||
((amovGimbalBase *)(this->devHandle))->mState.unlock();
|
||||
return temp;
|
||||
}
|
||||
|
||||
// gimbal funtions maybe realize
|
||||
uint32_t amovGimbal::gimbal::setGimabalPos(const AMOV_GIMBAL_POS_T &pos)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->setGimabalPos(pos);
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setGimabalPos(const AMOV_GIMBAL_POS_T &pos)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::gimbal::setGimabalSpeed(const AMOV_GIMBAL_POS_T &speed)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->setGimabalSpeed(speed);
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setGimabalSpeed(const AMOV_GIMBAL_POS_T &speed)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::gimbal::setGimabalFollowSpeed(const AMOV_GIMBAL_POS_T &followSpeed)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->setGimabalFollowSpeed(followSpeed);
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setGimabalFollowSpeed(const AMOV_GIMBAL_POS_T &followSpeed)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::gimbal::setGimabalHome(void)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->setGimabalHome();
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setGimabalHome(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::gimbal::setGimbalZoom(AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->setGimbalZoom(zoom, targetRate);
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setGimbalZoom(AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::gimbal::setGimbalFocus(AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->setGimbalFocus(zoom, targetRate);
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setGimbalFocus(AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::gimbal::setGimbalROI(const AMOV_GIMBAL_ROI_T area)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->setGimbalROI(area);
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setGimbalROI(const AMOV_GIMBAL_ROI_T area)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::gimbal::takePic(void)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->takePic();
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::takePic(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::gimbal::setVideo(const AMOV_GIMBAL_VIDEO_T newState)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->setVideo(newState);
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setVideo(const AMOV_GIMBAL_VIDEO_T newState)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::gimbal::attitudeCorrection(const AMOV_GIMBAL_QUATERNION_T &quaterion,
|
||||
const AMOV_GIMBAL_VELOCITY_T &speed,
|
||||
const AMOV_GIMBAL_VELOCITY_T &acc, void *extenData)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->attitudeCorrection(quaterion, speed, acc, extenData);
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::attitudeCorrection(const AMOV_GIMBAL_QUATERNION_T &quaterion,
|
||||
const AMOV_GIMBAL_VELOCITY_T &speed,
|
||||
const AMOV_GIMBAL_VELOCITY_T &acc, void *extenData)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::gimbal::attitudeCorrection(const AMOV_GIMBAL_POS_T &pos,
|
||||
const AMOV_GIMBAL_VELOCITY_T &speed,
|
||||
const AMOV_GIMBAL_VELOCITY_T &acc, void *extenData)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->attitudeCorrection(pos, speed, acc, extenData);
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::attitudeCorrection(const AMOV_GIMBAL_POS_T &pos,
|
||||
const AMOV_GIMBAL_VELOCITY_T &speed,
|
||||
const AMOV_GIMBAL_VELOCITY_T &acc, void *extenData)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::gimbal::setGNSSInfo(float lng, float lat, float alt, uint32_t nState, float relAlt)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->setGNSSInfo(lng, lat, alt, nState, relAlt);
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::setGNSSInfo(float lng, float lat, float alt, uint32_t nState, float relAlt)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::gimbal::extensionFuntions(void *cmd)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->extensionFuntions(cmd);
|
||||
}
|
||||
|
||||
uint32_t amovGimbal::IamovGimbalBase::extensionFuntions(void *cmd)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool amovGimbal::gimbal::setGimbalPosBlock(const AMOV_GIMBAL_POS_T &pos)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->setGimbalPosBlock(pos);
|
||||
}
|
||||
|
||||
bool amovGimbal::IamovGimbalBase::setGimbalPosBlock(const AMOV_GIMBAL_POS_T &pos)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool amovGimbal::gimbal::setGimabalHomeBlock(void)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->setGimabalHomeBlock();
|
||||
}
|
||||
|
||||
bool amovGimbal::IamovGimbalBase::setGimabalHomeBlock(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool amovGimbal::gimbal::setGimbalZoomBlock(float targetRate)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->setGimbalZoomBlock(targetRate);
|
||||
}
|
||||
|
||||
bool amovGimbal::IamovGimbalBase::setGimbalZoomBlock(float targetRate)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool amovGimbal::gimbal::takePicBlock(void)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->takePicBlock();
|
||||
}
|
||||
|
||||
bool amovGimbal::IamovGimbalBase::takePicBlock(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool amovGimbal::gimbal::calibrationBlock(void)
|
||||
{
|
||||
return ((amovGimbalBase *)(this->devHandle))->calibrationBlock();
|
||||
}
|
||||
|
||||
bool amovGimbal::IamovGimbalBase::calibrationBlock(void)
|
||||
{
|
||||
return false;
|
||||
}
|
|
@ -0,0 +1,152 @@
|
|||
/*
|
||||
* @Description:
|
||||
* @Author: L LC @amov
|
||||
* @Date: 2023-11-27 12:28:32
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-12-06 11:36:30
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/amov_gimbal_interface_c.cpp
|
||||
*/
|
||||
#include "amov_gimbal_private.h"
|
||||
#include <string>
|
||||
|
||||
void amovGimbalSetRcvBytes(pAmovGimbalInputBytesInvoke callbaclk, void *handle, void *caller)
|
||||
{
|
||||
((amovGimbal::gimbal *)handle)->setRcvBytes(callbaclk, caller);
|
||||
}
|
||||
|
||||
void amovGimbalSetSendBytes(pAmovGimbalOutputBytesInvoke callbaclk, void *handle, void *caller)
|
||||
{
|
||||
((amovGimbal::gimbal *)handle)->setSendBytes(callbaclk, caller);
|
||||
}
|
||||
|
||||
void amovGimbalInBytesCallback(uint8_t *pData, uint32_t len, void *handle)
|
||||
{
|
||||
amovGimbal::gimbal::inBytesCallback(pData, len, (amovGimbal::gimbal *)handle);
|
||||
}
|
||||
|
||||
void amovGimbalCreat(char *type, uint32_t selfId, uint32_t gimbalId, void *handle)
|
||||
{
|
||||
std::string strType = type;
|
||||
handle = new amovGimbal::gimbal(strType, nullptr, selfId, gimbalId);
|
||||
}
|
||||
|
||||
void amovGimbalStart(pAmovGimbalStateInvoke callback, void *handle, void *caller)
|
||||
{
|
||||
((amovGimbal::gimbal *)handle)->startStack();
|
||||
((amovGimbal::gimbal *)handle)->parserAuto(callback, caller);
|
||||
}
|
||||
|
||||
void amovGimbalChangeStateCallback(pAmovGimbalStateInvoke callback, void *handle, void *caller)
|
||||
{
|
||||
((amovGimbal::gimbal *)handle)->setParserCallback(callback, caller);
|
||||
}
|
||||
|
||||
void amovGimbalSetMsgCallback(pAmovGimbalMsgInvoke callback, void *handle, void *caller)
|
||||
{
|
||||
((amovGimbal::gimbal *)handle)->setMsgCallback(callback, caller);
|
||||
}
|
||||
|
||||
uint32_t amovGimbalSetGimabalPos(AMOV_GIMBAL_POS_T *pos, void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->setGimabalPos(*pos);
|
||||
}
|
||||
|
||||
uint32_t amovGimbalSetGimabalSpeed(AMOV_GIMBAL_POS_T *speed, void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->setGimabalSpeed(*speed);
|
||||
}
|
||||
|
||||
uint32_t amovGimbalSetGimabalFollowSpeed(AMOV_GIMBAL_POS_T *followSpeed, void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->setGimabalFollowSpeed(*followSpeed);
|
||||
}
|
||||
|
||||
uint32_t amovGimbalSetGimabalHome(void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->setGimabalHome();
|
||||
}
|
||||
|
||||
uint32_t amovGimbalSetGimbalZoom(AMOV_GIMBAL_ZOOM_T zoom, float targetRate, void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->setGimbalZoom(zoom, targetRate);
|
||||
}
|
||||
|
||||
uint32_t amovGimbalSetGimbalFocus(AMOV_GIMBAL_ZOOM_T zoom, float targetRate, void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->setGimbalFocus(zoom, targetRate);
|
||||
}
|
||||
|
||||
uint32_t amovGimbalSetGimbalROI(AMOV_GIMBAL_ROI_T *area, void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->setGimbalROI(*area);
|
||||
}
|
||||
|
||||
uint32_t amovGimbalTakePic(void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->takePic();
|
||||
}
|
||||
|
||||
uint32_t amovGimbalSetVideo(AMOV_GIMBAL_VIDEO_T newState, void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->setVideo(newState);
|
||||
}
|
||||
|
||||
uint32_t amovGimbalAttitudeCorrectionQ(AMOV_GIMBAL_QUATERNION_T *quaterion,
|
||||
AMOV_GIMBAL_VELOCITY_T *speed,
|
||||
AMOV_GIMBAL_VELOCITY_T *acc, void *extenData, void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->attitudeCorrection(*quaterion, *speed, *acc, extenData);
|
||||
}
|
||||
|
||||
uint32_t amovGimbalAttitudeCorrectionE(AMOV_GIMBAL_POS_T *pos,
|
||||
AMOV_GIMBAL_VELOCITY_T *speed,
|
||||
AMOV_GIMBAL_VELOCITY_T *acc, void *extenData, void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->attitudeCorrection(*pos, *speed, *acc, extenData);
|
||||
}
|
||||
|
||||
uint32_t amovGimbalSetGNSSInfo(float lng, float lat, float alt, uint32_t nState, float relAlt, void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->setGNSSInfo(lng, lat, alt, nState, relAlt);
|
||||
}
|
||||
|
||||
uint32_t amovGimbalExtensionFuntions(void *cmd, void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->extensionFuntions(cmd);
|
||||
}
|
||||
|
||||
void getGimabalState(AMOV_GIMBAL_STATE_T *state, void *handle)
|
||||
{
|
||||
*state = ((amovGimbal::gimbal *)handle)->getGimabalState();
|
||||
}
|
||||
|
||||
void getGimbalType(char *type, void *handle)
|
||||
{
|
||||
std::string temp = ((amovGimbal::gimbal *)handle)->name();
|
||||
temp.copy(type, temp.size(), 0);
|
||||
}
|
||||
|
||||
bool amovGimbalSetGimbalPosBlock(AMOV_GIMBAL_POS_T *pos, void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->setGimbalPosBlock(*pos);
|
||||
}
|
||||
|
||||
bool amovGimbalSetGimabalHomeBlock(void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->setGimabalHomeBlock();
|
||||
}
|
||||
|
||||
bool amovGimbalSetGimbalZoomBlock(float targetRate, void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->setGimbalZoomBlock(targetRate);
|
||||
}
|
||||
|
||||
bool amovGimbalTakePicBlock(void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->takePicBlock();
|
||||
}
|
||||
|
||||
bool amovGimbalCalibrationBlock(void *handle)
|
||||
{
|
||||
return ((amovGimbal::gimbal *)handle)->calibrationBlock();
|
||||
}
|
|
@ -3,8 +3,8 @@
|
|||
* @Author : Aiyangsky
|
||||
* @Date : 2023-05-13 10:39:20
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-08-16 22:30:53
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/amov_gimbal_private.h
|
||||
* @LastEditTime: 2023-12-05 17:18:06
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/amov_gimbal_private.h
|
||||
*/
|
||||
#ifndef __AMOV_GIMABL_PRIVATE_H
|
||||
#define __AMOV_GIMABL_PRIVATE_H
|
||||
|
@ -12,12 +12,12 @@
|
|||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <iostream>
|
||||
|
||||
#include <thread>
|
||||
#include <unistd.h>
|
||||
#include <mutex>
|
||||
|
||||
#include "amov_gimbal.h"
|
||||
#include "amovGimbal/amov_gimbal.h"
|
||||
#include "amovGimbal/amov_gimbal_c.h"
|
||||
|
||||
#include "Ring_Fifo.h"
|
||||
#include "amov_tool.h"
|
||||
|
@ -28,14 +28,28 @@ namespace amovGimbal
|
|||
public:
|
||||
AMOV_GIMBAL_STATE_T state;
|
||||
std::mutex mState;
|
||||
IOStreamBase *IO;
|
||||
pStateInvoke updateGimbalStateCallback;
|
||||
pMsgInvoke msgCustomCallback = idleMsgCallback;
|
||||
|
||||
// IO类
|
||||
IOStreamBase *IO = nullptr;
|
||||
// 适用于C的函数指针
|
||||
void *inBytesCaller = nullptr;
|
||||
pAmovGimbalInputBytesInvoke inBytes = nullptr;
|
||||
void *outBytesCaller = nullptr;
|
||||
pAmovGimbalOutputBytesInvoke outBytes = nullptr;
|
||||
|
||||
void *updataCaller = nullptr;
|
||||
pAmovGimbalStateInvoke updateGimbalStateCallback;
|
||||
void *msgCaller = nullptr;
|
||||
pAmovGimbalMsgInvoke msgCustomCallback = idleMsgCallback;
|
||||
|
||||
fifoRing *rxQueue;
|
||||
fifoRing *txQueue;
|
||||
|
||||
PamovGimbalBase(SET IOStreamBase *_IO)
|
||||
std::thread::native_handle_type parserThreadHanle = 0;
|
||||
std::thread::native_handle_type sendThreadHanle = 0;
|
||||
std::thread::native_handle_type stackThreadHanle = 0;
|
||||
|
||||
PamovGimbalBase(IOStreamBase *_IO)
|
||||
{
|
||||
IO = _IO;
|
||||
}
|
||||
|
@ -49,9 +63,45 @@ namespace amovGimbal
|
|||
{
|
||||
delete rxQueue;
|
||||
}
|
||||
// set thread kill anytime
|
||||
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
|
||||
parserThreadHanle = parserThreadHanle == 0 ? 0 : pthread_cancel(parserThreadHanle);
|
||||
sendThreadHanle = sendThreadHanle == 0 ? 0 : pthread_cancel(sendThreadHanle);
|
||||
stackThreadHanle = stackThreadHanle == 0 ? 0 : pthread_cancel(stackThreadHanle);
|
||||
}
|
||||
};
|
||||
|
||||
// Device interface
|
||||
class IamovGimbalBase
|
||||
{
|
||||
public:
|
||||
IamovGimbalBase() {}
|
||||
virtual ~IamovGimbalBase() {}
|
||||
|
||||
// non-block functions
|
||||
virtual uint32_t setGimabalPos(const AMOV_GIMBAL_POS_T &pos);
|
||||
virtual uint32_t setGimabalSpeed(const AMOV_GIMBAL_POS_T &speed);
|
||||
virtual uint32_t setGimabalFollowSpeed(const AMOV_GIMBAL_POS_T &followSpeed);
|
||||
|
||||
virtual uint32_t setGimabalHome(void);
|
||||
virtual uint32_t setGimbalZoom(AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
virtual uint32_t setGimbalFocus(AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
|
||||
virtual uint32_t setGimbalROI(const AMOV_GIMBAL_ROI_T area);
|
||||
virtual uint32_t takePic(void);
|
||||
virtual uint32_t setVideo(const AMOV_GIMBAL_VIDEO_T newState);
|
||||
virtual uint32_t attitudeCorrection(const AMOV_GIMBAL_QUATERNION_T &quaterion, const AMOV_GIMBAL_VELOCITY_T &speed, const AMOV_GIMBAL_VELOCITY_T &acc, void *extenData);
|
||||
virtual uint32_t attitudeCorrection(const AMOV_GIMBAL_POS_T &pos, const AMOV_GIMBAL_VELOCITY_T &seppd, const AMOV_GIMBAL_VELOCITY_T &acc, void *extenData);
|
||||
virtual uint32_t setGNSSInfo(float lng, float lat, float alt, uint32_t nState, float relAlt);
|
||||
virtual uint32_t extensionFuntions(void *cmd);
|
||||
|
||||
// block functions
|
||||
virtual bool setGimbalPosBlock(const AMOV_GIMBAL_POS_T &pos);
|
||||
virtual bool setGimabalHomeBlock(void);
|
||||
virtual bool setGimbalZoomBlock(float targetRate);
|
||||
virtual bool takePicBlock(void);
|
||||
virtual bool calibrationBlock(void);
|
||||
};
|
||||
|
||||
class amovGimbalBase : public IamovGimbalBase, public PamovGimbalBase
|
||||
{
|
||||
public:
|
||||
|
@ -68,7 +118,9 @@ namespace amovGimbal
|
|||
virtual void mainLoop(void);
|
||||
|
||||
virtual void stackStart(void);
|
||||
virtual void parserStart(amovGimbal::pStateInvoke callback);
|
||||
virtual void parserStart(pAmovGimbalStateInvoke callback, void *caller);
|
||||
|
||||
virtual void nodeSet(SET uint32_t _self, SET uint32_t _remote);
|
||||
|
||||
public:
|
||||
amovGimbalBase(IOStreamBase *_IO);
|
||||
|
|
|
@ -0,0 +1,197 @@
|
|||
/*
|
||||
* @Description:
|
||||
* @Author: L LC @amov
|
||||
* @Date: 2023-11-24 15:55:37
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-12-05 17:19:19
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/amov_gimbal_realize.cpp
|
||||
*/
|
||||
|
||||
#include "amov_gimbal_private.h"
|
||||
|
||||
#include <thread>
|
||||
|
||||
#define MAX_PACK_SIZE 280
|
||||
|
||||
/**
|
||||
* This is a constructor for the amovGimbalBase class that initializes its parent classes with an
|
||||
* IOStreamBase object.
|
||||
*
|
||||
* @param _IO _IO is a pointer to an object of type amovGimbal::IOStreamBase, which is the base class
|
||||
* for input/output streams used by the amovGimbal class. This parameter is passed to the constructor
|
||||
* of amovGimbalBase, which is a derived class of I
|
||||
*/
|
||||
amovGimbal::amovGimbalBase::amovGimbalBase(amovGimbal::IOStreamBase *_IO) : amovGimbal::IamovGimbalBase(), amovGimbal::PamovGimbalBase(_IO)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* The function is a destructor that sleeps for 50 milliseconds and closes an IO object.
|
||||
*/
|
||||
amovGimbal::amovGimbalBase::~amovGimbalBase()
|
||||
{
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(50));
|
||||
IO->close();
|
||||
}
|
||||
|
||||
/**
|
||||
* This function retrieves a packet from a ring buffer queue and returns a boolean value indicating
|
||||
* whether the operation was successful or not.
|
||||
*
|
||||
* @param void void is a keyword in C++ that represents the absence of a type. In this function, it is
|
||||
* used to indicate that the function does not return any value.
|
||||
*
|
||||
* @return a boolean value, which indicates whether or not a data packet was successfully retrieved
|
||||
* from a ring buffer queue.
|
||||
*/
|
||||
bool amovGimbal::amovGimbalBase::getRxPack(OUT void *pack)
|
||||
{
|
||||
bool state = false;
|
||||
state = rxQueue->outCell(pack);
|
||||
return state;
|
||||
}
|
||||
|
||||
/**
|
||||
* This function sends data from a buffer to an output device if it is not busy and open.
|
||||
*/
|
||||
void amovGimbal::amovGimbalBase::send(void)
|
||||
{
|
||||
uint8_t tempBuffer[MAX_PACK_SIZE];
|
||||
|
||||
if (IO == nullptr)
|
||||
{
|
||||
while (1)
|
||||
{
|
||||
if (txQueue->outCell(&tempBuffer))
|
||||
{
|
||||
this->outBytes((uint8_t *)&tempBuffer, calPackLen(tempBuffer), outBytesCaller);
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
while (1)
|
||||
{
|
||||
if (!IO->isBusy() && IO->isOpen())
|
||||
{
|
||||
if (txQueue->outCell(&tempBuffer))
|
||||
{
|
||||
IO->outPutBytes((uint8_t *)&tempBuffer, calPackLen(tempBuffer));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* "If the input byte is available, then parse it."
|
||||
*
|
||||
* The function is a loop that runs forever. It calls the IO->inPutByte() function to get a byte from
|
||||
* the serial port. If the byte is available, then it calls the parser() function to parse the byte
|
||||
*/
|
||||
void amovGimbal::amovGimbalBase::parserLoop(void)
|
||||
{
|
||||
uint8_t temp[65536];
|
||||
uint32_t i = 0, getCount = 0;
|
||||
if (IO == nullptr)
|
||||
{
|
||||
while (1)
|
||||
{
|
||||
getCount = inBytes(temp, inBytesCaller);
|
||||
|
||||
for (i = 0; i < getCount; i++)
|
||||
{
|
||||
parser(temp[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
while (1)
|
||||
{
|
||||
getCount = IO->inPutBytes(temp);
|
||||
|
||||
for (i = 0; i < getCount; i++)
|
||||
{
|
||||
parser(temp[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void amovGimbal::amovGimbalBase::sendLoop(void)
|
||||
{
|
||||
send();
|
||||
}
|
||||
|
||||
void amovGimbal::amovGimbalBase::mainLoop(void)
|
||||
{
|
||||
uint8_t tempBuffer[MAX_PACK_SIZE];
|
||||
|
||||
while (1)
|
||||
{
|
||||
if (getRxPack(tempBuffer))
|
||||
{
|
||||
msgCustomCallback(tempBuffer, msgCaller);
|
||||
convert(tempBuffer);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void amovGimbal::amovGimbalBase::stackStart(void)
|
||||
{
|
||||
if (!this->IO->isOpen() && this->IO != nullptr)
|
||||
{
|
||||
this->IO->open();
|
||||
}
|
||||
|
||||
// 当且仅当需要库主动查询时才启用解析器线程
|
||||
if (inBytes != nullptr || this->IO != nullptr)
|
||||
{
|
||||
std::thread parserLoop(&amovGimbalBase::parserLoop, this);
|
||||
this->parserThreadHanle = parserLoop.native_handle();
|
||||
parserLoop.detach();
|
||||
}
|
||||
|
||||
std::thread sendLoop(&amovGimbalBase::sendLoop, this);
|
||||
this->sendThreadHanle = sendLoop.native_handle();
|
||||
sendLoop.detach();
|
||||
}
|
||||
|
||||
void amovGimbal::amovGimbalBase::parserStart(pAmovGimbalStateInvoke callback, void *caller)
|
||||
{
|
||||
this->updateGimbalStateCallback = callback;
|
||||
this->updataCaller = caller;
|
||||
|
||||
std::thread mainLoop(&amovGimbalBase::mainLoop, this);
|
||||
|
||||
this->stackThreadHanle = mainLoop.native_handle();
|
||||
|
||||
mainLoop.detach();
|
||||
}
|
||||
|
||||
void amovGimbal::amovGimbalBase::nodeSet(SET uint32_t _self, SET uint32_t _remote)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
void amovGimbal::gimbal::setRcvBytes(pAmovGimbalInputBytesInvoke callbaclk, void *caller)
|
||||
{
|
||||
((amovGimbal::amovGimbalBase *)(this->devHandle))->inBytes = callbaclk;
|
||||
((amovGimbal::amovGimbalBase *)(this->devHandle))->inBytesCaller = caller;
|
||||
}
|
||||
|
||||
void amovGimbal::gimbal::setSendBytes(pAmovGimbalOutputBytesInvoke callbaclk, void *caller)
|
||||
{
|
||||
((amovGimbal::amovGimbalBase *)(this->devHandle))->outBytes = callbaclk;
|
||||
((amovGimbal::amovGimbalBase *)(this->devHandle))->outBytesCaller = caller;
|
||||
}
|
||||
|
||||
void amovGimbal::gimbal::inBytesCallback(uint8_t *pData, uint32_t len, gimbal *handle)
|
||||
{
|
||||
uint32_t i = 0;
|
||||
for (i = 0; i < len; i++)
|
||||
{
|
||||
((amovGimbalBase *)((handle)->devHandle))->parser(pData[i]);
|
||||
}
|
||||
}
|
|
@ -1,89 +0,0 @@
|
|||
/*
|
||||
* @Description: Common Data Structures of gimbal
|
||||
* @Author: L LC @amov
|
||||
* @Date: 2022-10-31 11:56:43
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-08-16 22:21:46
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/amov_gimbal_struct.h
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#ifndef __AMOV_GIMABL_STRUCT_H
|
||||
#define __AMOV_GIMABL_STRUCT_H
|
||||
|
||||
namespace amovGimbal
|
||||
{
|
||||
typedef void (*pStateInvoke)(double &frameAngleRoll, double &frameAnglePitch, double &frameAngleYaw,
|
||||
double &imuAngleRoll, double &imuAnglePitch, double &imuAngleYaw,
|
||||
double &fovX, double &fovY);
|
||||
typedef void (*pMsgInvoke)(void *msg);
|
||||
|
||||
typedef enum
|
||||
{
|
||||
AMOV_GIMBAL_MODE_LOCK,
|
||||
AMOV_GIMBAL_MODE_NULOCK,
|
||||
} AMOV_GIMBAL_MODE_T;
|
||||
|
||||
typedef enum
|
||||
{
|
||||
AMOV_GIMBAL_VIDEO_TAKE,
|
||||
AMOV_GIMBAL_VIDEO_OFF
|
||||
} AMOV_GIMBAL_VIDEO_T;
|
||||
|
||||
typedef enum
|
||||
{
|
||||
AMOV_GIMBAL_ZOOM_IN,
|
||||
AMOV_GIMBAL_ZOOM_OUT,
|
||||
AMOV_GIMBAL_ZOOM_STOP
|
||||
} AMOV_GIMBAL_ZOOM_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
double yaw;
|
||||
double roll;
|
||||
double pitch;
|
||||
} AMOV_GIMBAL_POS_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
double x;
|
||||
double y;
|
||||
} AMOV_GIMBAL_FOV_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
AMOV_GIMBAL_MODE_T workMode;
|
||||
AMOV_GIMBAL_VIDEO_T video;
|
||||
AMOV_GIMBAL_POS_T abs;
|
||||
AMOV_GIMBAL_POS_T rel;
|
||||
AMOV_GIMBAL_POS_T maxFollow;
|
||||
AMOV_GIMBAL_FOV_T fov;
|
||||
} AMOV_GIMBAL_STATE_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
uint32_t centreX;
|
||||
uint32_t centreY;
|
||||
uint32_t hight;
|
||||
uint32_t width;
|
||||
} AMOV_GIMBAL_ROI_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
double q0;
|
||||
double q1;
|
||||
double q2;
|
||||
double q3;
|
||||
} AMOV_GIMBAL_QUATERNION_T;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
double x;
|
||||
double y;
|
||||
double z;
|
||||
} AMOV_GIMBAL_VELOCITY_T;
|
||||
|
||||
} // namespace amovGimbal
|
||||
|
||||
#endif
|
|
@ -3,10 +3,12 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2023-07-31 18:30:33
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-07-31 18:55:18
|
||||
* @FilePath: /gimbal-sdk-multi-platform/src/amov_tool.h
|
||||
* @LastEditTime: 2023-12-05 16:26:05
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/driver/src/amov_tool.h
|
||||
*/
|
||||
|
||||
#ifndef __AMOVGIMABL_TOOL_H
|
||||
#define __AMOVGIMABL_TOOL_H
|
||||
namespace amovGimbalTools
|
||||
{
|
||||
static inline unsigned short conversionBigLittle(unsigned short value)
|
||||
|
@ -27,3 +29,4 @@ namespace amovGimbalTools
|
|||
return temp;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -3,11 +3,10 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2023-04-12 09:12:52
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-04-18 11:37:42
|
||||
* @FilePath: /spirecv-gimbal-sdk/gimbal_ctrl/sv_gimbal.cpp
|
||||
* @LastEditTime: 2023-12-05 17:33:29
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/sv_gimbal.cpp
|
||||
*/
|
||||
#include "amov_gimbal.h"
|
||||
#include "amov_gimbal_struct.h"
|
||||
#include "amovGimbal/amov_gimbal.h"
|
||||
|
||||
#include "sv_gimbal.h"
|
||||
#include "sv_gimbal_io.hpp"
|
||||
|
@ -20,18 +19,42 @@ namespace sv
|
|||
std::map<std::string, void *> Gimbal::IOList;
|
||||
std::mutex Gimbal::IOListMutex;
|
||||
|
||||
std::map<GimbalType, std::string> gimbaltypeList =
|
||||
{
|
||||
{GimbalType::G1, "G1"},
|
||||
{GimbalType::Q10f, "Q10f"},
|
||||
{GimbalType::AT10, "AT10"}};
|
||||
|
||||
std::string &cvGimbalType2Str(const GimbalType &type)
|
||||
typedef struct
|
||||
{
|
||||
std::map<GimbalType, std::string>::iterator temp = gimbaltypeList.find(type);
|
||||
std::string name;
|
||||
GimbalLink supportLink;
|
||||
} gimbalTrait;
|
||||
|
||||
std::map<GimbalType, gimbalTrait> gimbaltypeList =
|
||||
{
|
||||
{GimbalType::G1, {"G1", GimbalLink::SERIAL}},
|
||||
{GimbalType::Q10f, {"Q10f", GimbalLink::SERIAL}},
|
||||
{GimbalType::AT10, {"AT10", GimbalLink::SERIAL | GimbalLink::ETHERNET_TCP}},
|
||||
{GimbalType::GX40, {"GX40", GimbalLink::SERIAL | GimbalLink::ETHERNET_TCP | GimbalLink::ETHERNET_UDP}}};
|
||||
|
||||
/**
|
||||
* The function `svGimbalType2Str` converts a `GimbalType` enum value to its corresponding string
|
||||
* representation.
|
||||
*
|
||||
* @return a reference to a string.
|
||||
*/
|
||||
std::string &svGimbalType2Str(const GimbalType &type)
|
||||
{
|
||||
std::map<GimbalType, gimbalTrait>::iterator temp = gimbaltypeList.find(type);
|
||||
if (temp != gimbaltypeList.end())
|
||||
{
|
||||
return (temp->second);
|
||||
return (temp->second).name;
|
||||
}
|
||||
throw "Error: Unsupported gimbal device type!!!!";
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
GimbalLink &svGimbalTypeFindLinkType(const GimbalType &type)
|
||||
{
|
||||
std::map<GimbalType, gimbalTrait>::iterator temp = gimbaltypeList.find(type);
|
||||
if (temp != gimbaltypeList.end())
|
||||
{
|
||||
return (temp->second).supportLink;
|
||||
}
|
||||
throw "Error: Unsupported gimbal device type!!!!";
|
||||
exit(-1);
|
||||
|
@ -100,17 +123,29 @@ void sv::Gimbal::setNetIp(const std::string &ip)
|
|||
}
|
||||
|
||||
/**
|
||||
* This function sets the network port for a Gimbal object in C++.
|
||||
* The function sets the TCP network port for the Gimbal object.
|
||||
*
|
||||
* @param port The "port" parameter is an integer value that represents the network port number that
|
||||
* the Gimbal object will use for communication. This function sets the value of the "m_net_port"
|
||||
* member variable of the Gimbal object to the value passed in as the "port" parameter.
|
||||
* @param port The parameter "port" is an integer that represents the TCP network port number.
|
||||
*/
|
||||
void sv::Gimbal::setNetPort(const int &port)
|
||||
void sv::Gimbal::setTcpNetPort(const int &port)
|
||||
{
|
||||
this->m_net_port = port;
|
||||
}
|
||||
|
||||
/**
|
||||
* The function sets the UDP network ports for receiving and sending data.
|
||||
*
|
||||
* @param recvPort The recvPort parameter is the port number that the Gimbal object will use to receive
|
||||
* UDP packets.
|
||||
* @param sendPort The sendPort parameter is the port number used for sending data over UDP (User
|
||||
* Datagram Protocol) network communication.
|
||||
*/
|
||||
void sv::Gimbal::setUdpNetPort(const int &recvPort, const int &sendPort)
|
||||
{
|
||||
this->m_net_recv_port = recvPort;
|
||||
this->m_net_send_port = sendPort;
|
||||
}
|
||||
|
||||
/**
|
||||
* The function sets a parser callback for a gimbal device.
|
||||
*
|
||||
|
@ -121,25 +156,31 @@ void sv::Gimbal::setNetPort(const int &port)
|
|||
void sv::Gimbal::setStateCallback(sv::PStateInvoke callback)
|
||||
{
|
||||
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
|
||||
pdevTemp->setParserCallback(callback);
|
||||
m_callback = callback;
|
||||
pdevTemp->setParserCallback(sv::Gimbal::gimbalUpdataCallback, this);
|
||||
}
|
||||
|
||||
/**
|
||||
* The function `creatIO` creates an IO object for a gimbal device and adds it to a list of IO objects
|
||||
* if it doesn't already exist.
|
||||
* The function `sv::Gimbal::creatIO` creates an IO object based on the specified gimbal type and link
|
||||
* type, and returns a pointer to the created object.
|
||||
*
|
||||
* @param dev A pointer to an instance of the `Gimbal` class.
|
||||
* @param IO The parameter "IO" is a pointer to an object that represents the input/output (IO) device
|
||||
* for the gimbal. It is used to establish a connection with the gimbal and perform communication
|
||||
* operations.
|
||||
* @param dev The "dev" parameter is a pointer to an object of type "sv::Gimbal". It is used to access
|
||||
* the member variables of the Gimbal object, such as "m_serial_port", "m_serial_baud_rate",
|
||||
* "m_serial_timeout", etc. These variables store information about
|
||||
*
|
||||
* @return a boolean value.
|
||||
* @return a void pointer.
|
||||
*/
|
||||
void *sv::Gimbal::creatIO(sv::Gimbal *dev)
|
||||
{
|
||||
IOListMutex.lock();
|
||||
std::map<std::string, void *>::iterator list = IOList.find(dev->m_serial_port);
|
||||
std::pair<std::string, void *> key("NULL", nullptr);
|
||||
GimbalLink link = svGimbalTypeFindLinkType(dev->m_gimbal_type);
|
||||
|
||||
if ((dev->m_gimbal_link & svGimbalTypeFindLinkType(dev->m_gimbal_type)) == GimbalLink::NONE)
|
||||
{
|
||||
throw std::runtime_error("gimbal Unsupported linktype !!!");
|
||||
}
|
||||
|
||||
if (list == IOList.end())
|
||||
{
|
||||
|
@ -159,20 +200,35 @@ void *sv::Gimbal::creatIO(sv::Gimbal *dev)
|
|||
}
|
||||
else if (dev->m_gimbal_link == sv::GimbalLink::ETHERNET_TCP)
|
||||
{
|
||||
TCPClient *tcp;
|
||||
tcp = new TCPClient(dev->m_net_ip, dev->m_net_port);
|
||||
key.first = dev->m_net_ip;
|
||||
key.second = (void *)tcp;
|
||||
IOList.insert(key);
|
||||
}
|
||||
else if (dev->m_gimbal_link == sv::GimbalLink::ETHERNET_UDP)
|
||||
{
|
||||
UDP *udp;
|
||||
udp = new UDP(dev->m_net_ip, dev->m_net_recv_port, dev->m_net_send_port);
|
||||
key.first = dev->m_net_ip;
|
||||
key.second = (void *)udp;
|
||||
IOList.insert(key);
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout << "Error: gimbal IO has opened!!!" << std::endl;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout << "Error: gimbal IO has opened!!!" << std::endl;
|
||||
}
|
||||
IOListMutex.unlock();
|
||||
|
||||
return key.second;
|
||||
}
|
||||
|
||||
/**
|
||||
* The function removes a Gimbal device from the IOList.
|
||||
*
|
||||
* @param dev dev is a pointer to an object of type sv::Gimbal.
|
||||
*/
|
||||
void sv::Gimbal::removeIO(sv::Gimbal *dev)
|
||||
{
|
||||
IOListMutex.lock();
|
||||
|
@ -180,6 +236,7 @@ void sv::Gimbal::removeIO(sv::Gimbal *dev)
|
|||
|
||||
IOListMutex.unlock();
|
||||
}
|
||||
|
||||
/**
|
||||
* The function opens a communication interface with a gimbal device and sets up a parser to handle
|
||||
* incoming data.
|
||||
|
@ -196,14 +253,16 @@ bool sv::Gimbal::open(PStateInvoke callback)
|
|||
bool ret = false;
|
||||
|
||||
this->IO = creatIO(this);
|
||||
|
||||
if (this->IO != nullptr)
|
||||
{
|
||||
std::string driverName;
|
||||
driverName = sv::cvGimbalType2Str(this->m_gimbal_type);
|
||||
driverName = sv::svGimbalType2Str(this->m_gimbal_type);
|
||||
this->dev = new amovGimbal::gimbal(driverName, (amovGimbal::IOStreamBase *)this->IO);
|
||||
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
|
||||
pdevTemp->startStack();
|
||||
pdevTemp->parserAuto(callback);
|
||||
m_callback = callback;
|
||||
pdevTemp->parserAuto(sv::Gimbal::gimbalUpdataCallback, this);
|
||||
|
||||
ret = true;
|
||||
}
|
||||
|
@ -221,7 +280,7 @@ bool sv::Gimbal::open(PStateInvoke callback)
|
|||
bool sv::Gimbal::setHome()
|
||||
{
|
||||
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
|
||||
if (pdevTemp->dev->setGimabalHome() > 0)
|
||||
if (pdevTemp->setGimabalHome() > 0)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
@ -250,7 +309,7 @@ bool sv::Gimbal::setZoom(double x)
|
|||
return false;
|
||||
}
|
||||
|
||||
if (pdevTemp->dev->setGimbalZoom(amovGimbal::AMOV_GIMBAL_ZOOM_STOP, x) > 0)
|
||||
if (pdevTemp->setGimbalZoom(AMOV_GIMBAL_ZOOM_STOP, x) > 0)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
@ -274,7 +333,7 @@ bool sv::Gimbal::setAutoZoom(int state)
|
|||
{
|
||||
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
|
||||
|
||||
if (pdevTemp->dev->setGimbalZoom((amovGimbal::AMOV_GIMBAL_ZOOM_T)state, 0.0f) > 0)
|
||||
if (pdevTemp->setGimbalZoom((AMOV_GIMBAL_ZOOM_T)state, 0.0f) > 0)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
@ -300,7 +359,7 @@ bool sv::Gimbal::setAutoFocus(int state)
|
|||
{
|
||||
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
|
||||
|
||||
if (pdevTemp->dev->setGimbalFocus((amovGimbal::AMOV_GIMBAL_ZOOM_T)state, 0.0f) > 0)
|
||||
if (pdevTemp->setGimbalFocus((AMOV_GIMBAL_ZOOM_T)state, 0.0f) > 0)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
@ -320,7 +379,7 @@ bool sv::Gimbal::takePhoto()
|
|||
{
|
||||
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
|
||||
|
||||
if (pdevTemp->dev->takePic() > 0)
|
||||
if (pdevTemp->takePic() > 0)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
@ -344,21 +403,21 @@ bool sv::Gimbal::takeVideo(int state)
|
|||
{
|
||||
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
|
||||
|
||||
amovGimbal::AMOV_GIMBAL_VIDEO_T newState;
|
||||
AMOV_GIMBAL_VIDEO_T newState;
|
||||
switch (state)
|
||||
{
|
||||
case 0:
|
||||
newState = amovGimbal::AMOV_GIMBAL_VIDEO_OFF;
|
||||
newState = AMOV_GIMBAL_VIDEO_OFF;
|
||||
break;
|
||||
case 1:
|
||||
newState = amovGimbal::AMOV_GIMBAL_VIDEO_TAKE;
|
||||
newState = AMOV_GIMBAL_VIDEO_TAKE;
|
||||
break;
|
||||
default:
|
||||
newState = amovGimbal::AMOV_GIMBAL_VIDEO_OFF;
|
||||
newState = AMOV_GIMBAL_VIDEO_OFF;
|
||||
break;
|
||||
}
|
||||
|
||||
if (pdevTemp->dev->setVideo(newState) > 0)
|
||||
if (pdevTemp->setVideo(newState) > 0)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
@ -379,13 +438,13 @@ int sv::Gimbal::getVideoState()
|
|||
{
|
||||
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
|
||||
int ret;
|
||||
amovGimbal::AMOV_GIMBAL_STATE_T state;
|
||||
AMOV_GIMBAL_STATE_T state;
|
||||
state = pdevTemp->getGimabalState();
|
||||
if (state.video == amovGimbal::AMOV_GIMBAL_VIDEO_TAKE)
|
||||
if (state.video == AMOV_GIMBAL_VIDEO_TAKE)
|
||||
{
|
||||
ret = 1;
|
||||
}
|
||||
else if (state.video == amovGimbal::AMOV_GIMBAL_VIDEO_OFF)
|
||||
else if (state.video == AMOV_GIMBAL_VIDEO_OFF)
|
||||
{
|
||||
ret = 0;
|
||||
}
|
||||
|
@ -413,7 +472,7 @@ void sv::Gimbal::setAngleEuler(double roll, double pitch, double yaw,
|
|||
{
|
||||
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
|
||||
|
||||
amovGimbal::AMOV_GIMBAL_POS_T temp;
|
||||
AMOV_GIMBAL_POS_T temp;
|
||||
|
||||
if (pitch_rate == 0.0f)
|
||||
pitch_rate = 360;
|
||||
|
@ -425,11 +484,11 @@ void sv::Gimbal::setAngleEuler(double roll, double pitch, double yaw,
|
|||
temp.pitch = pitch_rate;
|
||||
temp.roll = roll_rate;
|
||||
temp.yaw = yaw_rate;
|
||||
pdevTemp->dev->setGimabalFollowSpeed(temp);
|
||||
pdevTemp->setGimabalFollowSpeed(temp);
|
||||
temp.pitch = pitch;
|
||||
temp.roll = roll;
|
||||
temp.yaw = yaw;
|
||||
pdevTemp->dev->setGimabalPos(temp);
|
||||
pdevTemp->setGimabalPos(temp);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -444,11 +503,55 @@ void sv::Gimbal::setAngleRateEuler(double roll_rate, double pitch_rate, double y
|
|||
{
|
||||
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
|
||||
|
||||
amovGimbal::AMOV_GIMBAL_POS_T temp;
|
||||
AMOV_GIMBAL_POS_T temp;
|
||||
temp.pitch = pitch_rate;
|
||||
temp.roll = roll_rate;
|
||||
temp.yaw = yaw_rate;
|
||||
pdevTemp->dev->setGimabalSpeed(temp);
|
||||
pdevTemp->setGimabalSpeed(temp);
|
||||
}
|
||||
void sv::Gimbal::attitudeCorrection(const GimbalQuaternionT &quaterion,
|
||||
const GimbalVelocityT &speed,
|
||||
const GimbalVelocityT &acc, void *extenData)
|
||||
{
|
||||
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
|
||||
AMOV_GIMBAL_QUATERNION_T temp_q;
|
||||
temp_q.q0 = quaterion.q0;
|
||||
temp_q.q1 = quaterion.q1;
|
||||
temp_q.q2 = quaterion.q2;
|
||||
temp_q.q3 = quaterion.q3;
|
||||
|
||||
AMOV_GIMBAL_VELOCITY_T temp_v1, temp_v2;
|
||||
temp_v1.x = speed.x;
|
||||
temp_v1.y = speed.y;
|
||||
temp_v1.z = speed.z;
|
||||
|
||||
temp_v2.x = acc.x;
|
||||
temp_v2.y = acc.y;
|
||||
temp_v2.z = acc.z;
|
||||
|
||||
pdevTemp->attitudeCorrection(temp_q, temp_v1, temp_v2, extenData);
|
||||
}
|
||||
|
||||
void sv::Gimbal::attitudeCorrection(const GimbalPosT &pos,
|
||||
const GimbalVelocityT &speed,
|
||||
const GimbalVelocityT &acc, void *extenData)
|
||||
{
|
||||
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
|
||||
AMOV_GIMBAL_VELOCITY_T temp_v1, temp_v2;
|
||||
temp_v1.x = speed.x;
|
||||
temp_v1.y = speed.y;
|
||||
temp_v1.z = speed.z;
|
||||
|
||||
temp_v2.x = acc.x;
|
||||
temp_v2.y = acc.y;
|
||||
temp_v2.z = acc.z;
|
||||
|
||||
AMOV_GIMBAL_POS_T temp_p;
|
||||
temp_p.pitch = pos.pitch;
|
||||
temp_p.yaw = pos.yaw;
|
||||
temp_p.roll = pos.roll;
|
||||
|
||||
pdevTemp->attitudeCorrection(temp_p, temp_v1, temp_v2, extenData);
|
||||
}
|
||||
|
||||
sv::Gimbal::~Gimbal()
|
||||
|
|
|
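The master branch also adds two `attitudeCorrection()` overloads to `sv::Gimbal` (seen above) that forward the vehicle quaternion or Euler pose plus velocity and acceleration to the driver. A minimal sketch of feeding attitude in; the header name and the placeholder values are assumptions, and passing a null `extenData` pointer has not been checked against any particular driver:

```cpp
#include <sv_gimbal.h>

// Sketch only: push a level attitude and zero velocity/acceleration into the
// new correction interface; real code would use live autopilot data instead.
void feedAttitude(sv::Gimbal &gimbal)
{
    sv::GimbalQuaternionT q{1.0, 0.0, 0.0, 0.0}; // q0, q1, q2, q3
    sv::GimbalVelocityT vel{0.0, 0.0, 0.0};      // x/y/z, i.e. N/E/Up
    sv::GimbalVelocityT acc{0.0, 0.0, 0.0};
    gimbal.attitudeCorrection(q, vel, acc, nullptr); // extenData: driver-specific
}
```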
@ -3,65 +3,236 @@
|
|||
* @Author: L LC @amov
|
||||
* @Date: 2023-04-12 12:22:09
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-04-13 10:17:21
|
||||
* @FilePath: /spirecv-gimbal-sdk/gimbal_ctrl/sv_gimbal_io.hpp
|
||||
* @LastEditTime: 2023-12-05 17:38:59
|
||||
* @FilePath: /SpireCV/gimbal_ctrl/sv_gimbal_io.hpp
|
||||
*/
|
||||
#ifndef __SV_GIMABL_IO_H
|
||||
#define __SV_GIMABL_IO_H
|
||||
|
||||
#include "amov_gimbal.h"
|
||||
#include "amovGimbal/amov_gimbal.h"
|
||||
#include "serial/serial.h"
|
||||
|
||||
class UART : public amovGimbal::IOStreamBase
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#if defined(_WIN32)
|
||||
#include <winsock2.h>
|
||||
#include <ws2tcpip.h>
|
||||
#endif
|
||||
|
||||
#if defined(__linux__)
|
||||
#include <arpa/inet.h>
|
||||
#endif
|
||||
|
||||
#include <unistd.h>
|
||||
namespace sv
|
||||
{
|
||||
private:
|
||||
serial::Serial *ser;
|
||||
class UART : public amovGimbal::IOStreamBase
|
||||
{
|
||||
private:
|
||||
serial::Serial *ser;
|
||||
|
||||
public:
|
||||
virtual bool open()
|
||||
{
|
||||
ser->open();
|
||||
return true;
|
||||
}
|
||||
virtual bool close()
|
||||
{
|
||||
ser->close();
|
||||
return true;
|
||||
}
|
||||
virtual bool isOpen()
|
||||
{
|
||||
return ser->isOpen();
|
||||
}
|
||||
virtual bool isBusy()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
public:
|
||||
virtual bool open()
|
||||
{
|
||||
ser->open();
|
||||
return true;
|
||||
}
|
||||
virtual bool close()
|
||||
{
|
||||
ser->close();
|
||||
return true;
|
||||
}
|
||||
virtual bool isOpen()
|
||||
{
|
||||
return ser->isOpen();
|
||||
}
|
||||
virtual bool isBusy()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
virtual uint32_t inPutBytes(IN uint8_t *byte)
|
||||
{
|
||||
if (ser->read(byte, 1))
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
virtual uint32_t outPutBytes(IN uint8_t *byte, uint32_t lenght)
|
||||
{
|
||||
return ser->write(byte, lenght);
|
||||
}
|
||||
UART(const std::string &port, uint32_t baudrate, serial::Timeout timeout,
|
||||
serial::bytesize_t bytesize, serial::parity_t parity, serial::stopbits_t stopbits,
|
||||
serial::flowcontrol_t flowcontrol)
|
||||
{
|
||||
ser = new serial::Serial(port, baudrate, timeout, bytesize, parity, stopbits, flowcontrol);
|
||||
}
|
||||
~UART()
|
||||
{
|
||||
ser->close();
|
||||
}
|
||||
};
|
||||
|
||||
virtual bool inPutByte(IN uint8_t *byte)
|
||||
int scoketClose(int scoket)
|
||||
{
|
||||
if (ser->read(byte, 1))
|
||||
#if defined(_WIN32)
|
||||
return closesocket(scoket);
|
||||
#endif
|
||||
#if defined(__linux__)
|
||||
return close(scoket);
|
||||
#endif
|
||||
}
|
||||
class TCPClient : public amovGimbal::IOStreamBase
|
||||
{
|
||||
private:
|
||||
int scoketFd;
|
||||
sockaddr_in scoketAddr;
|
||||
|
||||
public:
|
||||
virtual bool open()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
virtual bool close()
|
||||
{
|
||||
sv::scoketClose(scoketFd);
|
||||
return true;
|
||||
}
|
||||
virtual bool isOpen()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
virtual bool isBusy()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
virtual uint32_t inPutBytes(IN uint8_t *byte)
|
||||
{
|
||||
return recv(scoketFd, (char *)byte, 65536, 0);
|
||||
}
|
||||
virtual uint32_t outPutBytes(IN uint8_t *byte, uint32_t lenght)
|
||||
{
|
||||
return send(scoketFd, (const char *)byte, lenght, 0);
|
||||
}
|
||||
|
||||
virtual uint32_t outPutBytes(IN uint8_t *byte, uint32_t lenght)
|
||||
{
|
||||
return ser->write(byte, lenght);
|
||||
}
|
||||
TCPClient(const std::string &addr, const uint16_t port)
|
||||
{
|
||||
if ((scoketFd = socket(AF_INET, SOCK_STREAM, 0)) == -1)
|
||||
{
|
||||
throw std::runtime_error("scoket creat failed");
|
||||
}
|
||||
memset(&scoketAddr, 0, sizeof(scoketAddr));
|
||||
scoketAddr.sin_family = AF_INET;
|
||||
scoketAddr.sin_addr.s_addr = inet_addr(addr.c_str());
|
||||
|
||||
UART(const std::string &port, uint32_t baudrate, serial::Timeout timeout,
|
||||
serial::bytesize_t bytesize, serial::parity_t parity, serial::stopbits_t stopbits,
|
||||
serial::flowcontrol_t flowcontrol)
|
||||
if (scoketAddr.sin_addr.s_addr == INADDR_NONE ||
|
||||
scoketAddr.sin_addr.s_addr == INADDR_ANY)
|
||||
{
|
||||
sv::scoketClose(scoketFd);
|
||||
|
||||
throw std::runtime_error("scoket addr errr");
|
||||
}
|
||||
scoketAddr.sin_port = htons(port);
|
||||
|
||||
if (connect(scoketFd, (struct sockaddr *)&scoketAddr, sizeof(scoketAddr)) != 0)
|
||||
{
|
||||
sv::scoketClose(scoketFd);
|
||||
|
||||
throw std::runtime_error("scoket connect failed !");
|
||||
}
|
||||
}
|
||||
~TCPClient()
|
||||
{
|
||||
close();
|
||||
}
|
||||
};
|
||||
|
||||
class UDP : public amovGimbal::IOStreamBase
|
||||
{
|
||||
ser = new serial::Serial(port, baudrate, timeout, bytesize, parity, stopbits, flowcontrol);
|
||||
}
|
||||
~UART()
|
||||
{
|
||||
ser->close();
|
||||
delete ser;
|
||||
}
|
||||
};
|
||||
private:
|
||||
int rcvScoketFd, sendScoketFd;
|
||||
sockaddr_in rcvScoketAddr, sendScoketAddr;
|
||||
|
||||
public:
|
||||
virtual bool open()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
virtual bool close()
|
||||
{
|
||||
sv::scoketClose(rcvScoketFd);
|
||||
sv::scoketClose(sendScoketFd);
|
||||
return true;
|
||||
}
|
||||
virtual bool isOpen()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
virtual bool isBusy()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
virtual uint32_t inPutBytes(IN uint8_t *byte)
|
||||
{
|
||||
sockaddr_in remoteAddr;
|
||||
int len = sizeof(remoteAddr);
|
||||
|
||||
return recvfrom(rcvScoketFd, (char *)byte, 65536, 0,
|
||||
(struct sockaddr *)&remoteAddr, reinterpret_cast<socklen_t *>(&len));
|
||||
}
|
||||
virtual uint32_t outPutBytes(IN uint8_t *byte, uint32_t lenght)
|
||||
{
|
||||
return sendto(sendScoketFd, (const char *)byte, lenght, 0,
|
||||
(struct sockaddr *)&sendScoketAddr, sizeof(sendScoketAddr));
|
||||
}
|
||||
|
||||
UDP(const std::string &remoteAddr, const uint16_t rcvPort, uint16_t sendPort)
|
||||
{
|
||||
if ((rcvScoketFd = socket(AF_INET, SOCK_DGRAM, 0)) == -1 ||
|
||||
(sendScoketFd = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
|
||||
{
|
||||
sv::scoketClose(rcvScoketFd);
|
||||
sv::scoketClose(sendScoketFd);
|
||||
throw std::runtime_error("scoket creat failed");
|
||||
}
|
||||
memset(&rcvScoketAddr, 0, sizeof(rcvScoketAddr));
|
||||
memset(&sendScoketAddr, 0, sizeof(sendScoketAddr));
|
||||
sendScoketAddr.sin_family = AF_INET;
|
||||
sendScoketAddr.sin_addr.s_addr = inet_addr(remoteAddr.c_str());
|
||||
sendScoketAddr.sin_port = htons(sendPort);
|
||||
|
||||
if (sendScoketAddr.sin_addr.s_addr == INADDR_NONE ||
|
||||
sendScoketAddr.sin_addr.s_addr == INADDR_ANY)
|
||||
{
|
||||
sv::scoketClose(sendScoketFd);
|
||||
|
||||
throw std::runtime_error("scoket addr errr");
|
||||
}
|
||||
|
||||
rcvScoketAddr.sin_family = AF_INET;
|
||||
rcvScoketAddr.sin_addr.s_addr = INADDR_ANY;
|
||||
rcvScoketAddr.sin_port = htons(rcvPort);
|
||||
|
||||
if (rcvScoketAddr.sin_addr.s_addr == INADDR_NONE)
|
||||
{
|
||||
sv::scoketClose(rcvScoketFd);
|
||||
|
||||
throw std::runtime_error("scoket addr errr");
|
||||
}
|
||||
|
||||
if (bind(rcvScoketFd, (struct sockaddr *)&rcvScoketAddr, sizeof(rcvScoketAddr)) == -1)
|
||||
{
|
||||
sv::scoketClose(rcvScoketFd);
|
||||
|
||||
throw std::runtime_error("scoket bind failed !");
|
||||
}
|
||||
}
|
||||
~UDP()
|
||||
{
|
||||
close();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#endif
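The rewritten `sv_gimbal_io.hpp` above moves the IO wrappers into namespace `sv` and adds TCP and UDP transports beside the original UART class. A rough usage sketch of the UDP transport, assuming the header builds standalone and reusing the default GX40 addresses that appear later in this diff (192.168.144.121, receive port 2338, send port 2337):

```cpp
#include <cstdint>
#include <stdexcept>
#include "sv_gimbal_io.hpp"

int main()
{
    try
    {
        // One receive socket (bound to 2338) and one send socket (towards 2337),
        // matching the UDP constructor shown above.
        sv::UDP io("192.168.144.121", 2338, 2337);

        uint8_t byte = 0;
        // inPutBytes() wraps recvfrom() and returns the number of bytes read.
        while (io.inPutBytes(&byte) > 0)
        {
            // feed each byte into the gimbal protocol parser here
        }
    }
    catch (const std::runtime_error &)
    {
        // The constructor throws if socket creation, the address, or bind() fails.
        return -1;
    }
    return 0;
}
```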
|
|
@ -46,6 +46,7 @@ class ArucoDetector : public CameraAlgorithm
|
|||
public:
|
||||
ArucoDetector();
|
||||
void detect(cv::Mat img_, TargetsInFrame& tgts_);
|
||||
void getIdsWithLengths(std::vector<int>& ids_, std::vector<double>& lengths_);
|
||||
private:
|
||||
void _load();
|
||||
bool _params_loaded;
|
||||
|
@ -104,10 +105,6 @@ public:
|
|||
std::string getAlgorithm();
|
||||
int getBackend();
|
||||
int getTarget();
|
||||
double getObjectWs();
|
||||
double getObjectHs();
|
||||
int useWidthOrHeight();
|
||||
|
||||
protected:
|
||||
virtual bool setupImpl();
|
||||
virtual void initImpl(cv::Mat img_, const cv::Rect& bounding_box_);
|
||||
|
@ -117,9 +114,6 @@ protected:
|
|||
std::string _algorithm;
|
||||
int _backend;
|
||||
int _target;
|
||||
int _use_width_or_height;
|
||||
double _object_ws;
|
||||
double _object_hs;
|
||||
};
|
||||
|
||||
|
||||
|
@ -146,6 +140,8 @@ public:
|
|||
double getThrsConf();
|
||||
int useWidthOrHeight();
|
||||
bool withSegmentation();
|
||||
std::string getModel();
|
||||
int getBatchSize();
|
||||
protected:
|
||||
virtual bool setupImpl();
|
||||
virtual void detectImpl(
|
||||
|
@ -172,9 +168,10 @@ protected:
|
|||
double _thrs_conf;
|
||||
int _use_width_or_height;
|
||||
bool _with_segmentation;
|
||||
std::string _model;
|
||||
int _batch_size;
|
||||
};
|
||||
|
||||
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -25,7 +25,6 @@ public:
|
|||
double line_location_a2;
|
||||
|
||||
bool is_load_parameter;
|
||||
|
||||
std::string line_color;
|
||||
|
||||
protected:
|
||||
|
@ -40,4 +39,4 @@ protected:
|
|||
void seg(cv::Mat line_area_, cv::Mat line_area_a1_, cv::Mat line_area_a2_, std::string line_color_, cv::Point ¢er_, int &area_, cv::Point ¢er_a1_, cv::Point ¢er_a2_);
|
||||
};
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -8,32 +8,36 @@
|
|||
#include <string>
|
||||
#include <chrono>
|
||||
|
||||
|
||||
namespace sv {
|
||||
namespace sv
|
||||
{
|
||||
|
||||
class CommonObjectDetectorCUDAImpl;
|
||||
class CommonObjectDetectorIntelImpl;
|
||||
|
||||
|
||||
class CommonObjectDetector : public CommonObjectDetectorBase
|
||||
{
|
||||
public:
|
||||
CommonObjectDetector();
|
||||
CommonObjectDetector(bool input_4k = false);
|
||||
~CommonObjectDetector();
|
||||
|
||||
protected:
|
||||
bool setupImpl();
|
||||
void detectImpl(
|
||||
cv::Mat img_,
|
||||
std::vector<float>& boxes_x_,
|
||||
std::vector<float>& boxes_y_,
|
||||
std::vector<float>& boxes_w_,
|
||||
std::vector<float>& boxes_h_,
|
||||
std::vector<int>& boxes_label_,
|
||||
std::vector<float>& boxes_score_,
|
||||
std::vector<cv::Mat>& boxes_seg_
|
||||
);
|
||||
std::vector<float> &boxes_x_,
|
||||
std::vector<float> &boxes_y_,
|
||||
std::vector<float> &boxes_w_,
|
||||
std::vector<float> &boxes_h_,
|
||||
std::vector<int> &boxes_label_,
|
||||
std::vector<float> &boxes_score_,
|
||||
std::vector<cv::Mat> &boxes_seg_);
|
||||
|
||||
CommonObjectDetectorCUDAImpl* _cuda_impl;
|
||||
CommonObjectDetectorCUDAImpl *_cuda_impl;
|
||||
CommonObjectDetectorIntelImpl *_intel_impl;
|
||||
|
||||
bool _input_4k;
|
||||
};
|
||||
|
||||
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -3,8 +3,8 @@
|
|||
* @Author: jario-jin @amov
|
||||
* @Date: 2023-04-12 09:12:52
|
||||
* @LastEditors: L LC @amov
|
||||
* @LastEditTime: 2023-04-18 11:49:27
|
||||
* @FilePath: /spirecv-gimbal-sdk/include/sv_gimbal.h
|
||||
* @LastEditTime: 2023-12-05 17:25:40
|
||||
* @FilePath: /SpireCV/include/sv_gimbal.h
|
||||
*/
|
||||
#ifndef __SV_GIMBAL__
|
||||
#define __SV_GIMBAL__
|
||||
|
@ -27,14 +27,26 @@ namespace sv
|
|||
G1,
|
||||
Q10f,
|
||||
AT10,
|
||||
GX40,
|
||||
};
|
||||
enum class GimbalLink
|
||||
enum class GimbalLink : int
|
||||
{
|
||||
SERIAL,
|
||||
ETHERNET_TCP,
|
||||
ETHERNET_UDP
|
||||
NONE = 0x00,
|
||||
SERIAL = 0x01,
|
||||
ETHERNET_TCP = 0x02,
|
||||
ETHERNET_UDP = 0x04,
|
||||
};
|
||||
|
||||
constexpr GimbalLink operator|(GimbalLink a, GimbalLink b)
|
||||
{
|
||||
return static_cast<GimbalLink>(static_cast<int>(a) | static_cast<int>(b));
|
||||
}
|
||||
|
||||
constexpr GimbalLink operator&(GimbalLink a, GimbalLink b)
|
||||
{
|
||||
return static_cast<GimbalLink>(static_cast<int>(a) & static_cast<int>(b));
|
||||
}
|
||||
|
||||
enum class GimablSerialByteSize
|
||||
{
|
||||
FIVE_BYTES = 5,
|
||||
|
@ -66,6 +78,28 @@ namespace sv
|
|||
FLOWCONTROL_HARDWARE = 2,
|
||||
};
|
||||
|
||||
typedef struct
|
||||
{
|
||||
double q0;
|
||||
double q1;
|
||||
double q2;
|
||||
double q3;
|
||||
} GimbalQuaternionT;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
double x; // or N
|
||||
double y; // or E
|
||||
double z; // or UP
|
||||
} GimbalVelocityT;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
double yaw;
|
||||
double roll;
|
||||
double pitch;
|
||||
} GimbalPosT;
|
||||
|
||||
static inline void emptyCallback(double &frameAngleRoll, double &frameAnglePitch, double &frameAngleYaw,
|
||||
double &imuAngleRoll, double &imuAnglePitch, double &imuAngleYaw,
|
||||
double &fovX, double &fovY)
|
||||
|
@ -83,6 +117,7 @@ namespace sv
|
|||
// Device pointers
|
||||
void *dev;
|
||||
void *IO;
|
||||
PStateInvoke m_callback;
|
||||
|
||||
// Generic serial interface parameters list & default parameters
|
||||
std::string m_serial_port = "/dev/ttyUSB0";
|
||||
|
@ -94,8 +129,10 @@ namespace sv
|
|||
int m_serial_timeout = 500;
|
||||
|
||||
// Ethernet interface parameters list & default parameters
|
||||
std::string m_net_ip = "192.168.2.64";
|
||||
int m_net_port = 9090;
|
||||
std::string m_net_ip = "192.168.144.121";
|
||||
int m_net_port = 2332;
|
||||
int m_net_recv_port = 2338;
|
||||
int m_net_send_port = 2337;
|
||||
|
||||
GimbalType m_gimbal_type;
|
||||
GimbalLink m_gimbal_link;
|
||||
|
@ -104,6 +141,14 @@ namespace sv
|
|||
static std::mutex IOListMutex;
|
||||
static void *creatIO(Gimbal *dev);
|
||||
static void removeIO(Gimbal *dev);
|
||||
|
||||
static void gimbalUpdataCallback(double frameAngleRoll, double frameAnglePitch, double frameAngleYaw,
|
||||
double imuAngleRoll, double imuAnglePitch, double imuAngleYaw,
|
||||
double fovX, double fovY, void *handle)
|
||||
{
|
||||
((Gimbal *)(handle))->m_callback(frameAngleRoll, frameAnglePitch, frameAngleYaw, imuAngleRoll, imuAnglePitch, imuAngleYaw, fovX, fovY);
|
||||
}
|
||||
|
||||
public:
|
||||
//! Constructor
|
||||
/*!
|
||||
|
@ -125,7 +170,10 @@ namespace sv
|
|||
|
||||
// set Ethernet interface parameters
|
||||
void setNetIp(const std::string &ip);
|
||||
void setNetPort(const int &port);
|
||||
// set tcp port
|
||||
void setTcpNetPort(const int &port);
|
||||
// set udp port
|
||||
void setUdpNetPort(const int &recvPort, const int &sendPort);
|
||||
|
||||
// Create a device instance
|
||||
void setStateCallback(PStateInvoke callback);
|
||||
|
@ -139,6 +187,12 @@ namespace sv
|
|||
bool takePhoto();
|
||||
bool takeVideo(int state);
|
||||
int getVideoState();
|
||||
void attitudeCorrection(const GimbalQuaternionT &quaterion,
|
||||
const GimbalVelocityT &speed,
|
||||
const GimbalVelocityT &acc, void *extenData);
|
||||
void attitudeCorrection(const GimbalPosT &pos,
|
||||
const GimbalVelocityT &speed,
|
||||
const GimbalVelocityT &acc, void *extenData);
|
||||
|
||||
//! Set gimbal angles
|
||||
/*!
|
||||
|
|
|
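`GimbalLink` is now an explicit bit-flag enum with a `NONE` value and `|` / `&` operators, presumably so a driver can report several supported transports at once. An illustrative snippet; the `hasLink()` helper is hypothetical and not part of the SDK, and the include name is assumed:

```cpp
#include <sv_gimbal.h>

// Hypothetical helper (not in the SDK): true if `link` is contained in `mask`.
static bool hasLink(sv::GimbalLink mask, sv::GimbalLink link)
{
    return (mask & link) != sv::GimbalLink::NONE;
}

int main()
{
    // Combine two transports into one capability mask, then test a single bit.
    sv::GimbalLink supported = sv::GimbalLink::SERIAL | sv::GimbalLink::ETHERNET_UDP;
    return hasLink(supported, sv::GimbalLink::ETHERNET_UDP) ? 0 : 1;
}
```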
@ -12,6 +12,7 @@
|
|||
namespace sv {
|
||||
|
||||
class LandingMarkerDetectorCUDAImpl;
|
||||
class LandingMarkerDetectorIntelImpl;
|
||||
|
||||
class LandingMarkerDetector : public LandingMarkerDetectorBase
|
||||
{
|
||||
|
@ -25,7 +26,8 @@ protected:
|
|||
std::vector<int>& output_labels_
|
||||
);
|
||||
|
||||
LandingMarkerDetectorCUDAImpl* _cuda_impl;
|
||||
LandingMarkerDetectorCUDAImpl *_cuda_impl;
|
||||
LandingMarkerDetectorIntelImpl *_intel_impl;
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -15,7 +15,7 @@ namespace sv {
|
|||
|
||||
|
||||
class SORT;
|
||||
|
||||
class BYTETracker;
|
||||
|
||||
class MultipleObjectTracker : public CameraAlgorithm
|
||||
{
|
||||
|
@ -24,7 +24,7 @@ public:
|
|||
~MultipleObjectTracker();
|
||||
|
||||
void init(CommonObjectDetector* detector_);
|
||||
void track(cv::Mat img_, TargetsInFrame& tgts_);
|
||||
sv::TargetsInFrame track(cv::Mat img_, TargetsInFrame& tgts_);
|
||||
|
||||
private:
|
||||
void _load();
|
||||
|
@ -39,70 +39,12 @@ private:
|
|||
double _iou_thres;
|
||||
int _max_age;
|
||||
int _min_hits;
|
||||
int _frame_rate;
|
||||
int _track_buffer;
|
||||
|
||||
SORT* _sort_impl;
|
||||
BYTETracker* _bytetrack_impl;
|
||||
CommonObjectDetector* _detector;
|
||||
};
|
||||
|
||||
|
||||
// define the tracklet struct to store the tracked objects.
|
||||
struct Tracklet
|
||||
{
|
||||
/* data */
|
||||
public:
|
||||
Eigen::Vector4d bbox; // double x, y, w, h;
|
||||
int id = 0;
|
||||
int age;
|
||||
int hits;
|
||||
int misses;
|
||||
int frame_id = 0;
|
||||
int category_id;
|
||||
bool tentative;
|
||||
std::vector<double> features;
|
||||
Eigen::Matrix<double, 8, 1> mean;
|
||||
Eigen::Matrix<double, 8, 8> covariance;
|
||||
};
|
||||
|
||||
|
||||
class KalmanFilter {
|
||||
public:
|
||||
KalmanFilter();
|
||||
~KalmanFilter();
|
||||
std::pair<Eigen::Matrix<double, 8, 1>, Eigen::Matrix<double, 8, 8> > initiate(Eigen::Vector4d &bbox);
|
||||
std::pair<Eigen::Matrix<double, 8, 1>, Eigen::Matrix<double, 8, 8> > update(Eigen::Matrix<double, 8, 1> mean, Eigen::Matrix<double, 8, 8> covariances, Box &box);
|
||||
std::pair<Eigen::Matrix<double, 8, 1>, Eigen::Matrix<double, 8, 8> > predict(Eigen::Matrix<double, 8, 1> mean, Eigen::Matrix<double, 8, 8> covariances);
|
||||
std::pair<Eigen::Matrix<double, 4, 1>, Eigen::Matrix<double, 4, 4> > project(Eigen::Matrix<double, 8, 1> mean, Eigen::Matrix<double, 8, 8> covariances);
|
||||
private:
|
||||
Eigen::Matrix<double, 8, 8> _F;
|
||||
Eigen::Matrix<double, 4, 8> _H;
|
||||
Eigen::Matrix<double, 9, 1> _chi2inv95;
|
||||
double _std_weight_position;
|
||||
double _std_weight_vel;
|
||||
};
|
||||
|
||||
|
||||
class SORT {
|
||||
public:
|
||||
SORT(double iou_threshold, int max_age, int min_hits): _iou_threshold(iou_threshold), _max_age(max_age), _min_hits(min_hits), _next_tracklet_id(0) {};
|
||||
~SORT();
|
||||
void update(TargetsInFrame &tgts);
|
||||
std::vector<Tracklet> getTracklets() const;
|
||||
private:
|
||||
double _iou(Tracklet &tracklet, Box &box);
|
||||
std::vector<std::pair<int,int>> _hungarian(std::vector<std::vector<double>> costMatrix);
|
||||
double _findMin(const std::vector<double>& vec);
|
||||
void _subtractMinFromRows(std::vector<std::vector<double>>& costMatrix);
|
||||
void _subtractMinFromCols(std::vector<std::vector<double>>& costMatrix);
|
||||
//bool _augment(const std::vector<std::vector<double>>& costMatrix, int row, std::vector<int>& rowMatch, std::vector<int>& colMatch, std::vector<bool>& visited);
|
||||
|
||||
double _iou_threshold;
|
||||
int _max_age;
|
||||
int _min_hits;
|
||||
int _next_tracklet_id;
|
||||
std::vector <Tracklet> _tracklets;
|
||||
std::vector <Tracklet> _new_tracklets;
|
||||
};
|
||||
|
||||
|
||||
}
|
||||
#endif
|
||||
|
|
|
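The trimmed `SORT` declaration above still builds its assignment cost from `_iou(Tracklet&, Box&)`. For reference, a standalone sketch of that overlap measure on (x, y, w, h) boxes; treating (x, y) as the top-left corner is an assumption, since the Tracklet comment only lists the fields:

```cpp
#include <algorithm>

// Illustration only, not the SpireCV implementation: IoU of two axis-aligned
// boxes given as (x, y, w, h), with (x, y) assumed to be the top-left corner.
static double iouXYWH(double ax, double ay, double aw, double ah,
                      double bx, double by, double bw, double bh)
{
    const double x1 = std::max(ax, bx);
    const double y1 = std::max(ay, by);
    const double x2 = std::min(ax + aw, bx + bw);
    const double y2 = std::min(ay + ah, by + bh);

    const double inter = std::max(0.0, x2 - x1) * std::max(0.0, y2 - y1);
    const double uni   = aw * ah + bw * bh - inter;
    return uni > 0.0 ? inter / uni : 0.0;
}
```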
@ -8,10 +8,11 @@
|
|||
#include <string>
|
||||
#include <chrono>
|
||||
|
||||
|
||||
namespace sv {
|
||||
namespace sv
|
||||
{
|
||||
|
||||
class VeriDetectorCUDAImpl;
|
||||
class VeriDetectorIntelImpl;
|
||||
|
||||
class VeriDetector : public LandingMarkerDetectorBase
|
||||
{
|
||||
|
@ -19,18 +20,25 @@ public:
|
|||
VeriDetector();
|
||||
~VeriDetector();
|
||||
|
||||
void detect(cv::Mat img1_, cv::Mat img2_, TargetsInFrame &tgts_);
|
||||
void detect(cv::Mat img_, const cv::Rect &bounding_box_, sv::Target &tgt);
|
||||
|
||||
protected:
|
||||
void _load();
|
||||
bool setupImpl();
|
||||
void roiCNN(
|
||||
std::vector<cv::Mat>& input_rois_,
|
||||
std::vector<int>& output_labels_
|
||||
);
|
||||
std::vector<cv::Mat> &input_rois_,
|
||||
std::vector<float> &output_labels_);
|
||||
void getSubwindow(cv::Mat &dstCrop, cv::Mat &srcImg, int originalSz, int resizeSz);
|
||||
|
||||
VeriDetectorCUDAImpl* _cuda_impl;
|
||||
std::string vehicle_id;
|
||||
|
||||
// Save the target bounding box for each frame.
|
||||
std::vector<float> targetSz = {0, 0}; // H and W of bounding box
|
||||
std::vector<float> targetPos = {0, 0}; // center point of bounding box (x, y)
|
||||
|
||||
VeriDetectorCUDAImpl *_cuda_impl;
|
||||
VeriDetectorIntelImpl *_intel_impl;
|
||||
};
|
||||
|
||||
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -12,9 +12,6 @@
|
|||
#include <arpa/inet.h>
|
||||
#include <netinet/in.h> // for sockaddr_in
|
||||
|
||||
#include <mutex>
|
||||
#include <condition_variable>
|
||||
|
||||
#define SV_RAD2DEG 57.2957795
|
||||
// #define X86_PLATFORM
|
||||
// #define JETSON_PLATFORM
|
||||
|
@ -93,6 +90,9 @@ public:
|
|||
double los_ay;
|
||||
//! The angle of the target in the image coordinate system, (unit: degree) [-180, 180].
|
||||
double yaw_a;
|
||||
|
||||
//! Similarity, Confidence, (0, 1]
|
||||
double sim_score;
|
||||
|
||||
//! Whether the height&width of the target can be obtained.
|
||||
bool has_hw;
|
||||
|
@ -128,6 +128,7 @@ public:
|
|||
|
||||
bool getBox(Box& b);
|
||||
bool getAruco(int& id, std::vector<cv::Point2f> &corners);
|
||||
bool getAruco(int& id, std::vector<cv::Point2f> &corners, cv::Vec3d &rvecs, cv::Vec3d &tvecs);
|
||||
bool getEllipse(double& xc_, double& yc_, double& a_, double& b_, double& rad_);
|
||||
std::string getJsonStr();
|
||||
|
||||
|
@ -326,13 +327,13 @@ protected:
|
|||
};
|
||||
|
||||
|
||||
enum class CameraType {NONE, WEBCAM, G1, Q10, MIPI};
|
||||
enum class CameraType {NONE, WEBCAM, V4L2CAM, MIPI, RTSP, VIDEO, G1, Q10, GX40};
|
||||
|
||||
class CameraBase {
|
||||
public:
|
||||
CameraBase(CameraType type=CameraType::NONE, int id=0);
|
||||
~CameraBase();
|
||||
void open(CameraType type=CameraType::WEBCAM, int id=0);
|
||||
void open(CameraType type=CameraType::V4L2CAM, int id=0);
|
||||
bool read(cv::Mat& image);
|
||||
void release();
|
||||
|
||||
|
@ -346,10 +347,14 @@ public:
|
|||
double getSaturation();
|
||||
double getHue();
|
||||
double getExposure();
|
||||
std::string getFourcc();
|
||||
bool isRunning();
|
||||
void setFourcc(std::string fourcc);
|
||||
void setWH(int width, int height);
|
||||
void setFps(int fps);
|
||||
void setIp(std::string ip);
|
||||
void setRtspUrl(std::string rtsp_url);
|
||||
void setVideoPath(std::string video_path);
|
||||
void setPort(int port);
|
||||
void setBrightness(double brightness);
|
||||
void setContrast(double contrast);
|
||||
|
@ -361,15 +366,7 @@ protected:
|
|||
void _run();
|
||||
|
||||
bool _is_running;
|
||||
|
||||
// new mutex
|
||||
std::mutex _frame_mutex;
|
||||
std::condition_variable_any _frame_empty;
|
||||
|
||||
//old flag
|
||||
bool _is_updated;
|
||||
|
||||
|
||||
std::thread _tt;
|
||||
cv::VideoCapture _cap;
|
||||
cv::Mat _frame;
|
||||
|
@ -380,18 +377,21 @@ protected:
|
|||
int _height;
|
||||
int _fps;
|
||||
std::string _ip;
|
||||
std::string _rtsp_url;
|
||||
std::string _video_path;
|
||||
int _port;
|
||||
double _brightness;
|
||||
double _contrast;
|
||||
double _saturation;
|
||||
double _hue;
|
||||
double _exposure;
|
||||
std::string _fourcc;
|
||||
};
|
||||
|
||||
|
||||
void drawTargetsInFrame(
|
||||
cv::Mat& img_,
|
||||
const TargetsInFrame& tgts_,
|
||||
int aruco_track_id,
|
||||
bool with_all=true,
|
||||
bool with_category=false,
|
||||
bool with_tid=false,
|
||||
|
@ -401,20 +401,15 @@ void drawTargetsInFrame(
|
|||
bool with_aruco=false,
|
||||
bool with_yaw=false
|
||||
);
|
||||
|
||||
|
||||
|
||||
void drawTargetsInFrame(
|
||||
cv::Mat& img_,
|
||||
cv::Mat drawTargetsInFrame(
|
||||
const cv::Mat img_,
|
||||
const TargetsInFrame& tgts_,
|
||||
const double scale,
|
||||
bool with_all=true,
|
||||
bool with_category=false,
|
||||
bool with_tid=false,
|
||||
bool with_seg=false,
|
||||
bool with_box=false,
|
||||
bool with_ell=false,
|
||||
bool with_aruco=false,
|
||||
bool with_yaw=false
|
||||
bool with_box=false
|
||||
);
|
||||
std::string get_home();
|
||||
bool is_file_exist(std::string& fn);
|
||||
|
@ -423,4 +418,3 @@ void list_dir(std::string dir, std::vector<std::string>& files, std::string suff
|
|||
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
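Alongside the new camera setters, `drawTargetsInFrame()` gains an overload that takes a const input frame plus a scale factor and returns the annotated image instead of drawing in place. A minimal sketch; the include names follow the demos elsewhere in this diff, and the 0.5 scale is only an example:

```cpp
#include <opencv2/highgui.hpp>
#include <sv_world.h>

// Show a half-size annotated copy of the frame via the value-returning overload.
void showScaled(const cv::Mat &img, const sv::TargetsInFrame &tgts)
{
    cv::Mat vis = sv::drawTargetsInFrame(img, tgts, 0.5);
    cv::imshow("spirecv", vis);
    cv::waitKey(1);
}
```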
@ -1,11 +1,13 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": true,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": true,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -124,7 +126,9 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["h"],
|
||||
|
|
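The two keys added in this and the following config files, "frame_rate" and "track_buffer", mirror the `_frame_rate` and `_track_buffer` members introduced in `MultipleObjectTracker` above; they are presumably the parameters handed to the new ByteTrack backend, where 30/30 are commonly used defaults.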
|
@ -1,11 +1,13 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 1280,
|
||||
"withSegmentation": false,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": false,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -124,7 +126,9 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": true,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": true,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -123,8 +125,9 @@
|
|||
"reid_num_samples": 10,
|
||||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": false,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": false,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -124,7 +126,9 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 1280,
|
||||
"withSegmentation": false,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": false,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -124,7 +126,9 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": false,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": true,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -124,7 +126,9 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": true,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": true,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -124,7 +126,9 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": true,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": true,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -124,7 +126,9 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": true,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": true,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -124,7 +126,9 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": true,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": true,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -124,7 +126,9 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -18,9 +18,9 @@ const static int kInputH_HD = 1280;
|
|||
const static int kInputW_HD = 1280;
|
||||
const static int kOutputSize = kMaxNumOutputBbox * sizeof(Detection) / sizeof(float) + 1;
|
||||
|
||||
bool parse_args(int argc, char** argv, std::string& wts, std::string& engine, bool& is_p6, float& gd, float& gw, std::string& img_dir, int& n_classes) {
|
||||
bool parse_args(int argc, char** argv, std::string& wts, std::string& engine, bool& is_p6, float& gd, float& gw, std::string& img_dir, int& n_classes, int& n_batch) {
|
||||
if (argc < 4) return false;
|
||||
if (std::string(argv[1]) == "-s" && (argc == 6 || argc == 8)) {
|
||||
if (std::string(argv[1]) == "-s" && (argc == 6 || argc == 7 || argc == 8)) {
|
||||
wts = std::string(argv[2]);
|
||||
engine = std::string(argv[3]);
|
||||
n_classes = atoi(argv[4]);
|
||||
|
@ -51,6 +51,9 @@ bool parse_args(int argc, char** argv, std::string& wts, std::string& engine, bo
|
|||
if (net.size() == 2 && net[1] == '6') {
|
||||
is_p6 = true;
|
||||
}
|
||||
if (argc == 7) {
|
||||
n_batch = atoi(argv[6]);
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
|
@ -99,18 +102,20 @@ int main(int argc, char** argv) {
|
|||
float gd = 0.0f, gw = 0.0f;
|
||||
std::string img_dir;
|
||||
int n_classes;
|
||||
int n_batch = 1;
|
||||
|
||||
if (!parse_args(argc, argv, wts_name, engine_name, is_p6, gd, gw, img_dir, n_classes)) {
|
||||
if (!parse_args(argc, argv, wts_name, engine_name, is_p6, gd, gw, img_dir, n_classes, n_batch)) {
|
||||
std::cerr << "arguments not right!" << std::endl;
|
||||
std::cerr << "./SpireCVDet -s [.wts] [.engine] n_classes [n/s/m/l/x/n6/s6/m6/l6/x6 or c/c6 gd gw] // serialize model to plan file" << std::endl;
|
||||
// std::cerr << "./SpireCVDet -d [.engine] ../images // deserialize plan file and run inference" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
std::cout << "n_classes: " << n_classes << std::endl;
|
||||
std::cout << "max_batch: " << n_batch << std::endl;
|
||||
|
||||
// Create a model using the API directly and serialize it to a file
|
||||
if (!wts_name.empty()) {
|
||||
serialize_engine(kBatchSize, is_p6, gd, gw, wts_name, engine_name, n_classes);
|
||||
serialize_engine(n_batch, is_p6, gd, gw, wts_name, engine_name, n_classes);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
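With the optional sixth argument, a serialization call such as `./SpireCVDet -s yolov5s.wts yolov5s.engine 80 s 4` (file names hypothetical) would build the engine with a maximum batch size of 4 via `serialize_engine(n_batch, ...)` instead of the compile-time `kBatchSize`; when the argument is omitted, `n_batch` keeps its default of 1.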
@ -18,44 +18,47 @@ const static int kInputW = 640;
|
|||
const static int kOutputSize1 = kMaxNumOutputBbox * sizeof(Detection) / sizeof(float) + 1;
|
||||
const static int kOutputSize2 = 32 * (kInputH / 4) * (kInputW / 4);
|
||||
|
||||
bool parse_args(int argc, char** argv, std::string& wts, std::string& engine, float& gd, float& gw, std::string& img_dir, std::string& labels_filename, int& n_classes) {
|
||||
if (argc < 4) return false;
|
||||
if (std::string(argv[1]) == "-s" && (argc == 6 || argc == 8)) {
|
||||
wts = std::string(argv[2]);
|
||||
engine = std::string(argv[3]);
|
||||
n_classes = atoi(argv[4]);
|
||||
if (n_classes < 1)
|
||||
return false;
|
||||
auto net = std::string(argv[5]);
|
||||
if (net[0] == 'n') {
|
||||
gd = 0.33;
|
||||
gw = 0.25;
|
||||
} else if (net[0] == 's') {
|
||||
gd = 0.33;
|
||||
gw = 0.50;
|
||||
} else if (net[0] == 'm') {
|
||||
gd = 0.67;
|
||||
gw = 0.75;
|
||||
} else if (net[0] == 'l') {
|
||||
gd = 1.0;
|
||||
gw = 1.0;
|
||||
} else if (net[0] == 'x') {
|
||||
gd = 1.33;
|
||||
gw = 1.25;
|
||||
} else if (net[0] == 'c' && argc == 8) {
|
||||
gd = atof(argv[6]);
|
||||
gw = atof(argv[7]);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
} else if (std::string(argv[1]) == "-d" && argc == 5) {
|
||||
engine = std::string(argv[2]);
|
||||
img_dir = std::string(argv[3]);
|
||||
labels_filename = std::string(argv[4]);
|
||||
bool parse_args(int argc, char** argv, std::string& wts, std::string& engine, float& gd, float& gw, std::string& img_dir, std::string& labels_filename, int& n_classes, int& n_batch) {
|
||||
if (argc < 4) return false;
|
||||
if (std::string(argv[1]) == "-s" && (argc == 6 || argc == 7 || argc == 8)) {
|
||||
wts = std::string(argv[2]);
|
||||
engine = std::string(argv[3]);
|
||||
n_classes = atoi(argv[4]);
|
||||
if (n_classes < 1)
|
||||
return false;
|
||||
auto net = std::string(argv[5]);
|
||||
if (net[0] == 'n') {
|
||||
gd = 0.33;
|
||||
gw = 0.25;
|
||||
} else if (net[0] == 's') {
|
||||
gd = 0.33;
|
||||
gw = 0.50;
|
||||
} else if (net[0] == 'm') {
|
||||
gd = 0.67;
|
||||
gw = 0.75;
|
||||
} else if (net[0] == 'l') {
|
||||
gd = 1.0;
|
||||
gw = 1.0;
|
||||
} else if (net[0] == 'x') {
|
||||
gd = 1.33;
|
||||
gw = 1.25;
|
||||
} else if (net[0] == 'c' && argc == 8) {
|
||||
gd = atof(argv[6]);
|
||||
gw = atof(argv[7]);
|
||||
} else {
|
||||
return false;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
if (argc == 7) {
|
||||
n_batch = atoi(argv[6]);
|
||||
}
|
||||
} else if (std::string(argv[1]) == "-d" && argc == 5) {
|
||||
engine = std::string(argv[2]);
|
||||
img_dir = std::string(argv[3]);
|
||||
labels_filename = std::string(argv[4]);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
|
@ -98,19 +101,21 @@ int main(int argc, char** argv) {
|
|||
std::string labels_filename = "";
|
||||
float gd = 0.0f, gw = 0.0f;
|
||||
int n_classes;
|
||||
int n_batch = 1;
|
||||
|
||||
std::string img_dir;
|
||||
if (!parse_args(argc, argv, wts_name, engine_name, gd, gw, img_dir, labels_filename, n_classes)) {
|
||||
if (!parse_args(argc, argv, wts_name, engine_name, gd, gw, img_dir, labels_filename, n_classes, n_batch)) {
|
||||
std::cerr << "arguments not right!" << std::endl;
|
||||
std::cerr << "./SpireCVSeg -s [.wts] [.engine] n_classes [n/s/m/l/x or c gd gw] // serialize model to plan file" << std::endl;
|
||||
// std::cerr << "./SpireCVSeg -d [.engine] ../images coco.txt // deserialize plan file, read the labels file and run inference" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
std::cout << "n_classes: " << n_classes << std::endl;
|
||||
std::cout << "max_batch: " << n_batch << std::endl;
|
||||
|
||||
// Create a model using the API directly and serialize it to a file
|
||||
if (!wts_name.empty()) {
|
||||
serialize_engine(kBatchSize, gd, gw, wts_name, engine_name, n_classes);
|
||||
serialize_engine(n_batch, gd, gw, wts_name, engine_name, n_classes);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -36,6 +36,7 @@ the use of this software, even if advised of the possibility of such damage.
|
|||
#include <opencv2/imgproc.hpp>
|
||||
#include <vector>
|
||||
#include <iostream>
|
||||
#include <sv_world.h>
|
||||
#include "aruco_samples_utility.hpp"
|
||||
|
||||
using namespace std;
|
||||
|
@ -58,7 +59,10 @@ const char* keys =
|
|||
"DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL = 16}"
|
||||
"{cd | | Input file with custom dictionary }"
|
||||
"{@outfile |<none> | Output file with calibrated camera parameters }"
|
||||
"{v | | Input from video file, if ommited, input comes from camera }"
|
||||
"{imh | | Camera Image Height }"
|
||||
"{imw | | Camera Image Width }"
|
||||
"{fps | | Camera FPS }"
|
||||
"{tp | | 1:WEBCAM, 2:V4L2CAM, 3:G1, 4:Q10, 5:MIPI, 6:GX40 }"
|
||||
"{ci | 0 | Camera id if input doesnt come from video (-v) }"
|
||||
"{dp | | File of marker detector parameters }"
|
||||
"{rs | false | Apply refind strategy }"
|
||||
|
@ -107,28 +111,47 @@ int main(int argc, char *argv[]) {
|
|||
|
||||
bool refindStrategy = parser.get<bool>("rs");
|
||||
int camId = parser.get<int>("ci");
|
||||
String video;
|
||||
int imW = 800;
|
||||
int imH = 600;
|
||||
int fps = 30;
|
||||
int tp = 1;
|
||||
|
||||
if(parser.has("v")) {
|
||||
video = parser.get<String>("v");
|
||||
if(parser.has("imh")) {
|
||||
imH = parser.get<int>("imh");
|
||||
}
|
||||
if(parser.has("imw")) {
|
||||
imW = parser.get<int>("imw");
|
||||
}
|
||||
if(parser.has("fps")) {
|
||||
fps = parser.get<int>("fps");
|
||||
}
|
||||
if(parser.has("tp")) {
|
||||
tp = parser.get<int>("tp");
|
||||
}
|
||||
sv::CameraType c_type = sv::CameraType::WEBCAM;
|
||||
if (2 == tp)
|
||||
c_type = sv::CameraType::V4L2CAM;
|
||||
else if (3 == tp)
|
||||
c_type = sv::CameraType::G1;
|
||||
else if (4 == tp)
|
||||
c_type = sv::CameraType::Q10;
|
||||
else if (5 == tp)
|
||||
c_type = sv::CameraType::MIPI;
|
||||
else if (6 == tp)
|
||||
c_type = sv::CameraType::GX40;
|
||||
|
||||
if(!parser.check()) {
|
||||
parser.printErrors();
|
||||
return 0;
|
||||
}
|
||||
|
||||
VideoCapture inputVideo;
|
||||
int waitTime;
|
||||
if(!video.empty()) {
|
||||
inputVideo.open(video);
|
||||
waitTime = 0;
|
||||
} else {
|
||||
char pipe[512];
|
||||
sprintf(pipe, "rtsp://192.168.2.64:554/H264?W=1280&H=720&FPS=30&BR=4000000");
|
||||
inputVideo.open(pipe);
|
||||
waitTime = 10;
|
||||
}
|
||||
// VideoCapture inputVideo;
|
||||
sv::Camera inputVideo;
|
||||
inputVideo.setWH(imW, imH);
|
||||
inputVideo.setFps(fps);
|
||||
inputVideo.open(c_type, camId);
|
||||
|
||||
int waitTime = 10;
|
||||
|
||||
aruco::Dictionary dictionary = aruco::getPredefinedDictionary(0);
|
||||
if (parser.has("d")) {
|
||||
|
@ -158,9 +181,9 @@ int main(int argc, char *argv[]) {
|
|||
vector< Mat > allImgs;
|
||||
Size imgSize;
|
||||
|
||||
while(inputVideo.grab()) {
|
||||
while(1) {
|
||||
Mat image, imageCopy;
|
||||
inputVideo.retrieve(image);
|
||||
inputVideo.read(image);
|
||||
|
||||
vector< int > ids;
|
||||
vector< vector< Point2f > > corners, rejected;
|
||||
|
|
|
@ -0,0 +1,256 @@
|
|||
#include <opencv2/highgui.hpp>
|
||||
#include <opencv2/objdetect/aruco_detector.hpp>
|
||||
#include <iostream>
|
||||
#include "aruco_samples_utility.hpp"
|
||||
|
||||
using namespace cv;
|
||||
|
||||
namespace {
|
||||
const char* about = "Create an ArUco marker image";
|
||||
|
||||
//! [aruco_create_markers_keys]
|
||||
const char* keys =
|
||||
"{@outfile |<none> | Output image }"
|
||||
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
|
||||
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
|
||||
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"
|
||||
"DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL = 16}"
|
||||
"{cd | | Input file with custom dictionary }"
|
||||
"{id | | Marker id in the dictionary }"
|
||||
"{ms | 200 | Marker size in pixels }"
|
||||
"{bs | 50 | Border size in pixels }"
|
||||
"{lp | 50 | Landing Pad Unit in pixels }"
|
||||
"{bb | 1 | Number of bits in marker borders }"
|
||||
"{si | false | show generated image }";
|
||||
}
|
||||
//! [aruco_create_markers_keys]
|
||||
|
||||
|
||||
Mat create_marker_with_borders(aruco::Dictionary& dictionary, int markerId, int markerSize, int borderBits, int borderSize) {
|
||||
Mat tmpImg;
|
||||
aruco::generateImageMarker(dictionary, markerId, markerSize, tmpImg, borderBits);
|
||||
Mat tmpImgCopy = Mat::ones(borderSize * 2 + markerSize, borderSize * 2 + markerSize, CV_8UC1) * 255;
|
||||
tmpImg.copyTo(tmpImgCopy(Rect(borderSize, borderSize, markerSize, markerSize)));
|
||||
tmpImg = tmpImgCopy;
|
||||
return tmpImg;
|
||||
}
|
||||
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
CommandLineParser parser(argc, argv, keys);
|
||||
parser.about(about);
|
||||
|
||||
if(argc < 4) {
|
||||
parser.printMessage();
|
||||
return 0;
|
||||
}
|
||||
|
||||
int markerId = parser.get<int>("id");
|
||||
int borderBits = parser.get<int>("bb");
|
||||
int markerSize = parser.get<int>("ms");
|
||||
bool showImage = parser.get<bool>("si");
|
||||
|
||||
int borderSize = 0;
|
||||
if (parser.has("bs")) {
|
||||
borderSize = parser.get<int>("bs");
|
||||
}
|
||||
int landingPadUnit = 0;
|
||||
if (parser.has("lp")) {
|
||||
landingPadUnit = parser.get<int>("lp");
|
||||
borderSize = landingPadUnit;
|
||||
borderBits = 1;
|
||||
markerSize = landingPadUnit * 4;
|
||||
}
|
||||
|
||||
String out = parser.get<String>(0);
|
||||
|
||||
if(!parser.check()) {
|
||||
parser.printErrors();
|
||||
return 0;
|
||||
}
|
||||
|
||||
aruco::Dictionary dictionary = aruco::getPredefinedDictionary(0);
|
||||
if (parser.has("d")) {
|
||||
int dictionaryId = parser.get<int>("d");
|
||||
dictionary = aruco::getPredefinedDictionary(aruco::PredefinedDictionaryType(dictionaryId));
|
||||
}
|
||||
else if (parser.has("cd")) {
|
||||
FileStorage fs(parser.get<std::string>("cd"), FileStorage::READ);
|
||||
bool readOk = dictionary.aruco::Dictionary::readDictionary(fs.root());
|
||||
if(!readOk) {
|
||||
std::cerr << "Invalid dictionary file" << std::endl;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
else {
|
||||
std::cerr << "Dictionary not specified" << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
Mat markerImg;
|
||||
aruco::generateImageMarker(dictionary, markerId, markerSize, markerImg, borderBits);
|
||||
if (borderSize > 0) {
|
||||
Mat markerImgCopy = Mat::ones(borderSize * 2 + markerSize, borderSize * 2 + markerSize, CV_8UC1) * 255;
|
||||
markerImg.copyTo(markerImgCopy(Rect(borderSize, borderSize, markerSize, markerSize)));
|
||||
markerImg = markerImgCopy;
|
||||
}
|
||||
|
||||
if (landingPadUnit > 0) {
|
||||
Mat markerImgBIG = Mat::ones(landingPadUnit * 62, landingPadUnit * 62, CV_8UC1) * 255;
|
||||
// markerId = 0;
|
||||
markerId --;
|
||||
markerSize = landingPadUnit * 4;
|
||||
borderSize = landingPadUnit;
|
||||
int newSize = markerSize + borderSize * 2;
|
||||
|
||||
for (int i=0; i<3; i++) {
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(i * landingPadUnit * 5, 0, newSize, newSize)));
|
||||
}
|
||||
for (int i=0; i<5; i++) {
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 18 + i * landingPadUnit * 5, 0, newSize, newSize)));
|
||||
}
|
||||
for (int i=0; i<3; i++) {
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 46 + i * landingPadUnit * 5, 0, newSize, newSize)));
|
||||
}
|
||||
|
||||
for (int i=0; i<2; i++) {
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 56, landingPadUnit * 5 + i * landingPadUnit * 5, newSize, newSize)));
|
||||
}
|
||||
for (int i=0; i<5; i++) {
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 56, landingPadUnit * 18 + i * landingPadUnit * 5, newSize, newSize)));
|
||||
}
|
||||
for (int i=0; i<3; i++) {
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 56, landingPadUnit * 46 + i * landingPadUnit * 5, newSize, newSize)));
|
||||
}
|
||||
|
||||
for (int i=0; i<2; i++) {
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 51 - i * landingPadUnit * 5, landingPadUnit * 56, newSize, newSize)));
|
||||
}
|
||||
for (int i=0; i<5; i++) {
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 38 - i * landingPadUnit * 5, landingPadUnit * 56, newSize, newSize)));
|
||||
}
|
||||
for (int i=0; i<3; i++) {
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 10 - i * landingPadUnit * 5, landingPadUnit * 56, newSize, newSize)));
|
||||
}
|
||||
|
||||
for (int i=0; i<2; i++) {
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(0, landingPadUnit * 51 - i * landingPadUnit * 5, newSize, newSize)));
|
||||
}
|
||||
for (int i=0; i<5; i++) {
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(0, landingPadUnit * 38 - i * landingPadUnit * 5, newSize, newSize)));
|
||||
}
|
||||
for (int i=0; i<2; i++) {
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(0, landingPadUnit * 10 - i * landingPadUnit * 5, newSize, newSize)));
|
||||
}
|
||||
|
||||
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 28, landingPadUnit * 28, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 28, landingPadUnit * 23, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 33, landingPadUnit * 28, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 28, landingPadUnit * 33, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 23, landingPadUnit * 28, newSize, newSize)));
|
||||
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 28, landingPadUnit * 18, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 38, landingPadUnit * 28, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 28, landingPadUnit * 38, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 18, landingPadUnit * 28, newSize, newSize)));
|
||||
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 28, landingPadUnit * 5, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 51, landingPadUnit * 28, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 28, landingPadUnit * 51, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 5, landingPadUnit * 28, newSize, newSize)));
|
||||
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 28, landingPadUnit * 10, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 46, landingPadUnit * 28, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 28, landingPadUnit * 46, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 10, landingPadUnit * 28, newSize, newSize)));
|
||||
|
||||
// markerId = 90;
|
||||
markerSize = landingPadUnit * 4 * 4;
|
||||
borderSize = landingPadUnit * 4;
|
||||
newSize = markerSize + borderSize * 2;
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 5, landingPadUnit * 5, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 5 + newSize + landingPadUnit * 4, landingPadUnit * 5, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 5, landingPadUnit * 5 + newSize + landingPadUnit * 4, newSize, newSize)));
|
||||
markerId += 1;
|
||||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 5 + newSize + landingPadUnit * 4, landingPadUnit * 5 + newSize + landingPadUnit * 4, newSize, newSize)));
|
||||
|
||||
markerImg = Mat::ones(landingPadUnit * 64, landingPadUnit * 64, CV_8UC1) * 255;
|
||||
markerImgBIG.copyTo(markerImg(Rect(landingPadUnit, landingPadUnit, landingPadUnit * 62, landingPadUnit * 62)));
|
||||
}
|
||||
|
||||
if(showImage) {
|
||||
imshow("marker", markerImg);
|
||||
waitKey(0);
|
||||
}
|
||||
|
||||
imwrite(out, markerImg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -9,13 +9,14 @@ int main(int argc, char *argv[]) {
|
|||
// Instantiate the ArUco detector class
|
||||
sv::ArucoDetector ad;
|
||||
// Manually load the camera parameters; when using an Amov gimbal or camera such as the G1, this step can be skipped and the camera parameter file is downloaded automatically
|
||||
ad.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
|
||||
ad.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
ad.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
|
||||
// Open the camera
|
||||
sv::Camera cap;
|
||||
cap.setWH(1280, 720);
|
||||
cap.setWH(ad.image_width, ad.image_height);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::WEBCAM, 4); // CameraID 0
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
// Instantiate OpenCV's Mat class to hold a single frame in memory
|
||||
cv::Mat img;
|
||||
int frame_id = 0;
|
||||
|
|
|
@ -8,9 +8,9 @@ using namespace std;
|
|||
int main(int argc, char *argv[]) {
|
||||
// Open the camera
|
||||
sv::Camera cap;
|
||||
// cap.setWH(640, 480);
|
||||
// cap.setFps(30);
|
||||
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
|
||||
cap.setWH(1280, 720);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
// Instantiate OpenCV's Mat class to hold a single frame in memory
|
||||
cv::Mat img;
|
||||
while (1)
|
||||
|
|
|
@ -11,13 +11,14 @@ int main(int argc, char *argv[])
|
|||
// Instantiate the color line detection detector class
|
||||
sv::ColorLineDetector cld;
|
||||
// Manually load the camera parameters; when using an Amov gimbal or camera such as the G1, this step can be skipped and the camera parameter file is downloaded automatically
|
||||
cld.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
|
||||
cld.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
cld.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
|
||||
// Open the camera
|
||||
sv::Camera cap;
|
||||
cap.setWH(640, 480);
|
||||
// cap.setFps(30);
|
||||
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
|
||||
cap.setWH(cld.image_width, cld.image_height);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
// Instantiate OpenCV's Mat class to hold a single frame in memory
|
||||
cv::Mat img;
|
||||
int frame_id = 0;
|
||||
|
|
|
@ -9,13 +9,14 @@ int main(int argc, char *argv[]) {
|
|||
// Instantiate the common object detector class
|
||||
sv::CommonObjectDetector cod;
|
||||
// Manually load the camera parameters; when using an Amov gimbal or camera such as the G1, this step can be skipped and the camera parameter file is downloaded automatically
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
|
||||
// Open the camera
|
||||
sv::Camera cap;
|
||||
// cap.setWH(640, 480);
|
||||
// cap.setFps(30);
|
||||
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
|
||||
cap.setWH(cod.image_width, cod.image_height);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
// Instantiate OpenCV's Mat class to hold a single frame in memory
|
||||
cv::Mat img;
|
||||
int frame_id = 0;
|
||||
|
|
|
@ -40,19 +40,21 @@ int main(int argc, char *argv[]) {
|
|||
// Instantiate the box-selection single-object tracker class
|
||||
sv::SingleObjectTracker sot;
|
||||
// Manually load the camera parameters; when using an Amov gimbal or camera such as the G1, this step can be skipped and the camera parameter file is downloaded automatically
|
||||
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
|
||||
sot.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
sot.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
|
||||
|
||||
sv::CommonObjectDetector cod;
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
|
||||
|
||||
// Open the camera
|
||||
sv::Camera cap;
|
||||
cap.setWH(1280, 720);
|
||||
cap.setWH(cod.image_width, cod.image_height);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::WEBCAM, 4); // CameraID 0
|
||||
// cv::VideoCapture cap("/home/amov/SpireCV/test/tracking_1280x720.mp4");
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
|
||||
// Instantiate OpenCV's Mat class to hold a single frame in memory
|
||||
cv::Mat img;
|
||||
int frame_id = 0;
|
||||
|
|
Some files were not shown because too many files have changed in this diff.