Compare commits
5 Commits
master
...
rewrite-ca
Author | SHA1 | Date |
---|---|---|
|
fe18e3e0b6 | |
|
5d2e5b0250 | |
|
9f35bf476e | |
|
173acc7036 | |
|
a0d3e258d9 |
|
@ -1,13 +1,6 @@
|
|||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
models/
|
||||
confs/
|
||||
ZLM/
|
||||
ZLMediaKit/
|
||||
ffmpeg-4.2.5/
|
||||
nv-codec-headers/
|
||||
*.bz2
|
||||
develop-eggs/
|
||||
dist/
|
||||
eggs/
|
||||
|
@ -22,11 +15,6 @@ share/python-wheels/
|
|||
*.egg
|
||||
MANIFEST
|
||||
.idea/
|
||||
calib_webcam_1280x720.yaml
|
||||
calib_webcam_640x480.yaml
|
||||
sv_algorithm_params.json
|
||||
sv_algorithm_params_coco_1280.json
|
||||
sv_algorithm_params_coco_640.json
|
||||
|
||||
# Prerequisites
|
||||
*.d
|
||||
|
|
|
@ -24,7 +24,6 @@ else()
|
|||
option(USE_FFMPEG "BUILD WITH FFMPEG." ON)
|
||||
elseif(PLATFORM STREQUAL "X86_INTEL")
|
||||
add_definitions(-DPLATFORM_X86_INTEL)
|
||||
option(USE_INTEL "BUILD WITH INTEL." ON)
|
||||
option(USE_FFMPEG "BUILD WITH FFMPEG." ON)
|
||||
else()
|
||||
message(FATAL_ERROR "UNSUPPORTED PLATFORM!")
|
||||
|
@ -39,10 +38,6 @@ if(USE_CUDA)
|
|||
message(STATUS "CUDA: ON")
|
||||
endif()
|
||||
|
||||
if(USE_INTEL)
|
||||
add_definitions(-DWITH_INTEL)
|
||||
message(STATUS "INTEL: ON")
|
||||
endif()
|
||||
|
||||
if(USE_GSTREAMER)
|
||||
add_definitions(-DWITH_GSTREAMER)
|
||||
|
@ -77,14 +72,9 @@ include_directories(
|
|||
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/GX40
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/intel
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/landing_det/intel
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/veri/intel
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/cuda
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/landing_det/cuda
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/sot/ocv470
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/mot/sort
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/mot/bytetrack
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/color_line
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/video_io
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/veri/cuda
|
||||
|
@ -129,7 +119,8 @@ set(
|
|||
include/sv_mot.h
|
||||
include/sv_color_line.h
|
||||
include/sv_veri_det.h
|
||||
include/sv_video_input.h
|
||||
# include/sv_video_input.h
|
||||
include/sv_camera.h
|
||||
include/sv_video_output.h
|
||||
include/sv_world.h
|
||||
)
|
||||
|
@ -184,10 +175,6 @@ file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/*.cpp)
|
|||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/utils/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/mot/sort/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/mot/bytetrack/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
|
||||
if(USE_CUDA)
|
||||
list(APPEND spirecv_SRCS algorithm/common_det/cuda/yolov7/preprocess.cu)
|
||||
|
@ -201,23 +188,7 @@ if(USE_CUDA)
|
|||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
endif()
|
||||
|
||||
if(USE_INTEL)
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/intel/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/landing_det/intel/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/veri/intel/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
endif()
|
||||
|
||||
if(USE_FFMPEG)
|
||||
if(USE_INTEL)
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/ffmpeg/x86_intel/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
elseif(USE_CUDA)
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/ffmpeg/x86_cuda/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
endif()
|
||||
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/ffmpeg/*.cpp)
|
||||
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
|
||||
endif()
|
||||
|
@ -261,22 +232,10 @@ if(USE_CUDA) # PLATFORM_X86_CUDA & PLATFORM_JETSON
|
|||
target_link_libraries(SpireCVSeg sv_world)
|
||||
|
||||
elseif(PLATFORM STREQUAL "X86_INTEL") # Links to Intel-OpenVINO libraries here
|
||||
# Intel-Openvino
|
||||
include_directories(
|
||||
PUBLIC /opt/intel/openvino_2022/runtime/include/
|
||||
PUBLIC /opt/intel/openvino_2022/runtime/include/ie/
|
||||
)
|
||||
link_directories(
|
||||
${InferenceEngine_LIBRARIES}
|
||||
/opt/intel/openvino_2022/runtime/lib/intel64/libopenvino.so
|
||||
)
|
||||
|
||||
add_library(sv_world SHARED ${spirecv_SRCS})
|
||||
target_link_libraries(
|
||||
sv_world ${OpenCV_LIBS}
|
||||
sv_gimbal
|
||||
${InferenceEngine_LIBRARIES}
|
||||
/opt/intel/openvino_2022/runtime/lib/intel64/libopenvino.so
|
||||
)
|
||||
endif()
|
||||
|
||||
|
@ -305,8 +264,6 @@ add_executable(SingleObjectTracking samples/demo/single_object_tracking.cpp)
|
|||
target_link_libraries(SingleObjectTracking sv_world)
|
||||
add_executable(MultipleObjectTracking samples/demo/multiple_object_tracking.cpp)
|
||||
target_link_libraries(MultipleObjectTracking sv_world)
|
||||
add_executable(EvalMOTMetric samples/demo/eval_MOT_metric.cpp)
|
||||
target_link_libraries(EvalMOTMetric -lstdc++fs sv_world)
|
||||
add_executable(ColorLineDetection samples/demo/color_line_detect.cpp)
|
||||
target_link_libraries(ColorLineDetection sv_world)
|
||||
add_executable(UdpDetectionInfoReceiver samples/demo/udp_detection_info_receiver.cpp)
|
||||
|
|
|
@ -38,9 +38,9 @@ SpireCV is an **real-time edge perception SDK** built for **intelligent unmanned
|
|||
- **Platform level**:
|
||||
- [x] X86 + Nvidia GPUs (10 series, 20 series, and 30 series graphics cards recommended)
|
||||
- [x] Jetson (AGX Orin/Xavier、Orin NX/Nano、Xavier NX)
|
||||
- [x] Intel CPU
|
||||
- [ ] HUAWEI Ascend (coming soon)
|
||||
- [ ] Intel CPU (coming soon)
|
||||
- [ ] Rockchip (coming soon)
|
||||
- [ ] HUAWEI Ascend (coming soon)
|
||||
|
||||
## Demos
|
||||
- **QR code detection**
|
||||
|
|
|
@ -38,9 +38,9 @@ SpireCV是一个专为**智能无人系统**打造的**边缘实时感知SDK**
|
|||
- **平台层**:
|
||||
- [x] X86+Nvidia GPU(推荐10系、20系、30系显卡)
|
||||
- [x] Jetson(AGX Orin/Xavier、Orin NX/Nano、Xavier NX)
|
||||
- [x] Intel CPU
|
||||
- [ ] HUAWEI Ascend(推进中)
|
||||
- [ ] Intel CPU(推进中)
|
||||
- [ ] Rockchip(推进中)
|
||||
- [ ] HUAWEI Ascend(推进中)
|
||||
|
||||
## 功能展示
|
||||
- **二维码检测**
|
||||
|
|
|
@ -1,8 +1,6 @@
|
|||
#include "common_det_cuda_impl.h"
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include "sv_util.h"
|
||||
|
||||
#define SV_MODEL_DIR "/SpireCV/models/"
|
||||
#define SV_ROOT_DIR "/SpireCV/"
|
||||
|
@ -364,51 +362,17 @@ bool CommonObjectDetectorCUDAImpl::cudaSetup(CommonObjectDetectorBase* base_, bo
|
|||
bool with_segmentation = base_->withSegmentation();
|
||||
double thrs_conf = base_->getThrsConf();
|
||||
double thrs_nms = base_->getThrsNms();
|
||||
std::string model = base_->getModel();
|
||||
int bs = base_->getBatchSize();
|
||||
char bs_c[8];
|
||||
sprintf(bs_c, "%d", bs);
|
||||
std::string bs_s(bs_c);
|
||||
|
||||
std::string engine_fn = get_home() + SV_MODEL_DIR + dataset + ".engine";
|
||||
std::vector<std::string> files;
|
||||
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.engine", "Nv-" + dataset + "-yolov5" + model + "_b" + bs_s + "_c");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
engine_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
|
||||
if (input_w == 1280)
|
||||
{
|
||||
files.clear();
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.engine", "Nv-" + dataset + "-yolov5" + model + "6_b" + bs_s + "_c");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
engine_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
else
|
||||
{
|
||||
engine_fn = get_home() + SV_MODEL_DIR + dataset + "_HD.engine";
|
||||
}
|
||||
engine_fn = get_home() + SV_MODEL_DIR + dataset + "_HD.engine";
|
||||
}
|
||||
if (with_segmentation)
|
||||
{
|
||||
base_->setInputH(640);
|
||||
base_->setInputW(640);
|
||||
files.clear();
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.engine", "Nv-" + dataset + "-yolov5" + model + "_seg_b" + bs_s + "_c");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
engine_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
else
|
||||
{
|
||||
engine_fn = get_home() + SV_MODEL_DIR + dataset + "_SEG.engine";
|
||||
}
|
||||
engine_fn = get_home() + SV_MODEL_DIR + dataset + "_SEG.engine";
|
||||
}
|
||||
std::cout << "Load: " << engine_fn << std::endl;
|
||||
if (!is_file_exist(engine_fn))
|
||||
|
@ -450,5 +414,17 @@ bool CommonObjectDetectorCUDAImpl::cudaSetup(CommonObjectDetectorBase* base_, bo
|
|||
return false;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -1,479 +0,0 @@
|
|||
#include "common_det_intel_impl.h"
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include "sv_util.h"
|
||||
|
||||
#define SV_MODEL_DIR "/SpireCV/models/"
|
||||
#define SV_ROOT_DIR "/SpireCV/"
|
||||
|
||||
namespace sv
|
||||
{
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
using namespace dnn;
|
||||
#endif
|
||||
|
||||
float sigmoid_function(float a)
|
||||
{
|
||||
float b = 1. / (1. + exp(-a));
|
||||
return b;
|
||||
}
|
||||
|
||||
cv::Mat letterbox(cv::Mat &img_, std::vector<float> &paddings)
|
||||
{
|
||||
std::vector<int> new_shape = {640, 640};
|
||||
|
||||
// Get current image shape [height, width]
|
||||
int img_h = img_.rows;
|
||||
int img_w = img_.cols;
|
||||
|
||||
// Compute scale ratio(new / old) and target resized shape
|
||||
float scale = std::min(new_shape[1] * 1.0 / img_h, new_shape[0] * 1.0 / img_w);
|
||||
int resize_h = int(round(img_h * scale));
|
||||
int resize_w = int(round(img_w * scale));
|
||||
paddings[0] = scale;
|
||||
|
||||
// Compute padding
|
||||
int pad_h = new_shape[1] - resize_h;
|
||||
int pad_w = new_shape[0] - resize_w;
|
||||
|
||||
// Resize and pad image while meeting stride-multiple constraints
|
||||
cv::Mat resized_img;
|
||||
cv::resize(img_, resized_img, cv::Size(resize_w, resize_h));
|
||||
|
||||
// divide padding into 2 sides
|
||||
float half_h = pad_h * 1.0 / 2;
|
||||
float half_w = pad_w * 1.0 / 2;
|
||||
paddings[1] = half_h;
|
||||
paddings[2] = half_w;
|
||||
|
||||
// Compute padding boarder
|
||||
int top = int(round(half_h - 0.1));
|
||||
int bottom = int(round(half_h + 0.1));
|
||||
int left = int(round(half_w - 0.1));
|
||||
int right = int(round(half_w + 0.1));
|
||||
|
||||
// Add border
|
||||
cv::copyMakeBorder(resized_img, resized_img, top, bottom, left, right, 0, cv::Scalar(114, 114, 114));
|
||||
|
||||
return resized_img;
|
||||
}
|
||||
|
||||
CommonObjectDetectorIntelImpl::CommonObjectDetectorIntelImpl()
|
||||
{
|
||||
}
|
||||
|
||||
CommonObjectDetectorIntelImpl::~CommonObjectDetectorIntelImpl()
|
||||
{
|
||||
}
|
||||
|
||||
void CommonObjectDetectorIntelImpl::intelDetect(
|
||||
CommonObjectDetectorBase *base_,
|
||||
cv::Mat img_,
|
||||
std::vector<float> &boxes_x_,
|
||||
std::vector<float> &boxes_y_,
|
||||
std::vector<float> &boxes_w_,
|
||||
std::vector<float> &boxes_h_,
|
||||
std::vector<int> &boxes_label_,
|
||||
std::vector<float> &boxes_score_,
|
||||
std::vector<cv::Mat> &boxes_seg_,
|
||||
bool input_4k_)
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
int input_h = base_->getInputH();
|
||||
int input_w = base_->getInputW();
|
||||
bool with_segmentation = base_->withSegmentation();
|
||||
double thrs_conf = base_->getThrsConf();
|
||||
double thrs_nms = base_->getThrsNms();
|
||||
|
||||
if (with_segmentation)
|
||||
{
|
||||
std::vector<float> paddings(3); // scale, half_h, half_w
|
||||
this->preprocess_img_seg(img_, paddings);
|
||||
|
||||
infer_request.start_async();
|
||||
infer_request.wait();
|
||||
|
||||
// Postprocess
|
||||
this->postprocess_img_seg(img_, paddings, boxes_x_, boxes_y_, boxes_w_, boxes_h_, boxes_label_, boxes_score_, boxes_seg_, thrs_conf, thrs_nms);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Preprocess
|
||||
this->preprocess_img(img_);
|
||||
|
||||
// Run inference
|
||||
infer_request.start_async();
|
||||
infer_request.wait();
|
||||
|
||||
// Postprocess
|
||||
this->postprocess_img(boxes_x_, boxes_y_, boxes_w_, boxes_h_, boxes_label_, boxes_score_, thrs_conf, thrs_nms);
|
||||
}
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
bool CommonObjectDetectorIntelImpl::intelSetup(CommonObjectDetectorBase *base_, bool input_4k_)
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
ov::Core core;
|
||||
std::string dataset = base_->getDataset();
|
||||
double thrs_conf = base_->getThrsConf();
|
||||
double thrs_nms = base_->getThrsNms();
|
||||
inpHeight = base_->getInputH();
|
||||
inpWidth = base_->getInputW();
|
||||
with_segmentation = base_->withSegmentation();
|
||||
std::string model = base_->getModel();
|
||||
|
||||
std::string openvino_fn = get_home() + SV_MODEL_DIR + dataset + ".onnx";
|
||||
std::vector<std::string> files;
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.onnx", "Int-" + dataset + "-yolov5" + model + "_c");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
openvino_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
|
||||
if (inpWidth == 1280)
|
||||
{
|
||||
files.clear();
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.onnx", "Int-" + dataset + "-yolov5" + model + "6_c");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
openvino_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
else
|
||||
{
|
||||
openvino_fn = get_home() + SV_MODEL_DIR + dataset + "_HD.onnx";
|
||||
}
|
||||
}
|
||||
if (with_segmentation)
|
||||
{
|
||||
base_->setInputH(640);
|
||||
base_->setInputW(640);
|
||||
files.clear();
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.onnx", "Int-" + dataset + "-yolov5" + model + "_seg_c");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
openvino_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
else
|
||||
{
|
||||
openvino_fn = get_home() + SV_MODEL_DIR + dataset + "_SEG.onnx";
|
||||
}
|
||||
}
|
||||
std::cout << "Load: " << openvino_fn << std::endl;
|
||||
if (!is_file_exist(openvino_fn))
|
||||
{
|
||||
throw std::runtime_error("SpireCV (104) Error loading the CommonObject OpenVINO model (File Not Exist)");
|
||||
}
|
||||
|
||||
if (input_4k_ && with_segmentation)
|
||||
{
|
||||
throw std::runtime_error("SpireCV (106) Resolution 4K DO NOT Support Segmentation!");
|
||||
}
|
||||
|
||||
if (with_segmentation)
|
||||
{
|
||||
this->compiled_model = core.compile_model(openvino_fn, "GPU");
|
||||
this->infer_request = compiled_model.create_infer_request();
|
||||
}
|
||||
else
|
||||
{
|
||||
std::shared_ptr<ov::Model> model_ = core.read_model(openvino_fn);
|
||||
ov::preprocess::PrePostProcessor Pre_P = ov::preprocess::PrePostProcessor(model_);
|
||||
Pre_P.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC").set_color_format(ov::preprocess::ColorFormat::RGB);
|
||||
Pre_P.input().preprocess().convert_element_type(ov::element::f32).convert_color(ov::preprocess::ColorFormat::RGB).scale({255, 255, 255}); // .scale({ 112, 112, 112 });
|
||||
Pre_P.input().model().set_layout("NCHW");
|
||||
Pre_P.output().tensor().set_element_type(ov::element::f32);
|
||||
model_ = Pre_P.build();
|
||||
this->compiled_model = core.compile_model(model_, "GPU");
|
||||
this->infer_request = compiled_model.create_infer_request();
|
||||
}
|
||||
return true;
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
void CommonObjectDetectorIntelImpl::preprocess_img(cv::Mat &img_)
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
float width = img_.cols;
|
||||
float height = img_.rows;
|
||||
cv::Size new_shape = cv::Size(inpHeight, inpWidth);
|
||||
float r = float(new_shape.width / max(width, height));
|
||||
int new_unpadW = int(round(width * r));
|
||||
int new_unpadH = int(round(height * r));
|
||||
|
||||
cv::resize(img_, resize.resized_image, cv::Size(new_unpadW, new_unpadH), 0, 0, cv::INTER_AREA);
|
||||
resize.resized_image = resize.resized_image;
|
||||
resize.dw = new_shape.width - new_unpadW;
|
||||
resize.dh = new_shape.height - new_unpadH;
|
||||
cv::Scalar color = cv::Scalar(100, 100, 100);
|
||||
cv::copyMakeBorder(resize.resized_image, resize.resized_image, 0, resize.dh, 0, resize.dw, cv::BORDER_CONSTANT, color);
|
||||
|
||||
this->rx = (float)img_.cols / (float)(resize.resized_image.cols - resize.dw);
|
||||
this->ry = (float)img_.rows / (float)(resize.resized_image.rows - resize.dh);
|
||||
if (with_segmentation)
|
||||
{
|
||||
cv::Mat blob = cv::dnn::blobFromImage(resize.resized_image, 1 / 255.0, cv::Size(640, 640), cv::Scalar(0, 0, 0), true);
|
||||
auto input_port = compiled_model.input();
|
||||
ov::Tensor input_tensor(input_port.get_element_type(), input_port.get_shape(), blob.ptr(0));
|
||||
infer_request.set_input_tensor(input_tensor);
|
||||
}
|
||||
else
|
||||
{
|
||||
float *input_data = (float *)resize.resized_image.data;
|
||||
input_tensor = ov::Tensor(compiled_model.input().get_element_type(), compiled_model.input().get_shape(), input_data);
|
||||
infer_request.set_input_tensor(input_tensor);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void CommonObjectDetectorIntelImpl::preprocess_img_seg(cv::Mat &img_, std::vector<float> &paddings)
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
cv::Mat masked_img;
|
||||
cv::Mat resized_img = letterbox(img_, paddings); // resize to (640,640) by letterbox
|
||||
// BGR->RGB, u8(0-255)->f32(0.0-1.0), HWC->NCHW
|
||||
cv::Mat blob = cv::dnn::blobFromImage(resized_img, 1 / 255.0, cv::Size(640, 640), cv::Scalar(0, 0, 0), true);
|
||||
|
||||
// Get input port for model with one input
|
||||
auto input_port = compiled_model.input();
|
||||
// Create tensor from external memory
|
||||
ov::Tensor input_tensor(input_port.get_element_type(), input_port.get_shape(), blob.ptr(0));
|
||||
// Set input tensor for model with one input
|
||||
infer_request.set_input_tensor(input_tensor);
|
||||
#endif
|
||||
}
|
||||
|
||||
void CommonObjectDetectorIntelImpl::postprocess_img_seg(cv::Mat &img_,
|
||||
std::vector<float> &paddings,
|
||||
std::vector<float> &boxes_x_,
|
||||
std::vector<float> &boxes_y_,
|
||||
std::vector<float> &boxes_w_,
|
||||
std::vector<float> &boxes_h_,
|
||||
std::vector<int> &boxes_label_,
|
||||
std::vector<float> &boxes_score_,
|
||||
std::vector<cv::Mat> &boxes_seg_,
|
||||
double &thrs_conf,
|
||||
double &thrs_nms)
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
const ov::Tensor &detect = infer_request.get_output_tensor(0);
|
||||
ov::Shape detect_shape = detect.get_shape();
|
||||
const ov::Tensor &proto = infer_request.get_output_tensor(1);
|
||||
ov::Shape proto_shape = proto.get_shape();
|
||||
|
||||
cv::Mat detect_buffer(detect_shape[1], detect_shape[2], CV_32F, detect.data());
|
||||
cv::Mat proto_buffer(proto_shape[1], proto_shape[2] * proto_shape[3], CV_32F, proto.data());
|
||||
|
||||
cv::RNG rng;
|
||||
float conf_threshold = thrs_conf;
|
||||
float nms_threshold = thrs_nms;
|
||||
std::vector<cv::Rect> boxes;
|
||||
std::vector<int> class_ids;
|
||||
std::vector<float> class_scores;
|
||||
std::vector<float> confidences;
|
||||
std::vector<cv::Mat> masks;
|
||||
|
||||
float scale = paddings[0];
|
||||
for (int i = 0; i < detect_buffer.rows; i++)
|
||||
{
|
||||
float confidence = detect_buffer.at<float>(i, 4);
|
||||
if (confidence < conf_threshold)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
cv::Mat classes_scores = detect_buffer.row(i).colRange(5, 85);
|
||||
cv::Point class_id;
|
||||
double score;
|
||||
cv::minMaxLoc(classes_scores, NULL, &score, NULL, &class_id);
|
||||
|
||||
// class score: 0~1
|
||||
if (score > 0.25)
|
||||
{
|
||||
cv::Mat mask = detect_buffer.row(i).colRange(85, 117);
|
||||
float cx = detect_buffer.at<float>(i, 0);
|
||||
float cy = detect_buffer.at<float>(i, 1);
|
||||
float w = detect_buffer.at<float>(i, 2);
|
||||
float h = detect_buffer.at<float>(i, 3);
|
||||
int left = static_cast<int>((cx - 0.5 * w - paddings[2]) / scale);
|
||||
int top = static_cast<int>((cy - 0.5 * h - paddings[1]) / scale);
|
||||
int width = static_cast<int>(w / scale);
|
||||
int height = static_cast<int>(h / scale);
|
||||
cv::Rect box;
|
||||
box.x = left;
|
||||
box.y = top;
|
||||
box.width = width;
|
||||
box.height = height;
|
||||
|
||||
boxes.push_back(box);
|
||||
class_ids.push_back(class_id.x);
|
||||
class_scores.push_back(score);
|
||||
confidences.push_back(confidence);
|
||||
masks.push_back(mask);
|
||||
}
|
||||
}
|
||||
|
||||
// NMS
|
||||
std::vector<int> indices;
|
||||
cv::dnn::NMSBoxes(boxes, confidences, thrs_conf, thrs_nms, indices);
|
||||
// cv::Mat rgb_mask;
|
||||
cv::Mat rgb_mask = cv::Mat::zeros(img_.size(), img_.type());
|
||||
|
||||
for (size_t i = 0; i < indices.size(); i++)
|
||||
{
|
||||
int index = indices[i];
|
||||
int class_id = class_ids[index];
|
||||
cv::Rect box = boxes[index];
|
||||
int x1 = std::max(0, box.x);
|
||||
int y1 = std::max(0, box.y);
|
||||
int x2 = std::max(0, box.br().x);
|
||||
int y2 = std::max(0, box.br().y);
|
||||
|
||||
cv::Mat m = masks[index] * proto_buffer;
|
||||
for (int col = 0; col < m.cols; col++)
|
||||
{
|
||||
m.at<float>(0, col) = sigmoid_function(m.at<float>(0, col));
|
||||
}
|
||||
cv::Mat m1 = m.reshape(1, 160); // 1x25600 -> 160x160
|
||||
int mx1 = std::max(0, int((x1 * scale + paddings[2]) * 0.25));
|
||||
int mx2 = std::max(0, int((x2 * scale + paddings[2]) * 0.25));
|
||||
int my1 = std::max(0, int((y1 * scale + paddings[1]) * 0.25));
|
||||
int my2 = std::max(0, int((y2 * scale + paddings[1]) * 0.25));
|
||||
cv::Mat mask_roi = m1(cv::Range(my1, my2), cv::Range(mx1, mx2));
|
||||
|
||||
cv::Mat rm, det_mask;
|
||||
cv::resize(mask_roi, rm, cv::Size(x2 - x1, y2 - y1));
|
||||
for (int r = 0; r < rm.rows; r++)
|
||||
{
|
||||
for (int c = 0; c < rm.cols; c++)
|
||||
{
|
||||
float pv = rm.at<float>(r, c);
|
||||
if (pv > 0.5)
|
||||
{
|
||||
rm.at<float>(r, c) = 1.0;
|
||||
}
|
||||
else
|
||||
{
|
||||
rm.at<float>(r, c) = 0.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rm = rm * rng.uniform(0, 255);
|
||||
rm.convertTo(det_mask, CV_8UC1);
|
||||
if ((y1 + det_mask.rows) >= img_.rows)
|
||||
{
|
||||
y2 = img_.rows - 1;
|
||||
}
|
||||
if ((x1 + det_mask.cols) >= img_.cols)
|
||||
{
|
||||
x2 = img_.cols - 1;
|
||||
}
|
||||
|
||||
cv::Mat mask = cv::Mat::zeros(cv::Size(img_.cols, img_.rows), CV_8UC1);
|
||||
det_mask(cv::Range(0, y2 - y1), cv::Range(0, x2 - x1)).copyTo(mask(cv::Range(y1, y2), cv::Range(x1, x2)));
|
||||
add(rgb_mask, cv::Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)), rgb_mask, mask);
|
||||
|
||||
boxes_x_.push_back(box.x);
|
||||
boxes_y_.push_back(box.y);
|
||||
boxes_w_.push_back(box.width);
|
||||
boxes_h_.push_back(box.height);
|
||||
|
||||
boxes_label_.push_back((int)class_id);
|
||||
boxes_score_.push_back(class_scores[index]);
|
||||
|
||||
cv::Mat mask_j = mask.clone();
|
||||
boxes_seg_.push_back(mask_j);
|
||||
}
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
void CommonObjectDetectorIntelImpl::postprocess_img(std::vector<float> &boxes_x_,
|
||||
std::vector<float> &boxes_y_,
|
||||
std::vector<float> &boxes_w_,
|
||||
std::vector<float> &boxes_h_,
|
||||
std::vector<int> &boxes_label_,
|
||||
std::vector<float> &boxes_score_,
|
||||
double &thrs_conf,
|
||||
double &thrs_nms)
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
const ov::Tensor &output_tensor = infer_request.get_output_tensor();
|
||||
ov::Shape output_shape = output_tensor.get_shape();
|
||||
float *detections = output_tensor.data<float>();
|
||||
|
||||
std::vector<cv::Rect> boxes;
|
||||
vector<int> class_ids;
|
||||
vector<float> confidences;
|
||||
for (int i = 0; i < output_shape[1]; i++)
|
||||
{
|
||||
float *detection = &detections[i * output_shape[2]];
|
||||
|
||||
float confidence = detection[4];
|
||||
if (confidence >= thrs_conf)
|
||||
{
|
||||
float *classes_scores = &detection[5];
|
||||
cv::Mat scores(1, output_shape[2] - 5, CV_32FC1, classes_scores);
|
||||
cv::Point class_id;
|
||||
double max_class_score;
|
||||
cv::minMaxLoc(scores, 0, &max_class_score, 0, &class_id);
|
||||
if (max_class_score > thrs_conf)
|
||||
{
|
||||
confidences.push_back(confidence);
|
||||
class_ids.push_back(class_id.x);
|
||||
float x = detection[0];
|
||||
float y = detection[1];
|
||||
float w = detection[2];
|
||||
float h = detection[3];
|
||||
float xmin = x - (w / 2);
|
||||
float ymin = y - (h / 2);
|
||||
|
||||
boxes.push_back(cv::Rect(xmin, ymin, w, h));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<int> nms_result;
|
||||
cv::dnn::NMSBoxes(boxes, confidences, thrs_conf, thrs_nms, nms_result);
|
||||
|
||||
std::vector<Detection> output;
|
||||
for (int i = 0; i < nms_result.size(); i++)
|
||||
{
|
||||
Detection result;
|
||||
int idx = nms_result[i];
|
||||
result.class_id = class_ids[idx];
|
||||
result.confidence = confidences[idx];
|
||||
result.box = boxes[idx];
|
||||
output.push_back(result);
|
||||
}
|
||||
|
||||
for (int i = 0; i < output.size(); i++)
|
||||
{
|
||||
auto detection = output[i];
|
||||
auto box = detection.box;
|
||||
auto classId = detection.class_id;
|
||||
auto confidence = detection.confidence;
|
||||
|
||||
float xmax = box.x + box.width;
|
||||
float ymax = box.y + box.height;
|
||||
|
||||
boxes_x_.push_back(this->rx * box.x);
|
||||
boxes_y_.push_back(this->ry * box.y);
|
||||
boxes_w_.push_back(this->rx * box.width);
|
||||
boxes_h_.push_back(this->ry * box.height);
|
||||
|
||||
boxes_label_.push_back((int)detection.class_id);
|
||||
boxes_score_.push_back(detection.confidence);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
}
|
|
@ -1,89 +0,0 @@
|
|||
#ifndef __SV_COMMON_DET_INTEL__
|
||||
#define __SV_COMMON_DET_INTEL__
|
||||
|
||||
#include "sv_core.h"
|
||||
#include <string>
|
||||
#include <iostream>
|
||||
#include <opencv2/dnn.hpp>
|
||||
#include <opencv2/imgproc.hpp>
|
||||
#include <opencv2/highgui.hpp>
|
||||
#include <string>
|
||||
#include <chrono>
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
#include <openvino/openvino.hpp>
|
||||
#endif
|
||||
|
||||
struct Resize
|
||||
{
|
||||
cv::Mat resized_image;
|
||||
int dw;
|
||||
int dh;
|
||||
};
|
||||
|
||||
struct Detection
|
||||
{
|
||||
int class_id;
|
||||
float confidence;
|
||||
cv::Rect box;
|
||||
};
|
||||
|
||||
namespace sv
|
||||
{
|
||||
|
||||
class CommonObjectDetectorIntelImpl
|
||||
{
|
||||
public:
|
||||
CommonObjectDetectorIntelImpl();
|
||||
~CommonObjectDetectorIntelImpl();
|
||||
|
||||
bool intelSetup(CommonObjectDetectorBase *base_, bool input_4k_);
|
||||
void intelDetect(
|
||||
CommonObjectDetectorBase *base_,
|
||||
cv::Mat img_,
|
||||
std::vector<float> &boxes_x_,
|
||||
std::vector<float> &boxes_y_,
|
||||
std::vector<float> &boxes_w_,
|
||||
std::vector<float> &boxes_h_,
|
||||
std::vector<int> &boxes_label_,
|
||||
std::vector<float> &boxes_score_,
|
||||
std::vector<cv::Mat> &boxes_seg_,
|
||||
bool input_4k_);
|
||||
void preprocess_img(cv::Mat &img_);
|
||||
void preprocess_img_seg(cv::Mat &img_, std::vector<float> &paddings);
|
||||
void postprocess_img_seg(cv::Mat &img_,
|
||||
std::vector<float> &paddings,
|
||||
std::vector<float> &boxes_x_,
|
||||
std::vector<float> &boxes_y_,
|
||||
std::vector<float> &boxes_w_,
|
||||
std::vector<float> &boxes_h_,
|
||||
std::vector<int> &boxes_label_,
|
||||
std::vector<float> &boxes_score_,
|
||||
std::vector<cv::Mat> &boxes_seg_,
|
||||
double &thrs_conf,
|
||||
double &thrs_nms);
|
||||
|
||||
void postprocess_img(std::vector<float> &boxes_x_,
|
||||
std::vector<float> &boxes_y_,
|
||||
std::vector<float> &boxes_w_,
|
||||
std::vector<float> &boxes_h_,
|
||||
std::vector<int> &boxes_label_,
|
||||
std::vector<float> &boxes_score_,
|
||||
double &thrs_conf,
|
||||
double &thrs_nms);
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
int inpWidth;
|
||||
int inpHeight;
|
||||
bool with_segmentation;
|
||||
float rx; // the width ratio of original image and resized image
|
||||
float ry; // the height ratio of original image and resized image
|
||||
Resize resize;
|
||||
ov::Tensor input_tensor;
|
||||
ov::InferRequest infer_request;
|
||||
ov::CompiledModel compiled_model;
|
||||
#endif
|
||||
};
|
||||
|
||||
}
|
||||
#endif
|
|
@ -8,10 +8,6 @@
|
|||
#include "common_det_cuda_impl.h"
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
#include <openvino/openvino.hpp>
|
||||
#include "common_det_intel_impl.h"
|
||||
#endif
|
||||
|
||||
namespace sv {
|
||||
|
||||
|
@ -22,10 +18,6 @@ CommonObjectDetector::CommonObjectDetector(bool input_4k)
|
|||
#ifdef WITH_CUDA
|
||||
this->_cuda_impl = new CommonObjectDetectorCUDAImpl;
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
this->_intel_impl = new CommonObjectDetectorIntelImpl;
|
||||
#endif
|
||||
}
|
||||
CommonObjectDetector::~CommonObjectDetector()
|
||||
{
|
||||
|
@ -36,10 +28,6 @@ bool CommonObjectDetector::setupImpl()
|
|||
#ifdef WITH_CUDA
|
||||
return this->_cuda_impl->cudaSetup(this, this->_input_4k);
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
return this->_intel_impl->intelSetup(this, this->_input_4k);
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -65,23 +53,14 @@ void CommonObjectDetector::detectImpl(
|
|||
boxes_label_,
|
||||
boxes_score_,
|
||||
boxes_seg_,
|
||||
this->_input_4k);
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
this->_intel_impl->intelDetect(
|
||||
this,
|
||||
img_,
|
||||
boxes_x_,
|
||||
boxes_y_,
|
||||
boxes_w_,
|
||||
boxes_h_,
|
||||
boxes_label_,
|
||||
boxes_score_,
|
||||
boxes_seg_,
|
||||
this->_input_4k);
|
||||
this->_input_4k
|
||||
);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
#include "landing_det_cuda_impl.h"
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include "sv_util.h"
|
||||
|
||||
#define SV_MODEL_DIR "/SpireCV/models/"
|
||||
#define SV_ROOT_DIR "/SpireCV/"
|
||||
|
@ -51,16 +50,6 @@ bool LandingMarkerDetectorCUDAImpl::cudaSetup()
|
|||
{
|
||||
#ifdef WITH_CUDA
|
||||
std::string trt_model_fn = get_home() + SV_MODEL_DIR + "LandingMarker.engine";
|
||||
|
||||
std::vector<std::string> files;
|
||||
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.engine", "Nv-LandingMarker-resnet34");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
trt_model_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
std::cout << "Load: " << trt_model_fn << std::endl;
|
||||
|
||||
if (!is_file_exist(trt_model_fn))
|
||||
{
|
||||
throw std::runtime_error("SpireCV (104) Error loading the LandingMarker TensorRT model (File Not Exist)");
|
||||
|
|
|
@ -1,104 +0,0 @@
|
|||
#include "landing_det_intel_impl.h"
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include "sv_util.h"
|
||||
|
||||
#define SV_MODEL_DIR "/SpireCV/models/"
|
||||
#define SV_ROOT_DIR "/SpireCV/"
|
||||
|
||||
namespace sv
|
||||
{
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
using namespace dnn;
|
||||
#endif
|
||||
|
||||
LandingMarkerDetectorIntelImpl::LandingMarkerDetectorIntelImpl()
|
||||
{
|
||||
}
|
||||
|
||||
LandingMarkerDetectorIntelImpl::~LandingMarkerDetectorIntelImpl()
|
||||
{
|
||||
}
|
||||
|
||||
bool LandingMarkerDetectorIntelImpl::intelSetup()
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
std::string onnx_model_fn = get_home() + SV_MODEL_DIR + "LandingMarker.onnx";
|
||||
std::vector<std::string> files;
|
||||
list_dir(get_home() + SV_MODEL_DIR, files, "-online.onnx", "Int-LandingMarker-resnet34");
|
||||
if (files.size() > 0)
|
||||
{
|
||||
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
|
||||
onnx_model_fn = get_home() + SV_MODEL_DIR + files[0];
|
||||
}
|
||||
|
||||
if (!is_file_exist(onnx_model_fn))
|
||||
{
|
||||
throw std::runtime_error("SpireCV (104) Error loading the LandingMarker ONNX model (File Not Exist)");
|
||||
}
|
||||
|
||||
// OpenVINO
|
||||
ov::Core core;
|
||||
std::shared_ptr<ov::Model> model = core.read_model(onnx_model_fn);
|
||||
ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);
|
||||
ppp.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC").set_color_format(ov::preprocess::ColorFormat::RGB);
|
||||
ppp.input().preprocess().convert_element_type(ov::element::f32).convert_color(ov::preprocess::ColorFormat::RGB).scale({255, 255, 255}); // .scale({ 112, 112, 112 });
|
||||
ppp.input().model().set_layout("NCHW");
|
||||
ppp.output().tensor().set_element_type(ov::element::f32);
|
||||
model = ppp.build();
|
||||
this->compiled_model = core.compile_model(model, "GPU");
|
||||
this->infer_request = compiled_model.create_infer_request();
|
||||
|
||||
return true;
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
void LandingMarkerDetectorIntelImpl::intelRoiCNN(
|
||||
std::vector<cv::Mat> &input_rois_,
|
||||
std::vector<int> &output_labels_)
|
||||
{
|
||||
#ifdef WITH_INTEL
|
||||
output_labels_.clear();
|
||||
|
||||
for (int i = 0; i < input_rois_.size(); i++)
|
||||
{
|
||||
cv::Mat e_roi = input_rois_[i];
|
||||
|
||||
// Get input port for model with one input
|
||||
auto input_port = compiled_model.input();
|
||||
// Create tensor from external memory
|
||||
ov::Tensor input_tensor(input_port.get_element_type(), input_port.get_shape(), e_roi.ptr(0));
|
||||
// Set input tensor for model with one input
|
||||
infer_request.set_input_tensor(input_tensor);
|
||||
//preprocess_img(e_roi);
|
||||
|
||||
// infer_request.infer();
|
||||
infer_request.start_async();
|
||||
infer_request.wait();
|
||||
|
||||
const ov::Tensor &output_tensor = infer_request.get_output_tensor();
|
||||
ov::Shape output_shape = output_tensor.get_shape();
|
||||
this->_p_prob = output_tensor.data<float>();
|
||||
|
||||
// Find max index
|
||||
double max = 0;
|
||||
int label = 0;
|
||||
for (int i = 0; i < 11; ++i)
|
||||
{
|
||||
if (max < this->_p_prob[i])
|
||||
{
|
||||
max = this->_p_prob[i];
|
||||
label = i;
|
||||
}
|
||||
}
|
||||
output_labels_.push_back(label);
|
||||
}
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
}
|
|
@ -1,37 +0,0 @@
|
|||
#ifndef __SV_LANDING_DET_INTEL__
|
||||
#define __SV_LANDING_DET_INTEL__
|
||||
|
||||
#include "sv_core.h"
|
||||
#include <opencv2/opencv.hpp>
|
||||
#include <opencv2/aruco.hpp>
|
||||
#include <opencv2/tracking.hpp>
|
||||
#include <string>
|
||||
#include <chrono>
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
#include <openvino/openvino.hpp>
|
||||
#endif
|
||||
|
||||
namespace sv
|
||||
{
|
||||
class LandingMarkerDetectorIntelImpl
|
||||
{
|
||||
public:
|
||||
LandingMarkerDetectorIntelImpl();
|
||||
~LandingMarkerDetectorIntelImpl();
|
||||
|
||||
bool intelSetup();
|
||||
void intelRoiCNN(
|
||||
std::vector<cv::Mat> &input_rois_,
|
||||
std::vector<int> &output_labels_);
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
float *_p_prob;
|
||||
|
||||
ov::Tensor input_tensor;
|
||||
ov::InferRequest infer_request;
|
||||
ov::CompiledModel compiled_model;
|
||||
#endif
|
||||
};
|
||||
}
|
||||
#endif
|
|
@ -7,10 +7,6 @@
|
|||
#include "landing_det_cuda_impl.h"
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
#include <openvino/openvino.hpp>
|
||||
#include "landing_det_intel_impl.h"
|
||||
#endif
|
||||
|
||||
namespace sv {
|
||||
|
||||
|
@ -20,10 +16,6 @@ LandingMarkerDetector::LandingMarkerDetector()
|
|||
#ifdef WITH_CUDA
|
||||
this->_cuda_impl = new LandingMarkerDetectorCUDAImpl;
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
this->_intel_impl = new LandingMarkerDetectorIntelImpl;
|
||||
#endif
|
||||
}
|
||||
LandingMarkerDetector::~LandingMarkerDetector()
|
||||
{
|
||||
|
@ -34,10 +26,6 @@ bool LandingMarkerDetector::setupImpl()
|
|||
#ifdef WITH_CUDA
|
||||
return this->_cuda_impl->cudaSetup();
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
return this->_intel_impl->intelSetup();
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -52,13 +40,11 @@ void LandingMarkerDetector::roiCNN(
|
|||
output_labels_
|
||||
);
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
this->_intel_impl->intelRoiCNN(
|
||||
input_rois_,
|
||||
output_labels_
|
||||
);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -1,262 +0,0 @@
|
|||
|
||||
#include "BYTETracker.h"
|
||||
#include <fstream>
|
||||
|
||||
namespace sv
|
||||
{
|
||||
|
||||
BYTETracker::BYTETracker(int frame_rate, int track_buffer)
|
||||
{
|
||||
track_thresh = 0.5;
|
||||
high_thresh = 0.6;
|
||||
match_thresh = 0.8;
|
||||
|
||||
frame_id = 0;
|
||||
max_time_lost = int(frame_rate / 30.0 * track_buffer);
|
||||
}
|
||||
|
||||
BYTETracker::~BYTETracker()
|
||||
{
|
||||
}
|
||||
|
||||
void BYTETracker::update(TargetsInFrame &tgts)
|
||||
{
|
||||
////////////////// Step 1: Get detections //////////////////
|
||||
this->frame_id++;
|
||||
std::vector<STrack> activated_stracks;
|
||||
std::vector<STrack> refind_stracks;
|
||||
std::vector<STrack> removed_stracks;
|
||||
std::vector<STrack> lost_stracks;
|
||||
std::vector<STrack> detections;
|
||||
std::vector<STrack> detections_low;
|
||||
|
||||
std::vector<STrack> detections_cp;
|
||||
std::vector<STrack> tracked_stracks_swap;
|
||||
std::vector<STrack> resa, resb;
|
||||
std::vector<STrack> output_stracks;
|
||||
|
||||
std::vector<STrack *> unconfirmed;
|
||||
std::vector<STrack *> tracked_stracks;
|
||||
std::vector<STrack *> strack_pool;
|
||||
std::vector<STrack *> r_tracked_stracks;
|
||||
|
||||
if (tgts.targets.size() > 0)
|
||||
{
|
||||
for (int i = 0; i < tgts.targets.size(); i++)
|
||||
{
|
||||
|
||||
tgts.targets[i].tracked_id = 0;
|
||||
tgts.targets[i].has_tid = true;
|
||||
|
||||
std::vector<float> tlbr_;
|
||||
tlbr_.resize(4);
|
||||
tlbr_[0] = tgts.targets[i].cx * tgts.width - tgts.targets[i].w * tgts.width / 2;
|
||||
tlbr_[1] = tgts.targets[i].cy * tgts.height - tgts.targets[i].h * tgts.height / 2;
|
||||
tlbr_[2] = tgts.targets[i].cx * tgts.width + tgts.targets[i].w * tgts.width / 2;
|
||||
tlbr_[3] = tgts.targets[i].cy * tgts.height + tgts.targets[i].h * tgts.height / 2;
|
||||
|
||||
float score = tgts.targets[i].score;
|
||||
|
||||
STrack strack(STrack::tlbr_to_tlwh(tlbr_), score);
|
||||
if (score >= track_thresh)
|
||||
{
|
||||
detections.push_back(strack);
|
||||
}
|
||||
else
|
||||
{
|
||||
detections_low.push_back(strack);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add newly detected tracklets to tracked_stracks
|
||||
for (int i = 0; i < this->tracked_stracks.size(); i++)
|
||||
{
|
||||
if (!this->tracked_stracks[i].is_activated)
|
||||
unconfirmed.push_back(&this->tracked_stracks[i]);
|
||||
else
|
||||
tracked_stracks.push_back(&this->tracked_stracks[i]);
|
||||
}
|
||||
|
||||
////////////////// Step 2: First association, with IoU //////////////////
|
||||
strack_pool = joint_stracks(tracked_stracks, this->lost_stracks);
|
||||
STrack::multi_predict(strack_pool, this->kalman_filter);
|
||||
|
||||
std::vector<std::vector<float>> dists;
|
||||
int dist_size = 0, dist_size_size = 0;
|
||||
dists = iou_distance(strack_pool, detections, dist_size, dist_size_size);
|
||||
|
||||
std::vector<std::vector<int>> matches;
|
||||
std::vector<int> u_track, u_detection;
|
||||
linear_assignment(dists, dist_size, dist_size_size, match_thresh, matches, u_track, u_detection);
|
||||
|
||||
for (int i = 0; i < matches.size(); i++)
|
||||
{
|
||||
STrack *track = strack_pool[matches[i][0]];
|
||||
STrack *det = &detections[matches[i][1]];
|
||||
if (track->state == TrackState::Tracked)
|
||||
{
|
||||
track->update(*det, this->frame_id);
|
||||
activated_stracks.push_back(*track);
|
||||
}
|
||||
else
|
||||
{
|
||||
track->re_activate(*det, this->frame_id, false);
|
||||
refind_stracks.push_back(*track);
|
||||
}
|
||||
}
|
||||
|
||||
////////////////// Step 3: Second association, using low score dets //////////////////
|
||||
for (int i = 0; i < u_detection.size(); i++)
|
||||
{
|
||||
detections_cp.push_back(detections[u_detection[i]]);
|
||||
}
|
||||
detections.clear();
|
||||
detections.assign(detections_low.begin(), detections_low.end());
|
||||
|
||||
for (int i = 0; i < u_track.size(); i++)
|
||||
{
|
||||
if (strack_pool[u_track[i]]->state == TrackState::Tracked)
|
||||
{
|
||||
r_tracked_stracks.push_back(strack_pool[u_track[i]]);
|
||||
}
|
||||
}
|
||||
|
||||
dists.clear();
|
||||
dists = iou_distance(r_tracked_stracks, detections, dist_size, dist_size_size);
|
||||
|
||||
matches.clear();
|
||||
u_track.clear();
|
||||
u_detection.clear();
|
||||
linear_assignment(dists, dist_size, dist_size_size, 0.5, matches, u_track, u_detection);
|
||||
|
||||
for (int i = 0; i < matches.size(); i++)
|
||||
{
|
||||
STrack *track = r_tracked_stracks[matches[i][0]];
|
||||
STrack *det = &detections[matches[i][1]];
|
||||
if (track->state == TrackState::Tracked)
|
||||
{
|
||||
track->update(*det, this->frame_id);
|
||||
activated_stracks.push_back(*track);
|
||||
}
|
||||
else
|
||||
{
|
||||
track->re_activate(*det, this->frame_id, false);
|
||||
refind_stracks.push_back(*track);
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < u_track.size(); i++)
|
||||
{
|
||||
STrack *track = r_tracked_stracks[u_track[i]];
|
||||
if (track->state != TrackState::Lost)
|
||||
{
|
||||
track->mark_lost();
|
||||
lost_stracks.push_back(*track);
|
||||
}
|
||||
}
|
||||
|
||||
// Deal with unconfirmed tracks, usually tracks with only one beginning frame
|
||||
detections.clear();
|
||||
detections.assign(detections_cp.begin(), detections_cp.end());
|
||||
|
||||
dists.clear();
|
||||
dists = iou_distance(unconfirmed, detections, dist_size, dist_size_size);
|
||||
|
||||
matches.clear();
|
||||
std::vector<int> u_unconfirmed;
|
||||
u_detection.clear();
|
||||
linear_assignment(dists, dist_size, dist_size_size, 0.7, matches, u_unconfirmed, u_detection);
|
||||
|
||||
for (int i = 0; i < matches.size(); i++)
|
||||
{
|
||||
unconfirmed[matches[i][0]]->update(detections[matches[i][1]], this->frame_id);
|
||||
activated_stracks.push_back(*unconfirmed[matches[i][0]]);
|
||||
}
|
||||
|
||||
for (int i = 0; i < u_unconfirmed.size(); i++)
|
||||
{
|
||||
STrack *track = unconfirmed[u_unconfirmed[i]];
|
||||
track->mark_removed();
|
||||
removed_stracks.push_back(*track);
|
||||
}
|
||||
|
||||
////////////////// Step 4: Init new stracks //////////////////
|
||||
for (int i = 0; i < u_detection.size(); i++)
|
||||
{
|
||||
STrack *track = &detections[u_detection[i]];
|
||||
if (track->score < this->high_thresh)
|
||||
continue;
|
||||
track->activate(this->kalman_filter, this->frame_id);
|
||||
activated_stracks.push_back(*track);
|
||||
}
|
||||
|
||||
////////////////// Step 5: Update state //////////////////
|
||||
for (int i = 0; i < this->lost_stracks.size(); i++)
|
||||
{
|
||||
if (this->frame_id - this->lost_stracks[i].end_frame() > this->max_time_lost)
|
||||
{
|
||||
this->lost_stracks[i].mark_removed();
|
||||
removed_stracks.push_back(this->lost_stracks[i]);
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < this->tracked_stracks.size(); i++)
|
||||
{
|
||||
if (this->tracked_stracks[i].state == TrackState::Tracked)
|
||||
{
|
||||
tracked_stracks_swap.push_back(this->tracked_stracks[i]);
|
||||
}
|
||||
}
|
||||
this->tracked_stracks.clear();
|
||||
this->tracked_stracks.assign(tracked_stracks_swap.begin(), tracked_stracks_swap.end());
|
||||
|
||||
this->tracked_stracks = joint_stracks(this->tracked_stracks, activated_stracks);
|
||||
this->tracked_stracks = joint_stracks(this->tracked_stracks, refind_stracks);
|
||||
|
||||
// std::cout << activated_stracks.size() << std::endl;
|
||||
|
||||
this->lost_stracks = sub_stracks(this->lost_stracks, this->tracked_stracks);
|
||||
for (int i = 0; i < lost_stracks.size(); i++)
|
||||
{
|
||||
this->lost_stracks.push_back(lost_stracks[i]);
|
||||
}
|
||||
|
||||
this->lost_stracks = sub_stracks(this->lost_stracks, this->removed_stracks);
|
||||
for (int i = 0; i < removed_stracks.size(); i++)
|
||||
{
|
||||
this->removed_stracks.push_back(removed_stracks[i]);
|
||||
}
|
||||
|
||||
remove_duplicate_stracks(resa, resb, this->tracked_stracks, this->lost_stracks);
|
||||
|
||||
this->tracked_stracks.clear();
|
||||
this->tracked_stracks.assign(resa.begin(), resa.end());
|
||||
this->lost_stracks.clear();
|
||||
this->lost_stracks.assign(resb.begin(), resb.end());
|
||||
|
||||
for (int i = 0; i < this->tracked_stracks.size(); i++)
|
||||
{
|
||||
if (this->tracked_stracks[i].is_activated)
|
||||
{
|
||||
output_stracks.push_back(this->tracked_stracks[i]);
|
||||
}
|
||||
}
|
||||
|
||||
tgts.targets.clear();
|
||||
|
||||
for (unsigned long i = 0; i < output_stracks.size(); i++)
|
||||
{
|
||||
std::vector<float> tlwh = output_stracks[i].tlwh;
|
||||
bool vertical = tlwh[2] / tlwh[3] > 1.6;
|
||||
|
||||
if (tlwh[2] * tlwh[3] > 20 && !vertical)
|
||||
{
|
||||
Target tgt;
|
||||
tgt.setBox(tlwh[0], tlwh[1], tlwh[0] + tlwh[2], tlwh[1] + tlwh[3], tgts.width, tgts.height);
|
||||
tgt.setTrackID(output_stracks[i].track_id);
|
||||
tgts.targets.push_back(tgt);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,56 +0,0 @@
|
|||
#ifndef __SV_BYTETrack__
|
||||
#define __SV_BYTETrack__
|
||||
|
||||
#include "sv_core.h"
|
||||
#include "STrack.h"
|
||||
|
||||
class detect_result
|
||||
{
|
||||
public:
|
||||
int classId;
|
||||
float confidence;
|
||||
cv::Rect_<float> box;
|
||||
};
|
||||
|
||||
namespace sv {
|
||||
|
||||
class BYTETracker
|
||||
{
|
||||
public:
|
||||
BYTETracker(int frame_rate = 30, int track_buffer = 30);
|
||||
~BYTETracker();
|
||||
|
||||
void update(TargetsInFrame &tgts);
|
||||
cv::Scalar get_color(int idx);
|
||||
|
||||
private:
|
||||
std::vector<STrack*> joint_stracks( std::vector<STrack*> &tlista, std::vector<STrack> &tlistb);
|
||||
std::vector<STrack> joint_stracks( std::vector<STrack> &tlista, std::vector<STrack> &tlistb);
|
||||
|
||||
std::vector<STrack> sub_stracks( std::vector<STrack> &tlista, std::vector<STrack> &tlistb);
|
||||
void remove_duplicate_stracks( std::vector<STrack> &resa, std::vector<STrack> &resb, std::vector<STrack> &stracksa, std::vector<STrack> &stracksb);
|
||||
|
||||
void linear_assignment( std::vector< std::vector<float> > &cost_matrix, int cost_matrix_size, int cost_matrix_size_size, float thresh,
|
||||
std::vector< std::vector<int> > &matches, std::vector<int> &unmatched_a, std::vector<int> &unmatched_b);
|
||||
std::vector< std::vector<float> > iou_distance( std::vector<STrack*> &atracks, std::vector<STrack> &btracks, int &dist_size, int &dist_size_size);
|
||||
std::vector< std::vector<float> > iou_distance( std::vector<STrack> &atracks, std::vector<STrack> &btracks);
|
||||
std::vector< std::vector<float> > ious( std::vector< std::vector<float> > &atlbrs, std::vector< std::vector<float> > &btlbrs);
|
||||
|
||||
double lapjv(const std::vector< std::vector<float> > &cost, std::vector<int> &rowsol, std::vector<int> &colsol,
|
||||
bool extend_cost = false, float cost_limit = LONG_MAX, bool return_cost = true);
|
||||
|
||||
private:
|
||||
|
||||
float track_thresh;
|
||||
float high_thresh;
|
||||
float match_thresh;
|
||||
int frame_id;
|
||||
int max_time_lost;
|
||||
|
||||
std::vector<STrack> tracked_stracks;
|
||||
std::vector<STrack> lost_stracks;
|
||||
std::vector<STrack> removed_stracks;
|
||||
byte_kalman::ByteKalmanFilter kalman_filter;
|
||||
};
|
||||
}
|
||||
#endif
|
|
@ -1,152 +0,0 @@
|
|||
#include "BytekalmanFilter.h"
|
||||
#include <Eigen/Cholesky>
|
||||
|
||||
namespace byte_kalman
|
||||
{
|
||||
const double ByteKalmanFilter::chi2inv95[10] = {
|
||||
0,
|
||||
3.8415,
|
||||
5.9915,
|
||||
7.8147,
|
||||
9.4877,
|
||||
11.070,
|
||||
12.592,
|
||||
14.067,
|
||||
15.507,
|
||||
16.919
|
||||
};
|
||||
ByteKalmanFilter::ByteKalmanFilter()
|
||||
{
|
||||
int ndim = 4;
|
||||
double dt = 1.;
|
||||
|
||||
_motion_mat = Eigen::MatrixXf::Identity(8, 8);
|
||||
for (int i = 0; i < ndim; i++) {
|
||||
_motion_mat(i, ndim + i) = dt;
|
||||
}
|
||||
_update_mat = Eigen::MatrixXf::Identity(4, 8);
|
||||
|
||||
this->_std_weight_position = 1. / 20;
|
||||
this->_std_weight_velocity = 1. / 160;
|
||||
}
|
||||
|
||||
KAL_DATA ByteKalmanFilter::initiate(const DETECTBOX &measurement)
|
||||
{
|
||||
DETECTBOX mean_pos = measurement;
|
||||
DETECTBOX mean_vel;
|
||||
for (int i = 0; i < 4; i++) mean_vel(i) = 0;
|
||||
|
||||
KAL_MEAN mean;
|
||||
for (int i = 0; i < 8; i++) {
|
||||
if (i < 4) mean(i) = mean_pos(i);
|
||||
else mean(i) = mean_vel(i - 4);
|
||||
}
|
||||
|
||||
KAL_MEAN std;
|
||||
std(0) = 2 * _std_weight_position * measurement[3];
|
||||
std(1) = 2 * _std_weight_position * measurement[3];
|
||||
std(2) = 1e-2;
|
||||
std(3) = 2 * _std_weight_position * measurement[3];
|
||||
std(4) = 10 * _std_weight_velocity * measurement[3];
|
||||
std(5) = 10 * _std_weight_velocity * measurement[3];
|
||||
std(6) = 1e-5;
|
||||
std(7) = 10 * _std_weight_velocity * measurement[3];
|
||||
|
||||
KAL_MEAN tmp = std.array().square();
|
||||
KAL_COVA var = tmp.asDiagonal();
|
||||
return std::make_pair(mean, var);
|
||||
}
|
||||
|
||||
void ByteKalmanFilter::predict(KAL_MEAN &mean, KAL_COVA &covariance)
|
||||
{
|
||||
//revise the data;
|
||||
DETECTBOX std_pos;
|
||||
std_pos << _std_weight_position * mean(3),
|
||||
_std_weight_position * mean(3),
|
||||
1e-2,
|
||||
_std_weight_position * mean(3);
|
||||
DETECTBOX std_vel;
|
||||
std_vel << _std_weight_velocity * mean(3),
|
||||
_std_weight_velocity * mean(3),
|
||||
1e-5,
|
||||
_std_weight_velocity * mean(3);
|
||||
KAL_MEAN tmp;
|
||||
tmp.block<1, 4>(0, 0) = std_pos;
|
||||
tmp.block<1, 4>(0, 4) = std_vel;
|
||||
tmp = tmp.array().square();
|
||||
KAL_COVA motion_cov = tmp.asDiagonal();
|
||||
KAL_MEAN mean1 = this->_motion_mat * mean.transpose();
|
||||
KAL_COVA covariance1 = this->_motion_mat * covariance *(_motion_mat.transpose());
|
||||
covariance1 += motion_cov;
|
||||
|
||||
mean = mean1;
|
||||
covariance = covariance1;
|
||||
}
|
||||
|
||||
KAL_HDATA ByteKalmanFilter::project(const KAL_MEAN &mean, const KAL_COVA &covariance)
|
||||
{
|
||||
DETECTBOX std;
|
||||
std << _std_weight_position * mean(3), _std_weight_position * mean(3),
|
||||
1e-1, _std_weight_position * mean(3);
|
||||
KAL_HMEAN mean1 = _update_mat * mean.transpose();
|
||||
KAL_HCOVA covariance1 = _update_mat * covariance * (_update_mat.transpose());
|
||||
Eigen::Matrix<float, 4, 4> diag = std.asDiagonal();
|
||||
diag = diag.array().square().matrix();
|
||||
covariance1 += diag;
|
||||
// covariance1.diagonal() << diag;
|
||||
return std::make_pair(mean1, covariance1);
|
||||
}
|
||||
|
||||
KAL_DATA
|
||||
ByteKalmanFilter::update(
|
||||
const KAL_MEAN &mean,
|
||||
const KAL_COVA &covariance,
|
||||
const DETECTBOX &measurement)
|
||||
{
|
||||
KAL_HDATA pa = project(mean, covariance);
|
||||
KAL_HMEAN projected_mean = pa.first;
|
||||
KAL_HCOVA projected_cov = pa.second;
|
||||
|
||||
//chol_factor, lower =
|
||||
//scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False)
|
||||
//kalmain_gain =
|
||||
//scipy.linalg.cho_solve((cho_factor, lower),
|
||||
//np.dot(covariance, self._upadte_mat.T).T,
|
||||
//check_finite=False).T
|
||||
Eigen::Matrix<float, 4, 8> B = (covariance * (_update_mat.transpose())).transpose();
|
||||
Eigen::Matrix<float, 8, 4> kalman_gain = (projected_cov.llt().solve(B)).transpose(); // eg.8x4
|
||||
Eigen::Matrix<float, 1, 4> innovation = measurement - projected_mean; //eg.1x4
|
||||
auto tmp = innovation * (kalman_gain.transpose());
|
||||
KAL_MEAN new_mean = (mean.array() + tmp.array()).matrix();
|
||||
KAL_COVA new_covariance = covariance - kalman_gain * projected_cov*(kalman_gain.transpose());
|
||||
return std::make_pair(new_mean, new_covariance);
|
||||
}
|
||||
|
||||
Eigen::Matrix<float, 1, -1>
|
||||
ByteKalmanFilter::gating_distance(
|
||||
const KAL_MEAN &mean,
|
||||
const KAL_COVA &covariance,
|
||||
const std::vector<DETECTBOX> &measurements,
|
||||
bool only_position)
|
||||
{
|
||||
KAL_HDATA pa = this->project(mean, covariance);
|
||||
if (only_position) {
|
||||
printf("not implement!");
|
||||
exit(0);
|
||||
}
|
||||
KAL_HMEAN mean1 = pa.first;
|
||||
KAL_HCOVA covariance1 = pa.second;
|
||||
|
||||
// Eigen::Matrix<float, -1, 4, Eigen::RowMajor> d(size, 4);
|
||||
DETECTBOXSS d(measurements.size(), 4);
|
||||
int pos = 0;
|
||||
for (DETECTBOX box : measurements) {
|
||||
d.row(pos++) = box - mean1;
|
||||
}
|
||||
Eigen::Matrix<float, -1, -1, Eigen::RowMajor> factor = covariance1.llt().matrixL();
|
||||
Eigen::Matrix<float, -1, -1> z = factor.triangularView<Eigen::Lower>().solve<Eigen::OnTheRight>(d).transpose();
|
||||
auto zz = ((z.array())*(z.array())).matrix();
|
||||
auto square_maha = zz.colwise().sum();
|
||||
return square_maha;
|
||||
}
|
||||
}
|
|
@ -1,31 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include "dataType.h"
|
||||
|
||||
namespace byte_kalman
|
||||
{
|
||||
class ByteKalmanFilter
|
||||
{
|
||||
public:
|
||||
static const double chi2inv95[10];
|
||||
ByteKalmanFilter();
|
||||
KAL_DATA initiate(const DETECTBOX& measurement);
|
||||
void predict(KAL_MEAN& mean, KAL_COVA& covariance);
|
||||
KAL_HDATA project(const KAL_MEAN& mean, const KAL_COVA& covariance);
|
||||
KAL_DATA update(const KAL_MEAN& mean,
|
||||
const KAL_COVA& covariance,
|
||||
const DETECTBOX& measurement);
|
||||
|
||||
Eigen::Matrix<float, 1, -1> gating_distance(
|
||||
const KAL_MEAN& mean,
|
||||
const KAL_COVA& covariance,
|
||||
const std::vector<DETECTBOX>& measurements,
|
||||
bool only_position = false);
|
||||
|
||||
private:
|
||||
Eigen::Matrix<float, 8, 8, Eigen::RowMajor> _motion_mat;
|
||||
Eigen::Matrix<float, 4, 8, Eigen::RowMajor> _update_mat;
|
||||
float _std_weight_position;
|
||||
float _std_weight_velocity;
|
||||
};
|
||||
}
|
|
@ -1,192 +0,0 @@
|||
#include "STrack.h"

STrack::STrack( std::vector<float> tlwh_, float score)
{
    _tlwh.resize(4);
    _tlwh.assign(tlwh_.begin(), tlwh_.end());

    is_activated = false;
    track_id = 0;
    state = TrackState::New;

    tlwh.resize(4);
    tlbr.resize(4);

    static_tlwh();
    static_tlbr();
    frame_id = 0;
    tracklet_len = 0;
    this->score = score;
    start_frame = 0;
}

STrack::~STrack()
{
}

void STrack::activate(byte_kalman::ByteKalmanFilter &kalman_filter, int frame_id)
{
    this->kalman_filter = kalman_filter;
    this->track_id = this->next_id();

    std::vector<float> _tlwh_tmp(4);
    _tlwh_tmp[0] = this->_tlwh[0];
    _tlwh_tmp[1] = this->_tlwh[1];
    _tlwh_tmp[2] = this->_tlwh[2];
    _tlwh_tmp[3] = this->_tlwh[3];
    std::vector<float> xyah = tlwh_to_xyah(_tlwh_tmp);
    DETECTBOX xyah_box;
    xyah_box[0] = xyah[0];
    xyah_box[1] = xyah[1];
    xyah_box[2] = xyah[2];
    xyah_box[3] = xyah[3];
    auto mc = this->kalman_filter.initiate(xyah_box);
    this->mean = mc.first;
    this->covariance = mc.second;

    static_tlwh();
    static_tlbr();

    this->tracklet_len = 0;
    this->state = TrackState::Tracked;
    if (frame_id == 1)
    {
        this->is_activated = true;
    }
    //this->is_activated = true;
    this->frame_id = frame_id;
    this->start_frame = frame_id;
}

void STrack::re_activate(STrack &new_track, int frame_id, bool new_id)
{
    std::vector<float> xyah = tlwh_to_xyah(new_track.tlwh);
    DETECTBOX xyah_box;
    xyah_box[0] = xyah[0];
    xyah_box[1] = xyah[1];
    xyah_box[2] = xyah[2];
    xyah_box[3] = xyah[3];
    auto mc = this->kalman_filter.update(this->mean, this->covariance, xyah_box);
    this->mean = mc.first;
    this->covariance = mc.second;

    static_tlwh();
    static_tlbr();

    this->tracklet_len = 0;
    this->state = TrackState::Tracked;
    this->is_activated = true;
    this->frame_id = frame_id;
    this->score = new_track.score;
    if (new_id)
        this->track_id = next_id();
}

void STrack::update(STrack &new_track, int frame_id)
{
    this->frame_id = frame_id;
    this->tracklet_len++;

    std::vector<float> xyah = tlwh_to_xyah(new_track.tlwh);
    DETECTBOX xyah_box;
    xyah_box[0] = xyah[0];
    xyah_box[1] = xyah[1];
    xyah_box[2] = xyah[2];
    xyah_box[3] = xyah[3];

    auto mc = this->kalman_filter.update(this->mean, this->covariance, xyah_box);
    this->mean = mc.first;
    this->covariance = mc.second;

    static_tlwh();
    static_tlbr();

    this->state = TrackState::Tracked;
    this->is_activated = true;

    this->score = new_track.score;
}

void STrack::static_tlwh()
{
    if (this->state == TrackState::New)
    {
        tlwh[0] = _tlwh[0];
        tlwh[1] = _tlwh[1];
        tlwh[2] = _tlwh[2];
        tlwh[3] = _tlwh[3];
        return;
    }

    tlwh[0] = mean[0];
    tlwh[1] = mean[1];
    tlwh[2] = mean[2];
    tlwh[3] = mean[3];

    tlwh[2] *= tlwh[3];
    tlwh[0] -= tlwh[2] / 2;
    tlwh[1] -= tlwh[3] / 2;
}

void STrack::static_tlbr()
{
    tlbr.clear();
    tlbr.assign(tlwh.begin(), tlwh.end());
    tlbr[2] += tlbr[0];
    tlbr[3] += tlbr[1];
}

std::vector<float> STrack::tlwh_to_xyah( std::vector<float> tlwh_tmp)
{
    std::vector<float> tlwh_output = tlwh_tmp;
    tlwh_output[0] += tlwh_output[2] / 2;
    tlwh_output[1] += tlwh_output[3] / 2;
    tlwh_output[2] /= tlwh_output[3];
    return tlwh_output;
}

std::vector<float> STrack::to_xyah()
{
    return tlwh_to_xyah(tlwh);
}

std::vector<float> STrack::tlbr_to_tlwh( std::vector<float> &tlbr)
{
    tlbr[2] -= tlbr[0];
    tlbr[3] -= tlbr[1];
    return tlbr;
}

void STrack::mark_lost()
{
    state = TrackState::Lost;
}

void STrack::mark_removed()
{
    state = TrackState::Removed;
}

int STrack::next_id()
{
    static int _count = 0;
    _count++;
    return _count;
}

int STrack::end_frame()
{
    return this->frame_id;
}

void STrack::multi_predict( std::vector<STrack*> &stracks, byte_kalman::ByteKalmanFilter &kalman_filter)
{
    for (int i = 0; i < stracks.size(); i++)
    {
        if (stracks[i]->state != TrackState::Tracked)
        {
            stracks[i]->mean[7] = 0;
        }
        kalman_filter.predict(stracks[i]->mean, stracks[i]->covariance);
    }
}
|
@ -1,47 +0,0 @@
|||
#pragma once

#include <opencv2/opencv.hpp>
#include "BytekalmanFilter.h"

enum TrackState { New = 0, Tracked, Lost, Removed };

class STrack
{
public:
    STrack( std::vector<float> tlwh_, float score);
    ~STrack();

    std::vector<float> static tlbr_to_tlwh( std::vector<float> &tlbr);
    void static multi_predict( std::vector<STrack*> &stracks, byte_kalman::ByteKalmanFilter &kalman_filter);
    void static_tlwh();
    void static_tlbr();
    std::vector<float> tlwh_to_xyah( std::vector<float> tlwh_tmp);
    std::vector<float> to_xyah();
    void mark_lost();
    void mark_removed();
    int next_id();
    int end_frame();

    void activate(byte_kalman::ByteKalmanFilter &kalman_filter, int frame_id);
    void re_activate(STrack &new_track, int frame_id, bool new_id = false);
    void update(STrack &new_track, int frame_id);

public:
    bool is_activated;
    int track_id;
    int state;

    std::vector<float> _tlwh;
    std::vector<float> tlwh;
    std::vector<float> tlbr;
    int frame_id;
    int tracklet_len;
    int start_frame;

    KAL_MEAN mean;
    KAL_COVA covariance;
    float score;

private:
    byte_kalman::ByteKalmanFilter kalman_filter;
};
|
@ -1,27 +0,0 @@
|||
#pragma once

#include <cstddef>
#include <vector>

#include <Eigen/Core>
#include <Eigen/Dense>

typedef Eigen::Matrix<float, 1, 4, Eigen::RowMajor> DETECTBOX;
typedef Eigen::Matrix<float, -1, 4, Eigen::RowMajor> DETECTBOXSS;

//Kalmanfilter
typedef Eigen::Matrix<float, 8, 8, Eigen::RowMajor> KAL_FILTER;
typedef Eigen::Matrix<float, 1, 8, Eigen::RowMajor> KAL_MEAN;
typedef Eigen::Matrix<float, 8, 8, Eigen::RowMajor> KAL_COVA;
typedef Eigen::Matrix<float, 1, 4, Eigen::RowMajor> KAL_HMEAN;
typedef Eigen::Matrix<float, 4, 4, Eigen::RowMajor> KAL_HCOVA;
using KAL_DATA = std::pair<KAL_MEAN, KAL_COVA>;
using KAL_HDATA = std::pair<KAL_HMEAN, KAL_HCOVA>;
|
@ -1,343 +0,0 @@
|
|||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "lapjv.h"
|
||||
|
||||
/** Column-reduction and reduction transfer for a dense cost matrix.
|
||||
*/
|
||||
int_t _ccrrt_dense(const uint_t n, cost_t *cost[],
|
||||
int_t *free_rows, int_t *x, int_t *y, cost_t *v)
|
||||
{
|
||||
int_t n_free_rows;
|
||||
boolean *unique;
|
||||
|
||||
for (uint_t i = 0; i < n; i++) {
|
||||
x[i] = -1;
|
||||
v[i] = LARGE;
|
||||
y[i] = 0;
|
||||
}
|
||||
for (uint_t i = 0; i < n; i++) {
|
||||
for (uint_t j = 0; j < n; j++) {
|
||||
const cost_t c = cost[i][j];
|
||||
if (c < v[j]) {
|
||||
v[j] = c;
|
||||
y[j] = i;
|
||||
}
|
||||
PRINTF("i=%d, j=%d, c[i,j]=%f, v[j]=%f y[j]=%d\n", i, j, c, v[j], y[j]);
|
||||
}
|
||||
}
|
||||
PRINT_COST_ARRAY(v, n);
|
||||
PRINT_INDEX_ARRAY(y, n);
|
||||
NEW(unique, boolean, n);
|
||||
memset(unique, TRUE, n);
|
||||
{
|
||||
int_t j = n;
|
||||
do {
|
||||
j--;
|
||||
const int_t i = y[j];
|
||||
if (x[i] < 0) {
|
||||
x[i] = j;
|
||||
}
|
||||
else {
|
||||
unique[i] = FALSE;
|
||||
y[j] = -1;
|
||||
}
|
||||
} while (j > 0);
|
||||
}
|
||||
n_free_rows = 0;
|
||||
for (uint_t i = 0; i < n; i++) {
|
||||
if (x[i] < 0) {
|
||||
free_rows[n_free_rows++] = i;
|
||||
}
|
||||
else if (unique[i]) {
|
||||
const int_t j = x[i];
|
||||
cost_t min = LARGE;
|
||||
for (uint_t j2 = 0; j2 < n; j2++) {
|
||||
if (j2 == (uint_t)j) {
|
||||
continue;
|
||||
}
|
||||
const cost_t c = cost[i][j2] - v[j2];
|
||||
if (c < min) {
|
||||
min = c;
|
||||
}
|
||||
}
|
||||
PRINTF("v[%d] = %f - %f\n", j, v[j], min);
|
||||
v[j] -= min;
|
||||
}
|
||||
}
|
||||
FREE(unique);
|
||||
return n_free_rows;
|
||||
}
|
||||
|
||||
|
||||
/** Augmenting row reduction for a dense cost matrix.
|
||||
*/
|
||||
int_t _carr_dense(
|
||||
const uint_t n, cost_t *cost[],
|
||||
const uint_t n_free_rows,
|
||||
int_t *free_rows, int_t *x, int_t *y, cost_t *v)
|
||||
{
|
||||
uint_t current = 0;
|
||||
int_t new_free_rows = 0;
|
||||
uint_t rr_cnt = 0;
|
||||
PRINT_INDEX_ARRAY(x, n);
|
||||
PRINT_INDEX_ARRAY(y, n);
|
||||
PRINT_COST_ARRAY(v, n);
|
||||
PRINT_INDEX_ARRAY(free_rows, n_free_rows);
|
||||
while (current < n_free_rows) {
|
||||
int_t i0;
|
||||
int_t j1, j2;
|
||||
cost_t v1, v2, v1_new;
|
||||
boolean v1_lowers;
|
||||
|
||||
rr_cnt++;
|
||||
PRINTF("current = %d rr_cnt = %d\n", current, rr_cnt);
|
||||
const int_t free_i = free_rows[current++];
|
||||
j1 = 0;
|
||||
v1 = cost[free_i][0] - v[0];
|
||||
j2 = -1;
|
||||
v2 = LARGE;
|
||||
for (uint_t j = 1; j < n; j++) {
|
||||
PRINTF("%d = %f %d = %f\n", j1, v1, j2, v2);
|
||||
const cost_t c = cost[free_i][j] - v[j];
|
||||
if (c < v2) {
|
||||
if (c >= v1) {
|
||||
v2 = c;
|
||||
j2 = j;
|
||||
}
|
||||
else {
|
||||
v2 = v1;
|
||||
v1 = c;
|
||||
j2 = j1;
|
||||
j1 = j;
|
||||
}
|
||||
}
|
||||
}
|
||||
i0 = y[j1];
|
||||
v1_new = v[j1] - (v2 - v1);
|
||||
v1_lowers = v1_new < v[j1];
|
||||
PRINTF("%d %d 1=%d,%f 2=%d,%f v1'=%f(%d,%g) \n", free_i, i0, j1, v1, j2, v2, v1_new, v1_lowers, v[j1] - v1_new);
|
||||
if (rr_cnt < current * n) {
|
||||
if (v1_lowers) {
|
||||
v[j1] = v1_new;
|
||||
}
|
||||
else if (i0 >= 0 && j2 >= 0) {
|
||||
j1 = j2;
|
||||
i0 = y[j2];
|
||||
}
|
||||
if (i0 >= 0) {
|
||||
if (v1_lowers) {
|
||||
free_rows[--current] = i0;
|
||||
}
|
||||
else {
|
||||
free_rows[new_free_rows++] = i0;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
PRINTF("rr_cnt=%d >= %d (current=%d * n=%d)\n", rr_cnt, current * n, current, n);
|
||||
if (i0 >= 0) {
|
||||
free_rows[new_free_rows++] = i0;
|
||||
}
|
||||
}
|
||||
x[free_i] = j1;
|
||||
y[j1] = free_i;
|
||||
}
|
||||
return new_free_rows;
|
||||
}
|
||||
|
||||
|
||||
/** Find columns with minimum d[j] and put them on the SCAN list.
|
||||
*/
|
||||
uint_t _find_dense(const uint_t n, uint_t lo, cost_t *d, int_t *cols, int_t *y)
|
||||
{
|
||||
uint_t hi = lo + 1;
|
||||
cost_t mind = d[cols[lo]];
|
||||
for (uint_t k = hi; k < n; k++) {
|
||||
int_t j = cols[k];
|
||||
if (d[j] <= mind) {
|
||||
if (d[j] < mind) {
|
||||
hi = lo;
|
||||
mind = d[j];
|
||||
}
|
||||
cols[k] = cols[hi];
|
||||
cols[hi++] = j;
|
||||
}
|
||||
}
|
||||
return hi;
|
||||
}
|
||||
|
||||
|
||||
// Scan all columns in TODO starting from arbitrary column in SCAN
|
||||
// and try to decrease d of the TODO columns using the SCAN column.
|
||||
int_t _scan_dense(const uint_t n, cost_t *cost[],
|
||||
uint_t *plo, uint_t*phi,
|
||||
cost_t *d, int_t *cols, int_t *pred,
|
||||
int_t *y, cost_t *v)
|
||||
{
|
||||
uint_t lo = *plo;
|
||||
uint_t hi = *phi;
|
||||
cost_t h, cred_ij;
|
||||
|
||||
while (lo != hi) {
|
||||
int_t j = cols[lo++];
|
||||
const int_t i = y[j];
|
||||
const cost_t mind = d[j];
|
||||
h = cost[i][j] - v[j] - mind;
|
||||
PRINTF("i=%d j=%d h=%f\n", i, j, h);
|
||||
// For all columns in TODO
|
||||
for (uint_t k = hi; k < n; k++) {
|
||||
j = cols[k];
|
||||
cred_ij = cost[i][j] - v[j] - h;
|
||||
if (cred_ij < d[j]) {
|
||||
d[j] = cred_ij;
|
||||
pred[j] = i;
|
||||
if (cred_ij == mind) {
|
||||
if (y[j] < 0) {
|
||||
return j;
|
||||
}
|
||||
cols[k] = cols[hi];
|
||||
cols[hi++] = j;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
*plo = lo;
|
||||
*phi = hi;
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/** Single iteration of modified Dijkstra shortest path algorithm as explained in the JV paper.
|
||||
*
|
||||
* This is a dense matrix version.
|
||||
*
|
||||
* \return The closest free column index.
|
||||
*/
|
||||
int_t find_path_dense(
|
||||
const uint_t n, cost_t *cost[],
|
||||
const int_t start_i,
|
||||
int_t *y, cost_t *v,
|
||||
int_t *pred)
|
||||
{
|
||||
uint_t lo = 0, hi = 0;
|
||||
int_t final_j = -1;
|
||||
uint_t n_ready = 0;
|
||||
int_t *cols;
|
||||
cost_t *d;
|
||||
|
||||
NEW(cols, int_t, n);
|
||||
NEW(d, cost_t, n);
|
||||
|
||||
for (uint_t i = 0; i < n; i++) {
|
||||
cols[i] = i;
|
||||
pred[i] = start_i;
|
||||
d[i] = cost[start_i][i] - v[i];
|
||||
}
|
||||
PRINT_COST_ARRAY(d, n);
|
||||
while (final_j == -1) {
|
||||
// No columns left on the SCAN list.
|
||||
if (lo == hi) {
|
||||
PRINTF("%d..%d -> find\n", lo, hi);
|
||||
n_ready = lo;
|
||||
hi = _find_dense(n, lo, d, cols, y);
|
||||
PRINTF("check %d..%d\n", lo, hi);
|
||||
PRINT_INDEX_ARRAY(cols, n);
|
||||
for (uint_t k = lo; k < hi; k++) {
|
||||
const int_t j = cols[k];
|
||||
if (y[j] < 0) {
|
||||
final_j = j;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (final_j == -1) {
|
||||
PRINTF("%d..%d -> scan\n", lo, hi);
|
||||
final_j = _scan_dense(
|
||||
n, cost, &lo, &hi, d, cols, pred, y, v);
|
||||
PRINT_COST_ARRAY(d, n);
|
||||
PRINT_INDEX_ARRAY(cols, n);
|
||||
PRINT_INDEX_ARRAY(pred, n);
|
||||
}
|
||||
}
|
||||
|
||||
PRINTF("found final_j=%d\n", final_j);
|
||||
PRINT_INDEX_ARRAY(cols, n);
|
||||
{
|
||||
const cost_t mind = d[cols[lo]];
|
||||
for (uint_t k = 0; k < n_ready; k++) {
|
||||
const int_t j = cols[k];
|
||||
v[j] += d[j] - mind;
|
||||
}
|
||||
}
|
||||
|
||||
FREE(cols);
|
||||
FREE(d);
|
||||
|
||||
return final_j;
|
||||
}
|
||||
|
||||
|
||||
/** Augment for a dense cost matrix.
|
||||
*/
|
||||
int_t _ca_dense(
|
||||
const uint_t n, cost_t *cost[],
|
||||
const uint_t n_free_rows,
|
||||
int_t *free_rows, int_t *x, int_t *y, cost_t *v)
|
||||
{
|
||||
int_t *pred;
|
||||
|
||||
NEW(pred, int_t, n);
|
||||
|
||||
for (int_t *pfree_i = free_rows; pfree_i < free_rows + n_free_rows; pfree_i++) {
|
||||
int_t i = -1, j;
|
||||
uint_t k = 0;
|
||||
|
||||
PRINTF("looking at free_i=%d\n", *pfree_i);
|
||||
j = find_path_dense(n, cost, *pfree_i, y, v, pred);
|
||||
ASSERT(j >= 0);
|
||||
ASSERT(j < n);
|
||||
while (i != *pfree_i) {
|
||||
PRINTF("augment %d\n", j);
|
||||
PRINT_INDEX_ARRAY(pred, n);
|
||||
i = pred[j];
|
||||
PRINTF("y[%d]=%d -> %d\n", j, y[j], i);
|
||||
y[j] = i;
|
||||
PRINT_INDEX_ARRAY(x, n);
|
||||
SWAP_INDICES(j, x[i]);
|
||||
k++;
|
||||
if (k >= n) {
|
||||
ASSERT(FALSE);
|
||||
}
|
||||
}
|
||||
}
|
||||
FREE(pred);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/** Solve dense sparse LAP.
|
||||
*/
|
||||
int lapjv_internal(
|
||||
const uint_t n, cost_t *cost[],
|
||||
int_t *x, int_t *y)
|
||||
{
|
||||
int ret;
|
||||
int_t *free_rows;
|
||||
cost_t *v;
|
||||
|
||||
NEW(free_rows, int_t, n);
|
||||
NEW(v, cost_t, n);
|
||||
ret = _ccrrt_dense(n, cost, free_rows, x, y, v);
|
||||
int i = 0;
|
||||
while (ret > 0 && i < 2) {
|
||||
ret = _carr_dense(n, cost, ret, free_rows, x, y, v);
|
||||
i++;
|
||||
}
|
||||
if (ret > 0) {
|
||||
ret = _ca_dense(n, cost, ret, free_rows, x, y, v);
|
||||
}
|
||||
FREE(v);
|
||||
FREE(free_rows);
|
||||
return ret;
|
||||
}
|
|
@ -1,63 +0,0 @@
|||
#ifndef LAPJV_H
#define LAPJV_H

#define LARGE 1000000

#if !defined TRUE
#define TRUE 1
#endif
#if !defined FALSE
#define FALSE 0
#endif

#define NEW(x, t, n) if ((x = (t *)malloc(sizeof(t) * (n))) == 0) { return -1; }
#define FREE(x) if (x != 0) { free(x); x = 0; }
#define SWAP_INDICES(a, b) { int_t _temp_index = a; a = b; b = _temp_index; }

#if 0
#include <assert.h>
#define ASSERT(cond) assert(cond)
#define PRINTF(fmt, ...) printf(fmt, ##__VA_ARGS__)
#define PRINT_COST_ARRAY(a, n) \
    while (1) { \
        printf(#a" = ["); \
        if ((n) > 0) { \
            printf("%f", (a)[0]); \
            for (uint_t j = 1; j < n; j++) { \
                printf(", %f", (a)[j]); \
            } \
        } \
        printf("]\n"); \
        break; \
    }
#define PRINT_INDEX_ARRAY(a, n) \
    while (1) { \
        printf(#a" = ["); \
        if ((n) > 0) { \
            printf("%d", (a)[0]); \
            for (uint_t j = 1; j < n; j++) { \
                printf(", %d", (a)[j]); \
            } \
        } \
        printf("]\n"); \
        break; \
    }
#else
#define ASSERT(cond)
#define PRINTF(fmt, ...)
#define PRINT_COST_ARRAY(a, n)
#define PRINT_INDEX_ARRAY(a, n)
#endif

typedef signed int int_t;
typedef unsigned int uint_t;
typedef double cost_t;
typedef char boolean;
typedef enum fp_t { FP_1 = 1, FP_2 = 2, FP_DYNAMIC = 3 } fp_t;

extern int_t lapjv_internal(
    const uint_t n, cost_t *cost[],
    int_t *x, int_t *y);

#endif // LAPJV_H
|
@ -1,432 +0,0 @@
|
|||
#include "BYTETracker.h"
|
||||
#include "lapjv.h"
|
||||
|
||||
namespace sv {
|
||||
|
||||
std::vector<STrack*> BYTETracker::joint_stracks( std::vector<STrack*> &tlista, std::vector<STrack> &tlistb)
|
||||
{
|
||||
std::map<int, int> exists;
|
||||
std::vector<STrack*> res;
|
||||
for (int i = 0; i < tlista.size(); i++)
|
||||
{
|
||||
exists.insert(std::pair<int, int>(tlista[i]->track_id, 1));
|
||||
res.push_back(tlista[i]);
|
||||
}
|
||||
for (int i = 0; i < tlistb.size(); i++)
|
||||
{
|
||||
int tid = tlistb[i].track_id;
|
||||
if (!exists[tid] || exists.count(tid) == 0)
|
||||
{
|
||||
exists[tid] = 1;
|
||||
res.push_back(&tlistb[i]);
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
std::vector<STrack> BYTETracker::joint_stracks( std::vector<STrack> &tlista, std::vector<STrack> &tlistb)
|
||||
{
|
||||
std::map<int, int> exists;
|
||||
std::vector<STrack> res;
|
||||
for (int i = 0; i < tlista.size(); i++)
|
||||
{
|
||||
exists.insert(std::pair<int, int>(tlista[i].track_id, 1));
|
||||
res.push_back(tlista[i]);
|
||||
}
|
||||
for (int i = 0; i < tlistb.size(); i++)
|
||||
{
|
||||
int tid = tlistb[i].track_id;
|
||||
if (!exists[tid] || exists.count(tid) == 0)
|
||||
{
|
||||
exists[tid] = 1;
|
||||
res.push_back(tlistb[i]);
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
std::vector<STrack> BYTETracker::sub_stracks( std::vector<STrack> &tlista, std::vector<STrack> &tlistb)
|
||||
{
|
||||
std::map<int, STrack> stracks;
|
||||
for (int i = 0; i < tlista.size(); i++)
|
||||
{
|
||||
stracks.insert(std::pair<int, STrack>(tlista[i].track_id, tlista[i]));
|
||||
}
|
||||
for (int i = 0; i < tlistb.size(); i++)
|
||||
{
|
||||
int tid = tlistb[i].track_id;
|
||||
if (stracks.count(tid) != 0)
|
||||
{
|
||||
stracks.erase(tid);
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<STrack> res;
|
||||
std::map<int, STrack>::iterator it;
|
||||
for (it = stracks.begin(); it != stracks.end(); ++it)
|
||||
{
|
||||
res.push_back(it->second);
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
void BYTETracker::remove_duplicate_stracks( std::vector<STrack> &resa, std::vector<STrack> &resb, std::vector<STrack> &stracksa, std::vector<STrack> &stracksb)
|
||||
{
|
||||
std::vector< std::vector<float> > pdist = iou_distance(stracksa, stracksb);
|
||||
std::vector<std::pair<int, int> > pairs;
|
||||
for (int i = 0; i < pdist.size(); i++)
|
||||
{
|
||||
for (int j = 0; j < pdist[i].size(); j++)
|
||||
{
|
||||
if (pdist[i][j] < 0.15)
|
||||
{
|
||||
pairs.push_back(std::pair<int, int>(i, j));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<int> dupa, dupb;
|
||||
for (int i = 0; i < pairs.size(); i++)
|
||||
{
|
||||
int timep = stracksa[pairs[i].first].frame_id - stracksa[pairs[i].first].start_frame;
|
||||
int timeq = stracksb[pairs[i].second].frame_id - stracksb[pairs[i].second].start_frame;
|
||||
if (timep > timeq)
|
||||
dupb.push_back(pairs[i].second);
|
||||
else
|
||||
dupa.push_back(pairs[i].first);
|
||||
}
|
||||
|
||||
for (int i = 0; i < stracksa.size(); i++)
|
||||
{
|
||||
std::vector<int>::iterator iter = find(dupa.begin(), dupa.end(), i);
|
||||
if (iter == dupa.end())
|
||||
{
|
||||
resa.push_back(stracksa[i]);
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < stracksb.size(); i++)
|
||||
{
|
||||
std::vector<int>::iterator iter = find(dupb.begin(), dupb.end(), i);
|
||||
if (iter == dupb.end())
|
||||
{
|
||||
resb.push_back(stracksb[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void BYTETracker::linear_assignment( std::vector< std::vector<float> > &cost_matrix, int cost_matrix_size, int cost_matrix_size_size, float thresh,
|
||||
std::vector< std::vector<int> > &matches, std::vector<int> &unmatched_a, std::vector<int> &unmatched_b)
|
||||
{
|
||||
if (cost_matrix.size() == 0)
|
||||
{
|
||||
for (int i = 0; i < cost_matrix_size; i++)
|
||||
{
|
||||
unmatched_a.push_back(i);
|
||||
}
|
||||
for (int i = 0; i < cost_matrix_size_size; i++)
|
||||
{
|
||||
unmatched_b.push_back(i);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
std::vector<int> rowsol; std::vector<int> colsol;
|
||||
float c = lapjv(cost_matrix, rowsol, colsol, true, thresh);
|
||||
for (int i = 0; i < rowsol.size(); i++)
|
||||
{
|
||||
if (rowsol[i] >= 0)
|
||||
{
|
||||
std::vector<int> match;
|
||||
match.push_back(i);
|
||||
match.push_back(rowsol[i]);
|
||||
matches.push_back(match);
|
||||
}
|
||||
else
|
||||
{
|
||||
unmatched_a.push_back(i);
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < colsol.size(); i++)
|
||||
{
|
||||
if (colsol[i] < 0)
|
||||
{
|
||||
unmatched_b.push_back(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::vector< std::vector<float> > BYTETracker::ious( std::vector< std::vector<float> > &atlbrs, std::vector< std::vector<float> > &btlbrs)
|
||||
{
|
||||
std::vector< std::vector<float> > ious;
|
||||
if (atlbrs.size()*btlbrs.size() == 0)
|
||||
return ious;
|
||||
|
||||
ious.resize(atlbrs.size());
|
||||
for (int i = 0; i < ious.size(); i++)
|
||||
{
|
||||
ious[i].resize(btlbrs.size());
|
||||
}
|
||||
|
||||
//bbox_ious
|
||||
for (int k = 0; k < btlbrs.size(); k++)
|
||||
{
|
||||
std::vector<float> ious_tmp;
|
||||
float box_area = (btlbrs[k][2] - btlbrs[k][0] + 1)*(btlbrs[k][3] - btlbrs[k][1] + 1);
|
||||
for (int n = 0; n < atlbrs.size(); n++)
|
||||
{
|
||||
float iw = cv::min(atlbrs[n][2], btlbrs[k][2]) - cv::max(atlbrs[n][0], btlbrs[k][0]) + 1;
|
||||
if (iw > 0)
|
||||
{
|
||||
float ih = cv::min(atlbrs[n][3], btlbrs[k][3]) - cv::max(atlbrs[n][1], btlbrs[k][1]) + 1;
|
||||
if(ih > 0)
|
||||
{
|
||||
float ua = (atlbrs[n][2] - atlbrs[n][0] + 1)*(atlbrs[n][3] - atlbrs[n][1] + 1) + box_area - iw * ih;
|
||||
ious[n][k] = iw * ih / ua;
|
||||
}
|
||||
else
|
||||
{
|
||||
ious[n][k] = 0.0;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
ious[n][k] = 0.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ious;
|
||||
}
|
||||
|
||||
std::vector< std::vector<float> > BYTETracker::iou_distance( std::vector<STrack*> &atracks, std::vector<STrack> &btracks, int &dist_size, int &dist_size_size)
|
||||
{
|
||||
std::vector< std::vector<float> > cost_matrix;
|
||||
if (atracks.size() * btracks.size() == 0)
|
||||
{
|
||||
dist_size = atracks.size();
|
||||
dist_size_size = btracks.size();
|
||||
return cost_matrix;
|
||||
}
|
||||
std::vector< std::vector<float> > atlbrs, btlbrs;
|
||||
for (int i = 0; i < atracks.size(); i++)
|
||||
{
|
||||
atlbrs.push_back(atracks[i]->tlbr);
|
||||
}
|
||||
for (int i = 0; i < btracks.size(); i++)
|
||||
{
|
||||
btlbrs.push_back(btracks[i].tlbr);
|
||||
}
|
||||
|
||||
dist_size = atracks.size();
|
||||
dist_size_size = btracks.size();
|
||||
|
||||
std::vector< std::vector<float> > _ious = ious(atlbrs, btlbrs);
|
||||
|
||||
for (int i = 0; i < _ious.size();i++)
|
||||
{
|
||||
std::vector<float> _iou;
|
||||
for (int j = 0; j < _ious[i].size(); j++)
|
||||
{
|
||||
_iou.push_back(1 - _ious[i][j]);
|
||||
}
|
||||
cost_matrix.push_back(_iou);
|
||||
}
|
||||
|
||||
return cost_matrix;
|
||||
}
|
||||
|
||||
std::vector< std::vector<float> > BYTETracker::iou_distance( std::vector<STrack> &atracks, std::vector<STrack> &btracks)
|
||||
{
|
||||
std::vector< std::vector<float> > atlbrs, btlbrs;
|
||||
for (int i = 0; i < atracks.size(); i++)
|
||||
{
|
||||
atlbrs.push_back(atracks[i].tlbr);
|
||||
}
|
||||
for (int i = 0; i < btracks.size(); i++)
|
||||
{
|
||||
btlbrs.push_back(btracks[i].tlbr);
|
||||
}
|
||||
|
||||
std::vector< std::vector<float> > _ious = ious(atlbrs, btlbrs);
|
||||
std::vector< std::vector<float> > cost_matrix;
|
||||
for (int i = 0; i < _ious.size(); i++)
|
||||
{
|
||||
std::vector<float> _iou;
|
||||
for (int j = 0; j < _ious[i].size(); j++)
|
||||
{
|
||||
_iou.push_back(1 - _ious[i][j]);
|
||||
}
|
||||
cost_matrix.push_back(_iou);
|
||||
}
|
||||
|
||||
return cost_matrix;
|
||||
}
|
||||
|
||||
double BYTETracker::lapjv(const std::vector< std::vector<float> > &cost, std::vector<int> &rowsol, std::vector<int> &colsol,
|
||||
bool extend_cost, float cost_limit, bool return_cost)
|
||||
{
|
||||
std::vector< std::vector<float> > cost_c;
|
||||
cost_c.assign(cost.begin(), cost.end());
|
||||
|
||||
std::vector< std::vector<float> > cost_c_extended;
|
||||
|
||||
int n_rows = cost.size();
|
||||
int n_cols = cost[0].size();
|
||||
rowsol.resize(n_rows);
|
||||
colsol.resize(n_cols);
|
||||
|
||||
int n = 0;
|
||||
if (n_rows == n_cols)
|
||||
{
|
||||
n = n_rows;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!extend_cost)
|
||||
{
|
||||
std::cout << "set extend_cost=True" << std::endl;
|
||||
system("pause");
|
||||
exit(0);
|
||||
}
|
||||
}
|
||||
|
||||
if (extend_cost || cost_limit < LONG_MAX)
|
||||
{
|
||||
n = n_rows + n_cols;
|
||||
cost_c_extended.resize(n);
|
||||
for (int i = 0; i < cost_c_extended.size(); i++)
|
||||
cost_c_extended[i].resize(n);
|
||||
|
||||
if (cost_limit < LONG_MAX)
|
||||
{
|
||||
for (int i = 0; i < cost_c_extended.size(); i++)
|
||||
{
|
||||
for (int j = 0; j < cost_c_extended[i].size(); j++)
|
||||
{
|
||||
cost_c_extended[i][j] = cost_limit / 2.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
float cost_max = -1;
|
||||
for (int i = 0; i < cost_c.size(); i++)
|
||||
{
|
||||
for (int j = 0; j < cost_c[i].size(); j++)
|
||||
{
|
||||
if (cost_c[i][j] > cost_max)
|
||||
cost_max = cost_c[i][j];
|
||||
}
|
||||
}
|
||||
for (int i = 0; i < cost_c_extended.size(); i++)
|
||||
{
|
||||
for (int j = 0; j < cost_c_extended[i].size(); j++)
|
||||
{
|
||||
cost_c_extended[i][j] = cost_max + 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = n_rows; i < cost_c_extended.size(); i++)
|
||||
{
|
||||
for (int j = n_cols; j < cost_c_extended[i].size(); j++)
|
||||
{
|
||||
cost_c_extended[i][j] = 0;
|
||||
}
|
||||
}
|
||||
for (int i = 0; i < n_rows; i++)
|
||||
{
|
||||
for (int j = 0; j < n_cols; j++)
|
||||
{
|
||||
cost_c_extended[i][j] = cost_c[i][j];
|
||||
}
|
||||
}
|
||||
|
||||
cost_c.clear();
|
||||
cost_c.assign(cost_c_extended.begin(), cost_c_extended.end());
|
||||
}
|
||||
|
||||
double **cost_ptr;
|
||||
cost_ptr = new double *[sizeof(double *) * n];
|
||||
for (int i = 0; i < n; i++)
|
||||
cost_ptr[i] = new double[sizeof(double) * n];
|
||||
|
||||
for (int i = 0; i < n; i++)
|
||||
{
|
||||
for (int j = 0; j < n; j++)
|
||||
{
|
||||
cost_ptr[i][j] = cost_c[i][j];
|
||||
}
|
||||
}
|
||||
|
||||
int* x_c = new int[sizeof(int) * n];
|
||||
int *y_c = new int[sizeof(int) * n];
|
||||
|
||||
int ret = lapjv_internal(n, cost_ptr, x_c, y_c);
|
||||
if (ret != 0)
|
||||
{
|
||||
std::cout << "Calculate Wrong!" << std::endl;
|
||||
system("pause");
|
||||
exit(0);
|
||||
}
|
||||
|
||||
double opt = 0.0;
|
||||
|
||||
if (n != n_rows)
|
||||
{
|
||||
for (int i = 0; i < n; i++)
|
||||
{
|
||||
if (x_c[i] >= n_cols)
|
||||
x_c[i] = -1;
|
||||
if (y_c[i] >= n_rows)
|
||||
y_c[i] = -1;
|
||||
}
|
||||
for (int i = 0; i < n_rows; i++)
|
||||
{
|
||||
rowsol[i] = x_c[i];
|
||||
}
|
||||
for (int i = 0; i < n_cols; i++)
|
||||
{
|
||||
colsol[i] = y_c[i];
|
||||
}
|
||||
|
||||
if (return_cost)
|
||||
{
|
||||
for (int i = 0; i < rowsol.size(); i++)
|
||||
{
|
||||
if (rowsol[i] != -1)
|
||||
{
|
||||
//cout << i << "\t" << rowsol[i] << "\t" << cost_ptr[i][rowsol[i]] << endl;
|
||||
opt += cost_ptr[i][rowsol[i]];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (return_cost)
|
||||
{
|
||||
for (int i = 0; i < rowsol.size(); i++)
|
||||
{
|
||||
opt += cost_ptr[i][rowsol[i]];
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < n; i++)
|
||||
{
|
||||
delete[]cost_ptr[i];
|
||||
}
|
||||
delete[]cost_ptr;
|
||||
delete[]x_c;
|
||||
delete[]y_c;
|
||||
|
||||
return opt;
|
||||
}
|
||||
|
||||
cv::Scalar BYTETracker::get_color(int idx)
|
||||
{
|
||||
idx += 3;
|
||||
return cv::Scalar(37 * idx % 255, 17 * idx % 255, 29 * idx % 255);
|
||||
}
|
||||
}
|
|
@ -1,377 +0,0 @@
|
|||
#include "sort.h"
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include <limits>
|
||||
#include <vector>
|
||||
#include "gason.h"
|
||||
#include "sv_util.h"
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
namespace sv {
|
||||
|
||||
|
||||
KalmanFilter::KalmanFilter()
|
||||
{
|
||||
this->_chi2inv95 << 3.8415, 5.9915, 7.8147, 9.4877, 11.070, 12.592, 14.067, 15.507, 16.919;
|
||||
this->_F = MatrixXd::Identity(8, 8);
|
||||
for (int i=0; i<4; i++)
|
||||
{
|
||||
this->_F(i,i+4) = 1.; //1
|
||||
}
|
||||
this->_H = MatrixXd::Identity(4, 8);
|
||||
this->_std_weight_position = 1. / 20; //1./20
|
||||
this->_std_weight_vel = 1. / 160; //1./160
|
||||
}
|
||||
|
||||
KalmanFilter::~KalmanFilter()
|
||||
{
|
||||
}
|
||||
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > KalmanFilter::initiate(Vector4d &bbox)
|
||||
{
|
||||
Matrix<double,8,1> mean;
|
||||
Matrix<double,4,1> zero_vector;
|
||||
zero_vector.setZero();
|
||||
mean << bbox(0), bbox(1), (double)bbox(2) / (double)bbox(3), bbox(3), zero_vector;
|
||||
VectorXd stds(8);
|
||||
stds << 2 * this->_std_weight_position * mean(3), 2 * this->_std_weight_position * mean(3), 0.01, 2 * this->_std_weight_position * mean(3), \
|
||||
10 * this->_std_weight_vel * mean(3), 10 * this->_std_weight_vel * mean(3), 1e-5, 10 * this->_std_weight_vel * mean(3);
|
||||
MatrixXd squared = stds.array().square();
|
||||
Matrix<double, 8, 8> covariances;
|
||||
covariances = squared.asDiagonal();
|
||||
return make_pair(mean, covariances);
|
||||
}
|
||||
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > KalmanFilter::update(Matrix<double, 8, 1> mean, Matrix<double, 8, 8> covariances, sv::Box &box)
|
||||
{
|
||||
VectorXd measurement(4);
|
||||
double a = (double)(box.x2-box.x1) / (double)(box.y2-box.y1);
|
||||
measurement << box.x1+(box.x2-box.x1)/2, box.y1+(box.y2-box.y1)/2, a, box.y2-box.y1;
|
||||
pair<Matrix<double, 4, 1>, Matrix<double, 4, 4> > projected = project(mean, covariances);
|
||||
Matrix<double, 4, 1> projected_mean = projected.first;
|
||||
Matrix<double, 4, 4> projected_cov = projected.second;
|
||||
|
||||
Eigen::LLT<Eigen::MatrixXd> chol_factor(projected_cov);
|
||||
MatrixXd Kalman_gain = (chol_factor.solve((covariances * this->_H.transpose()).transpose())).transpose();
|
||||
|
||||
VectorXd innovation = measurement - projected_mean;
|
||||
Matrix<double, 8, 1> new_mean = mean + Kalman_gain *innovation;
|
||||
Matrix<double, 8, 8> new_covariances = covariances - Kalman_gain * projected_cov * Kalman_gain.transpose();
|
||||
|
||||
return make_pair(new_mean, new_covariances);
|
||||
}
|
||||
|
||||
pair<Matrix<double, 4, 1>, Matrix<double, 4, 4> > KalmanFilter::project(Matrix<double, 8, 1> mean, Matrix<double, 8, 8> covariances)
|
||||
{
|
||||
VectorXd stds(4);
|
||||
stds << this->_std_weight_position * mean(3), this->_std_weight_position * mean(3), 0.1, this->_std_weight_position * mean(3);
|
||||
MatrixXd squared = stds.array().square();
|
||||
MatrixXd R = squared.asDiagonal();
|
||||
|
||||
Matrix<double, 4, 1> pro_mean = this->_H * mean;
|
||||
Matrix<double, 4, 4> pro_covariances = this->_H * covariances * this->_H.transpose() + R;
|
||||
return make_pair(pro_mean, pro_covariances);
|
||||
}
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > KalmanFilter::predict(Matrix<double, 8, 1> mean, Matrix<double, 8, 8> covariances)
|
||||
{
|
||||
VectorXd stds(8);
|
||||
stds << this->_std_weight_position * mean(3), this->_std_weight_position * mean(3), 1e-2, this->_std_weight_position * mean(3), \
|
||||
this->_std_weight_vel * mean(3), this->_std_weight_vel * mean(3), 1e-5, this->_std_weight_vel * mean(3); // a = 0.01
|
||||
MatrixXd squared = stds.array().square();
|
||||
MatrixXd Q = squared.asDiagonal();
|
||||
Matrix<double, 8, 1> pre_mean = this->_F * mean;
|
||||
Matrix<double, 8, 8> pre_cov = this->_F * covariances * this->_F.transpose()+Q;//+Q
|
||||
return make_pair(pre_mean, pre_cov);
|
||||
}
|
||||
|
||||
|
||||
SORT::~SORT()
|
||||
{
|
||||
}
|
||||
|
||||
void SORT::update(TargetsInFrame& tgts)
|
||||
{
|
||||
KalmanFilter kf;
|
||||
if (! this->_tracklets.size() || tgts.targets.size() == 0)
|
||||
{
|
||||
Vector4d bbox;
|
||||
for (int i=0; i<tgts.targets.size(); i++)
|
||||
{
|
||||
sv::Box box;
|
||||
tgts.targets[i].getBox(box);
|
||||
Tracklet tracklet;
|
||||
tracklet.id = ++ this->_next_tracklet_id;
|
||||
tgts.targets[i].tracked_id = this->_next_tracklet_id;
|
||||
tgts.targets[i].has_tid = true;
|
||||
|
||||
tracklet.bbox << box.x1+(box.x2-box.x1)/2, box.y1+(box.y2-box.y1)/2, box.x2-box.x1, box.y2-box.y1; // x,y,w,h; center(x,y)
|
||||
|
||||
tracklet.age = 0;
|
||||
tracklet.hits = 1;
|
||||
//tracklet.misses = 0;
|
||||
tracklet.frame_id = tgts.frame_id;
|
||||
tracklet.category_id = tgts.targets[i].category_id;
|
||||
if (tgts.frame_id == 0)
|
||||
{
|
||||
tracklet.tentative = false;
|
||||
}
|
||||
else
|
||||
{
|
||||
tracklet.tentative = true;
|
||||
}
|
||||
// initate the motion
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > motion = kf.initiate(tracklet.bbox);
|
||||
tracklet.mean = motion.first;
|
||||
tracklet.covariance = motion.second;
|
||||
|
||||
this->_tracklets.push_back(tracklet);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (int i=0; i<tgts.targets.size(); i++)
|
||||
{
|
||||
tgts.targets[i].tracked_id = 0;
|
||||
tgts.targets[i].has_tid = true;
|
||||
}
|
||||
|
||||
vector<int> match_det(tgts.targets.size(), -1);
|
||||
// predict the next state of each tracklet
|
||||
for (auto& tracklet : this->_tracklets)
|
||||
{
|
||||
tracklet.age++;
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > motion = kf.predict(tracklet.mean, tracklet.covariance);
|
||||
tracklet.bbox << motion.first(0), motion.first(1), motion.first(2) * motion.first(3), motion.first(3);
|
||||
tracklet.mean = motion.first;
|
||||
tracklet.covariance = motion.second;
|
||||
}
|
||||
|
||||
// Match the detections to the existing tracklets
|
||||
vector<vector<double> > iouMatrix(this->_tracklets.size(), vector<double> (tgts.targets.size(), 0));
|
||||
for (int i=0; i<this->_tracklets.size(); i++)
|
||||
{
|
||||
for (int j=0; j<tgts.targets.size(); j++)
|
||||
{
|
||||
sv::Box box;
|
||||
tgts.targets[j].getBox(box);
|
||||
iouMatrix[i][j] = this->_iou(this->_tracklets[i], box);
|
||||
}
|
||||
}
|
||||
|
||||
vector<pair<int, int> > matches = this->_hungarian(iouMatrix);
|
||||
for (auto& match : matches)
|
||||
{
|
||||
int trackletIndex = match.first;
|
||||
int detectionIndex = match.second;
|
||||
if (trackletIndex >= 0 && detectionIndex >= 0)
|
||||
{
|
||||
if (iouMatrix[match.first][match.second] <= 1-_iou_threshold) // iou_thrshold
|
||||
{
|
||||
sv::Box box;
|
||||
tgts.targets[detectionIndex].getBox(box);
|
||||
this->_tracklets[trackletIndex].age = 0;
|
||||
this->_tracklets[trackletIndex].hits++;
|
||||
this->_tracklets[trackletIndex].frame_id = tgts.frame_id;
|
||||
this->_tracklets[trackletIndex].bbox << box.x1+(box.x2-box.x1)/2, box.y1+(box.y2-box.y1)/2, box.x2-box.x1, box.y2-box.y1;
|
||||
tgts.targets[detectionIndex].tracked_id = this->_tracklets[trackletIndex].id;
|
||||
match_det[detectionIndex] = trackletIndex;
|
||||
}
|
||||
}
|
||||
}
|
||||
std::vector <vector<double>> ().swap(iouMatrix);
|
||||
for (int i=0; i<tgts.targets.size(); i++)
|
||||
{
|
||||
if (match_det[i] == -1)
|
||||
{
|
||||
sv::Box box;
|
||||
tgts.targets[i].getBox(box);
|
||||
Tracklet tracklet;
|
||||
tracklet.id = ++ this->_next_tracklet_id;
|
||||
tracklet.bbox << box.x1+(box.x2-box.x1)/2, (double)(box.y1+(box.y2-box.y1)/2), box.x2-box.x1, box.y2-box.y1; // c_x, c_y, w, h
|
||||
tracklet.age = 0;
|
||||
tracklet.hits = 1;
|
||||
tracklet.frame_id = tgts.frame_id;
|
||||
tracklet.category_id = tgts.targets[i].category_id;
|
||||
tracklet.tentative = true;
|
||||
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > new_motion = kf.initiate(tracklet.bbox);
|
||||
tracklet.mean = new_motion.first;
|
||||
tracklet.covariance = new_motion.second;
|
||||
|
||||
tgts.targets[i].tracked_id = this->_next_tracklet_id;
|
||||
tgts.targets[i].has_tid = true;
|
||||
this->_tracklets.push_back(tracklet);
|
||||
}
|
||||
else
|
||||
{
|
||||
sv::Box box;
|
||||
int track_id = match_det[i];
|
||||
tgts.targets[i].getBox(box);
|
||||
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > updated = kf.update(this->_tracklets[track_id].mean, this->_tracklets[track_id].covariance, box);
|
||||
this->_tracklets[track_id].mean = updated.first;
|
||||
this->_tracklets[track_id].covariance = updated.second;
|
||||
}
|
||||
}
|
||||
|
||||
//sift tracklets
|
||||
for (auto& tracklet : this->_tracklets)
|
||||
{
|
||||
if (tracklet.hits >= _min_hits)
|
||||
{
|
||||
tracklet.tentative = false;
|
||||
}
|
||||
if ((tgts.frame_id-tracklet.frame_id <= _max_age) && !(tracklet.tentative && tracklet.frame_id != tgts.frame_id))
|
||||
{
|
||||
_new_tracklets.push_back(tracklet);
|
||||
}
|
||||
}
|
||||
_tracklets = _new_tracklets;
|
||||
std::vector <Tracklet> ().swap(_new_tracklets);
|
||||
}
|
||||
}
|
||||
|
||||
vector<Tracklet> SORT::getTracklets() const
|
||||
{
|
||||
return this->_tracklets;
|
||||
}
|
||||
|
||||
double SORT::_iou(Tracklet& tracklet, sv::Box& box)
|
||||
{
|
||||
double trackletX1 = tracklet.bbox(0)-tracklet.bbox(2)/2;
|
||||
double trackletY1 = tracklet.bbox(1)-tracklet.bbox(3)/2;
|
||||
double trackletX2 = tracklet.bbox(0) + tracklet.bbox(2)/2;
|
||||
double trackletY2 = tracklet.bbox(1) + tracklet.bbox(3)/2;
|
||||
|
||||
double detectionX1 = box.x1;
|
||||
double detectionY1 = box.y1;
|
||||
double detectionX2 = box.x2;
|
||||
double detectionY2 = box.y2;
|
||||
|
||||
double intersectionX1 = max(trackletX1, detectionX1);
|
||||
double intersectionY1 = max(trackletY1, detectionY1);
|
||||
double intersectionX2 = min(trackletX2, detectionX2);
|
||||
double intersectionY2 = min(trackletY2, detectionY2);
|
||||
|
||||
double w = (intersectionX2-intersectionX1 > 0.0) ? (intersectionX2-intersectionX1) : 0.0;
|
||||
double h = (intersectionY2-intersectionY1 > 0.0) ? (intersectionY2-intersectionY1) : 0.0;
|
||||
double intersectionArea = w * h;
|
||||
|
||||
double trackletArea = tracklet.bbox(2) * tracklet.bbox(3);
|
||||
|
||||
double detectionArea = (box.x2-box.x1) * (box.y2-box.y1);
|
||||
double unionArea = trackletArea + detectionArea - intersectionArea;
|
||||
double iou = (1 - intersectionArea / unionArea * 1.0);
|
||||
|
||||
return iou;
|
||||
}
|
||||
|
||||
// Function to find the minimum element in a vector
|
||||
double SORT::_findMin(const std::vector<double>& vec) {
|
||||
double minVal = std::numeric_limits<double>::max();
|
||||
for (double val : vec) {
|
||||
if (val < minVal) {
|
||||
minVal = val;
|
||||
}
|
||||
}
|
||||
return minVal;
|
||||
}
|
||||
|
||||
// Function to subtract the minimum value from each row of the cost matrix
|
||||
void SORT::_subtractMinFromRows(std::vector<std::vector<double>>& costMatrix) {
|
||||
for (auto& row : costMatrix) {
|
||||
double minVal = _findMin(row);
|
||||
for (double& val : row) {
|
||||
val -= minVal;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Function to subtract the minimum value from each column of the cost matrix
|
||||
void SORT::_subtractMinFromCols(std::vector<std::vector<double>>& costMatrix) {
|
||||
for (size_t col = 0; col < costMatrix[0].size(); ++col) {
|
||||
double minVal = std::numeric_limits<double>::max();
|
||||
for (size_t row = 0; row < costMatrix.size(); ++row) {
|
||||
if (costMatrix[row][col] < minVal) {
|
||||
minVal = costMatrix[row][col];
|
||||
}
|
||||
}
|
||||
for (size_t row = 0; row < costMatrix.size(); ++row) {
|
||||
costMatrix[row][col] -= minVal;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Function to find a matching using the Hungarian algorithm
|
||||
vector<pair<int, int> > SORT::_hungarian(vector<vector<double> > costMatrix)
|
||||
{
|
||||
size_t numRows = costMatrix.size();
|
||||
size_t numCols = costMatrix[0].size();
|
||||
|
||||
//transpose the matrix if necessary
|
||||
const bool transposed = numCols > numRows;
|
||||
if (transposed) {
|
||||
vector<vector<double>> transposedMatrix(numCols, vector<double>(numRows));
|
||||
for (int i = 0; i < numRows; i++)
|
||||
{
|
||||
for (int j = 0; j < numCols; j++)
|
||||
{
|
||||
transposedMatrix[j][i] = costMatrix[i][j];
|
||||
}
|
||||
}
|
||||
costMatrix = transposedMatrix;
|
||||
swap(numRows, numCols);
|
||||
}
|
||||
// Determine the larger dimension for matching
|
||||
size_t maxDim = std::max(numRows, numCols);
|
||||
|
||||
// Create a square cost matrix by padding with zeros if necessary
|
||||
std::vector<std::vector<double>> squareMatrix(maxDim, std::vector<double>(maxDim, 0.0));
|
||||
for (size_t row = 0; row < numRows; ++row) {
|
||||
for (size_t col = 0; col < numCols; ++col) {
|
||||
squareMatrix[row][col] = costMatrix[row][col];
|
||||
}
|
||||
}
|
||||
|
||||
// Subtract the minimum value from each row and column
|
||||
_subtractMinFromRows(squareMatrix);
|
||||
_subtractMinFromCols(squareMatrix);
|
||||
|
||||
// Initialize the assignment vectors with -1 values
|
||||
std::vector<int> rowAssignment(maxDim, -1);
|
||||
std::vector<int> colAssignment(maxDim, -1);
|
||||
|
||||
// Perform the matching
|
||||
for (size_t row = 0; row < maxDim; ++row) {
|
||||
std::vector<bool> visitedCols(maxDim, false);
|
||||
for (size_t col = 0; col < maxDim; ++col) {
|
||||
if (squareMatrix[row][col] == 0 && colAssignment[col] == -1) {
|
||||
rowAssignment[row] = col;
|
||||
colAssignment[col] = row;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Convert the assignment vectors to pair<int, int> format
|
||||
std::vector<std::pair<int, int>> assignmentPairs;
|
||||
for (size_t row = 0; row < numRows; ++row) {
|
||||
int col = rowAssignment[row];
|
||||
if (col != -1) {
|
||||
if (col >= numCols) {
|
||||
col = -1;
|
||||
}
|
||||
assignmentPairs.emplace_back(row, col);
|
||||
}
|
||||
}
|
||||
if (transposed) {
|
||||
for (auto& assignment : assignmentPairs)
|
||||
{
|
||||
swap(assignment.first, assignment.second);
|
||||
}
|
||||
}
|
||||
return assignmentPairs;
|
||||
}
|
||||
}
|
||||
|
|
@ -1,77 +0,0 @@
|||
#ifndef __SV_SORT__
#define __SV_SORT__

#include "sv_core.h"
#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/tracking.hpp>
#include <string>
#include <chrono>
#include <Eigen/Dense>

namespace sv {

// define the tracklet struct to store the tracked objects.
struct Tracklet
{
    /* data */
public:
    Eigen::Vector4d bbox; // double x, y, w, h;
    int id = 0;
    int age;
    int hits;
    int misses;
    int frame_id = 0;
    int category_id;
    bool tentative;
    std::vector<double> features;
    Eigen::Matrix<double, 8, 1> mean;
    Eigen::Matrix<double, 8, 8> covariance;
};

class KalmanFilter {
public:
    KalmanFilter();
    ~KalmanFilter();
    std::pair<Eigen::Matrix<double, 8, 1>, Eigen::Matrix<double, 8, 8> > initiate(Eigen::Vector4d &bbox);
    std::pair<Eigen::Matrix<double, 8, 1>, Eigen::Matrix<double, 8, 8> > update(Eigen::Matrix<double, 8, 1> mean, Eigen::Matrix<double, 8, 8> covariances, Box &box);
    std::pair<Eigen::Matrix<double, 8, 1>, Eigen::Matrix<double, 8, 8> > predict(Eigen::Matrix<double, 8, 1> mean, Eigen::Matrix<double, 8, 8> covariances);
    std::pair<Eigen::Matrix<double, 4, 1>, Eigen::Matrix<double, 4, 4> > project(Eigen::Matrix<double, 8, 1> mean, Eigen::Matrix<double, 8, 8> covariances);
private:
    Eigen::Matrix<double, 8, 8> _F;
    Eigen::Matrix<double, 4, 8> _H;
    Eigen::Matrix<double, 9, 1> _chi2inv95;
    double _std_weight_position;
    double _std_weight_vel;
};

class SORT {
public:
    SORT(double iou_threshold, int max_age, int min_hits): _iou_threshold(iou_threshold), _max_age(max_age), _min_hits(min_hits), _next_tracklet_id(0) {};
    ~SORT();
    void update(TargetsInFrame &tgts);
    std::vector<Tracklet> getTracklets() const;
private:
    double _iou(Tracklet &tracklet, Box &box);
    std::vector<std::pair<int,int>> _hungarian(std::vector<std::vector<double>> costMatrix);
    double _findMin(const std::vector<double>& vec);
    void _subtractMinFromRows(std::vector<std::vector<double>>& costMatrix);
    void _subtractMinFromCols(std::vector<std::vector<double>>& costMatrix);
    //bool _augment(const std::vector<std::vector<double>>& costMatrix, int row, std::vector<int>& rowMatch, std::vector<int>& colMatch, std::vector<bool>& visited);

    double _iou_threshold;
    int _max_age;
    int _min_hits;
    int _next_tracklet_id;
    std::vector <Tracklet> _tracklets;
    std::vector <Tracklet> _new_tracklets;
};

}
#endif
|
@ -5,8 +5,6 @@
|||
#include <vector>
#include "gason.h"
#include "sv_util.h"
#include "sort.h"
#include "BYTETracker.h"

using namespace std;
using namespace Eigen;
|
@ -18,22 +16,15 @@ MultipleObjectTracker::MultipleObjectTracker()
|||
{
    this->_params_loaded = false;
    this->_sort_impl = NULL;
    this->_bytetrack_impl = NULL;
}
MultipleObjectTracker::~MultipleObjectTracker()
{
    if (this->_sort_impl)
        delete this->_sort_impl;
    else if (this->_bytetrack_impl)
        delete this->_bytetrack_impl;
}

sv::TargetsInFrame MultipleObjectTracker::track(cv::Mat img_, TargetsInFrame& tgts_)
void MultipleObjectTracker::track(cv::Mat img_, TargetsInFrame& tgts_)
{
    sv::TargetsInFrame person_tgts(tgts_.frame_id);
    person_tgts.width = img_.size().width;
    person_tgts.height = img_.size().height;

    if (!this->_params_loaded)
    {
        this->_load();
|
@ -42,28 +33,8 @@ sv::TargetsInFrame MultipleObjectTracker::track(cv::Mat img_, TargetsInFrame& tg
|||
    if ("sort" == this->_algorithm && this->_sort_impl)
    {
        this->_detector->detect(img_, tgts_);
        for (auto target : tgts_.targets)
        {
            if (target.category_id == 0)
            {
                person_tgts.targets.push_back(target);
            }
        }
        this->_sort_impl->update(person_tgts);
        this->_sort_impl->update(tgts_);
    }
    else if ("bytetrack" == this->_algorithm && this->_bytetrack_impl)
    {
        this->_detector->detect(img_, tgts_);
        for (auto target : tgts_.targets)
        {
            if (target.category_id == 0)
            {
                person_tgts.targets.push_back(target);
            }
        }
        this->_bytetrack_impl->update(person_tgts);
    }
    return person_tgts;
}

void MultipleObjectTracker::init(CommonObjectDetector* detector_)
|
@ -77,10 +48,6 @@ void MultipleObjectTracker::init(CommonObjectDetector* detector_)
|||
    {
        this->_sort_impl = new SORT(this->_iou_thres, this->_max_age, this->_min_hits);
    }
    else if("bytetrack" == this->_algorithm)
    {
        this->_bytetrack_impl = new BYTETracker(this->_frame_rate, this->_track_buffer);
    }
    this->_detector = detector_;
}

|
@ -104,7 +71,7 @@ void MultipleObjectTracker::_load()
|||
            else
                this->_with_reid = false;

            //std::cout << "with_reid: " << this->_with_reid << std::endl;
            std::cout << "with_reid: " << this->_with_reid << std::endl;
        }
        else if ("reid_input_size" == std::string(i->key) && i->value.getTag() == JSON_ARRAY) {
            int jcnt = 0;
|
@ -117,39 +84,395 @@ void MultipleObjectTracker::_load()
|||
                }
                jcnt += 1;
            }
            //std::cout << "reid_input_w: " << this->_reid_input_w << std::endl;
            //std::cout << "reid_input_h: " << this->_reid_input_h << std::endl;
            std::cout << "reid_input_w: " << this->_reid_input_w << std::endl;
            std::cout << "reid_input_h: " << this->_reid_input_h << std::endl;
        }
        else if ("reid_num_samples" == std::string(i->key)) {
            this->_reid_num_samples = i->value.toNumber();
            //std::cout << "reid_num_samples: " << this->_reid_num_samples << std::endl;
            std::cout << "reid_num_samples: " << this->_reid_num_samples << std::endl;
        }
        else if ("reid_match_thres" == std::string(i->key)) {
            this->_reid_match_thres = i->value.toNumber();
            //std::cout << "reid_match_thres: " << this->_reid_match_thres << std::endl;
            std::cout << "reid_match_thres: " << this->_reid_match_thres << std::endl;
        }
        else if ("iou_thres" == std::string(i->key)) {
            this->_iou_thres = i->value.toNumber();
            //std::cout << "iou_thres: " << this->_iou_thres << std::endl;
            std::cout << "iou_thres: " << this->_iou_thres << std::endl;
        }
        else if ("max_age" == std::string(i->key)) {
            this->_max_age = i->value.toNumber();
            //std::cout << "max_age: " << this->_max_age << std::endl;
            std::cout << "max_age: " << this->_max_age << std::endl;
        }
        else if ("min_hits" == std::string(i->key)) {
            this->_min_hits = i->value.toNumber();
            //std::cout << "min_hits: " << this->_min_hits << std::endl;
        }
        else if ("frame_rate" == std::string(i->key)) {
            this->_frame_rate = i->value.toNumber();
            //std::cout << "max_age: " << this->_max_age << std::endl;
        }
        else if ("track_buffer" == std::string(i->key)) {
            this->_track_buffer = i->value.toNumber();
            //std::cout << "min_hits: " << this->_min_hits << std::endl;
            std::cout << "min_hits: " << this->_min_hits << std::endl;
        }
    }
}


KalmanFilter::KalmanFilter()
{
    this->_chi2inv95 << 3.8415, 5.9915, 7.8147, 9.4877, 11.070, 12.592, 14.067, 15.507, 16.919;
    this->_F = MatrixXd::Identity(8, 8);
    for (int i=0; i<4; i++)
    {
        this->_F(i,i+4) = 1.; //1
    }
    this->_H = MatrixXd::Identity(4, 8);
    this->_std_weight_position = 1. / 20; //1./20
    this->_std_weight_vel = 1. / 160; //1./160
}

KalmanFilter::~KalmanFilter()
{
}

pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > KalmanFilter::initiate(Vector4d &bbox)
{
    Matrix<double,8,1> mean;
    Matrix<double,4,1> zero_vector;
    zero_vector.setZero();
    mean << bbox(0), bbox(1), (double)bbox(2) / (double)bbox(3), bbox(3), zero_vector;
    VectorXd stds(8);
    stds << 2 * this->_std_weight_position * mean(3), 2 * this->_std_weight_position * mean(3), 0.01, 2 * this->_std_weight_position * mean(3), \
        10 * this->_std_weight_vel * mean(3), 10 * this->_std_weight_vel * mean(3), 1e-5, 10 * this->_std_weight_vel * mean(3);
    MatrixXd squared = stds.array().square();
    Matrix<double, 8, 8> covariances;
    covariances = squared.asDiagonal();
    return make_pair(mean, covariances);
}

pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > KalmanFilter::update(Matrix<double, 8, 1> mean, Matrix<double, 8, 8> covariances, sv::Box &box)
{
    VectorXd measurement(4);
    double a = (double)(box.x2-box.x1) / (double)(box.y2-box.y1);
    measurement << box.x1+(box.x2-box.x1)/2, box.y1+(box.y2-box.y1)/2, a, box.y2-box.y1;
    pair<Matrix<double, 4, 1>, Matrix<double, 4, 4> > projected = project(mean, covariances);
    Matrix<double, 4, 1> projected_mean = projected.first;
    Matrix<double, 4, 4> projected_cov = projected.second;

    Eigen::LLT<Eigen::MatrixXd> chol_factor(projected_cov);
    MatrixXd Kalman_gain = (chol_factor.solve((covariances * this->_H.transpose()).transpose())).transpose();

    VectorXd innovation = measurement - projected_mean;
    Matrix<double, 8, 1> new_mean = mean + Kalman_gain *innovation;
    Matrix<double, 8, 8> new_covariances = covariances - Kalman_gain * projected_cov * Kalman_gain.transpose();

    return make_pair(new_mean, new_covariances);
}

pair<Matrix<double, 4, 1>, Matrix<double, 4, 4> > KalmanFilter::project(Matrix<double, 8, 1> mean, Matrix<double, 8, 8> covariances)
{
    VectorXd stds(4);
    stds << this->_std_weight_position * mean(3), this->_std_weight_position * mean(3), 0.1, this->_std_weight_position * mean(3);
    MatrixXd squared = stds.array().square();
    MatrixXd R = squared.asDiagonal();

    Matrix<double, 4, 1> pro_mean = this->_H * mean;
    Matrix<double, 4, 4> pro_covariances = this->_H * covariances * this->_H.transpose() + R;
    return make_pair(pro_mean, pro_covariances);
}
pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > KalmanFilter::predict(Matrix<double, 8, 1> mean, Matrix<double, 8, 8> covariances)
{
    VectorXd stds(8);
    stds << this->_std_weight_position * mean(3), this->_std_weight_position * mean(3), 1e-2, this->_std_weight_position * mean(3), \
        this->_std_weight_vel * mean(3), this->_std_weight_vel * mean(3), 1e-5, this->_std_weight_vel * mean(3); // a = 0.01
    MatrixXd squared = stds.array().square();
    MatrixXd Q = squared.asDiagonal();
    Matrix<double, 8, 1> pre_mean = this->_F * mean;
    Matrix<double, 8, 8> pre_cov = this->_F * covariances * this->_F.transpose()+Q;//+Q
    return make_pair(pre_mean, pre_cov);
}


SORT::~SORT()
{
}

void SORT::update(TargetsInFrame& tgts)
{
    sv::KalmanFilter kf;
    if (! this->_tracklets.size() || tgts.targets.size() == 0)
    {
        Vector4d bbox;
        for (int i=0; i<tgts.targets.size(); i++)
        {
            sv::Box box;
            tgts.targets[i].getBox(box);
            Tracklet tracklet;
            tracklet.id = ++ this->_next_tracklet_id;
            tgts.targets[i].tracked_id = this->_next_tracklet_id;
            tgts.targets[i].has_tid = true;

            tracklet.bbox << box.x1+(box.x2-box.x1)/2, box.y1+(box.y2-box.y1)/2, box.x2-box.x1, box.y2-box.y1; // x,y,w,h; center(x,y)

            tracklet.age = 0;
            tracklet.hits = 1;
            tracklet.misses = 0;
            tracklet.frame_id = tgts.frame_id;
            tracklet.category_id = tgts.targets[i].category_id;
            tracklet.tentative = true;

            // initate the motion
            pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > motion = kf.initiate(tracklet.bbox);
            tracklet.mean = motion.first;
            tracklet.covariance = motion.second;

            this->_tracklets.push_back(tracklet);
        }
    }
    else
    {
        // cout << "frame id:" << tgts.frame_id << endl;
        for (int i=0; i<tgts.targets.size(); i++)
        {
            tgts.targets[i].tracked_id = 0;
            tgts.targets[i].has_tid = true;
        }

        vector<int> match_det(tgts.targets.size(), -1);
        // predict the next state of each tracklet
        for (auto& tracklet : this->_tracklets)
        {
            tracklet.age++;
            pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > motion = kf.predict(tracklet.mean, tracklet.covariance);
            tracklet.bbox << motion.first(0), motion.first(1), motion.first(2) * motion.first(3), motion.first(3);
            tracklet.mean = motion.first;
            tracklet.covariance = motion.second;
        }

        // Match the detections to the existing tracklets
        vector<vector<double> > iouMatrix(this->_tracklets.size(), vector<double> (tgts.targets.size(), 0));
        for (int i=0; i<this->_tracklets.size(); i++)
        {
            for (int j=0; j<tgts.targets.size(); j++)
            {
                sv::Box box;
                tgts.targets[j].getBox(box);
                iouMatrix[i][j] = this->_iou(this->_tracklets[i], box);
            }
        }

        vector<pair<int, int> > matches = this->_hungarian(iouMatrix);
        for (auto& match : matches)
        {
            int trackletIndex = match.first;
            int detectionIndex = match.second;
            if (trackletIndex >= 0 && detectionIndex >= 0)
            {
                if (iouMatrix[match.first][match.second] <= 1-_iou_threshold) // iou_thrshold
                {
                    sv::Box box;
                    tgts.targets[detectionIndex].getBox(box);
                    this->_tracklets[trackletIndex].age = 0;
                    this->_tracklets[trackletIndex].hits++;
                    this->_tracklets[trackletIndex].frame_id = tgts.frame_id;
                    this->_tracklets[trackletIndex].bbox << box.x1+(box.x2-box.x1)/2, box.y1+(box.y2-box.y1)/2, box.x2-box.x1, box.y2-box.y1;
                    tgts.targets[detectionIndex].tracked_id = this->_tracklets[trackletIndex].id;
                    match_det[detectionIndex] = trackletIndex;
                }
            }
        }
        std::vector <vector<double>> ().swap(iouMatrix);
        for (int i=0; i<tgts.targets.size(); i++)
        {
            // cout << "match_det: index: " << i << " value: " << match_det[i] << endl;
            if (match_det[i] == -1)
            {
                // cout << "create new tracklet." << endl;
                sv::Box box;
                tgts.targets[i].getBox(box);
                Tracklet tracklet;
                tracklet.id = ++ this->_next_tracklet_id;
                tracklet.bbox << box.x1+(box.x2-box.x1)/2, (double)(box.y1+(box.y2-box.y1)/2), box.x2-box.x1, box.y2-box.y1;
                tracklet.age = 0;
                tracklet.hits = 1;
                tracklet.misses = 0;
                tracklet.frame_id = tgts.frame_id;
                tracklet.category_id = tgts.targets[i].category_id;
                tracklet.tentative = true;

                pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > new_motion = kf.initiate(tracklet.bbox);
                tracklet.mean = new_motion.first;
                tracklet.covariance = new_motion.second;

                tgts.targets[i].tracked_id = this->_next_tracklet_id;
                tgts.targets[i].has_tid = true;
                this->_tracklets.push_back(tracklet);
            }
            else
            {
                sv::Box box;
                int track_id = match_det[i];
                tgts.targets[i].getBox(box);
                pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > updated = kf.update(this->_tracklets[track_id].mean, this->_tracklets[track_id].covariance, box);
                this->_tracklets[track_id].mean = updated.first;
                this->_tracklets[track_id].covariance = updated.second;
            }
        }

        //sift tracklets
        for (auto& tracklet : this->_tracklets)
        {
            if (tracklet.hits >= _min_hits)
            {
                tracklet.tentative = false;
            }
            if ((tgts.frame_id-tracklet.frame_id <= _max_age) || (!tracklet.tentative && tracklet.frame_id == tgts.frame_id))
            {
                _new_tracklets.push_back(tracklet);
            }
        }
        _tracklets = _new_tracklets;
        std::vector <Tracklet> ().swap(_new_tracklets);
    }
}

vector<Tracklet> SORT::getTracklets() const
{
    return this->_tracklets;
}

double SORT::_iou(Tracklet& tracklet, sv::Box& box)
{
    double trackletX1 = tracklet.bbox(0)-tracklet.bbox(2)/2;
    double trackletY1 = tracklet.bbox(1)-tracklet.bbox(3)/2;
    double trackletX2 = tracklet.bbox(0) + tracklet.bbox(2)/2;
    double trackletY2 = tracklet.bbox(1) + tracklet.bbox(3)/2;

    double detectionX1 = box.x1;
    double detectionY1 = box.y1;
    double detectionX2 = box.x2;
    double detectionY2 = box.y2;

    double intersectionX1 = max(trackletX1, detectionX1);
    double intersectionY1 = max(trackletY1, detectionY1);
    double intersectionX2 = min(trackletX2, detectionX2);
    double intersectionY2 = min(trackletY2, detectionY2);

    double w = (intersectionX2-intersectionX1 > 0.0) ? (intersectionX2-intersectionX1) : 0.0;
    double h = (intersectionY2-intersectionY1 > 0.0) ? (intersectionY2-intersectionY1) : 0.0;
    double intersectionArea = w * h;

    double trackletArea = tracklet.bbox(2) * tracklet.bbox(3);

    double detectionArea = (box.x2-box.x1) * (box.y2-box.y1);
    double unionArea = trackletArea + detectionArea - intersectionArea;
    double iou = (1 - intersectionArea / unionArea * 1.0);

    return iou;
}

// Function to find the minimum element in a vector
double SORT::_findMin(const std::vector<double>& vec) {
    double minVal = std::numeric_limits<double>::max();
    for (double val : vec) {
        if (val < minVal) {
            minVal = val;
        }
    }
    return minVal;
}

// Function to subtract the minimum value from each row of the cost matrix
void SORT::_subtractMinFromRows(std::vector<std::vector<double>>& costMatrix) {
    for (auto& row : costMatrix) {
        double minVal = _findMin(row);
        for (double& val : row) {
            val -= minVal;
        }
    }
}

// Function to subtract the minimum value from each column of the cost matrix
void SORT::_subtractMinFromCols(std::vector<std::vector<double>>& costMatrix) {
    for (size_t col = 0; col < costMatrix[0].size(); ++col) {
        double minVal = std::numeric_limits<double>::max();
        for (size_t row = 0; row < costMatrix.size(); ++row) {
            if (costMatrix[row][col] < minVal) {
                minVal = costMatrix[row][col];
            }
        }
        for (size_t row = 0; row < costMatrix.size(); ++row) {
            costMatrix[row][col] -= minVal;
        }
    }
}

// Function to find a matching using the Hungarian algorithm
|
||||
vector<pair<int, int> > SORT::_hungarian(vector<vector<double> > costMatrix)
|
||||
{
|
||||
size_t numRows = costMatrix.size();
|
||||
size_t numCols = costMatrix[0].size();
|
||||
|
||||
//transpose the matrix if necessary
|
||||
const bool transposed = numCols > numRows;
|
||||
if (transposed) {
|
||||
vector<vector<double>> transposedMatrix(numCols, vector<double>(numRows));
|
||||
for (int i = 0; i < numRows; i++)
|
||||
{
|
||||
for (int j = 0; j < numCols; j++)
|
||||
{
|
||||
transposedMatrix[j][i] = costMatrix[i][j];
|
||||
}
|
||||
}
|
||||
costMatrix = transposedMatrix;
|
||||
swap(numRows, numCols);
|
||||
}
|
||||
// Determine the larger dimension for matching
|
||||
size_t maxDim = std::max(numRows, numCols);
|
||||
|
||||
// Create a square cost matrix by padding with zeros if necessary
|
||||
std::vector<std::vector<double>> squareMatrix(maxDim, std::vector<double>(maxDim, 0.0));
|
||||
for (size_t row = 0; row < numRows; ++row) {
|
||||
for (size_t col = 0; col < numCols; ++col) {
|
||||
squareMatrix[row][col] = costMatrix[row][col];
|
||||
}
|
||||
}
|
||||
|
||||
// Subtract the minimum value from each row and column
|
||||
_subtractMinFromRows(squareMatrix);
|
||||
_subtractMinFromCols(squareMatrix);
|
||||
|
||||
// Initialize the assignment vectors with -1 values
|
||||
std::vector<int> rowAssignment(maxDim, -1);
|
||||
std::vector<int> colAssignment(maxDim, -1);
|
||||
|
||||
// Perform the matching
|
||||
for (size_t row = 0; row < maxDim; ++row) {
|
||||
std::vector<bool> visitedCols(maxDim, false);
|
||||
for (size_t col = 0; col < maxDim; ++col) {
|
||||
if (squareMatrix[row][col] == 0 && colAssignment[col] == -1) {
|
||||
rowAssignment[row] = col;
|
||||
colAssignment[col] = row;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Convert the assignment vectors to pair<int, int> format
|
||||
std::vector<std::pair<int, int>> assignmentPairs;
|
||||
for (size_t row = 0; row < numRows; ++row) {
|
||||
int col = rowAssignment[row];
|
||||
//if (col != -1) {
|
||||
// assignmentPairs.emplace_back(row, col);
|
||||
// }
|
||||
if (col != -1) {
|
||||
if (col >= numCols) {
|
||||
col = -1;
|
||||
}
|
||||
assignmentPairs.emplace_back(row, col);
|
||||
}
|
||||
}
|
||||
if (transposed) {
|
||||
for (auto& assignment : assignmentPairs)
|
||||
{
|
||||
swap(assignment.first, assignment.second);
|
||||
}
|
||||
}
|
||||
return assignmentPairs;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
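For readers new to this tracker, a minimal per-frame driver is sketched below. It relies only on the SORT interface declared in sv_mot.h later in this change (constructor, update(), getTracklets()); the threshold values are illustrative, the header name follows the convention used elsewhere in this repository, and the detector that fills `tgts` is assumed to exist.

// Minimal usage sketch of the SORT tracker above (illustrative thresholds;
// a detector is assumed to have populated `tgts` for the current frame).
#include <sv_world.h>

void track_one_frame(sv::TargetsInFrame &tgts)
{
    // Reuse one instance across frames: 0.5 IoU gate, drop tracks unseen for
    // 10 frames, confirm a track after 3 hits.
    static sv::SORT sort_tracker(0.5, 10, 3);

    sort_tracker.update(tgts); // writes tracked_id into each target

    for (const auto &trk : sort_tracker.getTracklets())
    {
        // trk.bbox stores (center_x, center_y, width, height)
        double cx = trk.bbox(0), cy = trk.bbox(1);
        (void)cx; (void)cy;
    }
}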
@ -1,7 +1,6 @@
#include "sot_ocv470_impl.h"
#include <cmath>
#include <fstream>
#include "sv_util.h"

#define SV_MODEL_DIR "/SpireCV/models/"
#define SV_ROOT_DIR "/SpireCV/"

@ -31,39 +30,9 @@ bool SingleObjectTrackerOCV470Impl::ocv470Setup(SingleObjectTrackerBase* base_)
    std::string net = get_home() + SV_MODEL_DIR + "dasiamrpn_model.onnx";
    std::string kernel_cls1 = get_home() + SV_MODEL_DIR + "dasiamrpn_kernel_cls1.onnx";
    std::string kernel_r1 = get_home() + SV_MODEL_DIR + "dasiamrpn_kernel_r1.onnx";

    std::vector<std::string> files1, files2, files3;
    _list_dir(get_home() + SV_MODEL_DIR, files1, ".onnx", "Ocv-DaSiamRPN-Model-");
    _list_dir(get_home() + SV_MODEL_DIR, files2, ".onnx", "Ocv-DaSiamRPN-Kernel-CLS1-");
    _list_dir(get_home() + SV_MODEL_DIR, files3, ".onnx", "Ocv-DaSiamRPN-Kernel-R1-");
    if (files1.size() > 0 && files2.size() > 0 && files3.size() > 0)
    {
        std::sort(files1.rbegin(), files1.rend(), _comp_str_lesser);
        std::sort(files2.rbegin(), files2.rend(), _comp_str_lesser);
        std::sort(files3.rbegin(), files3.rend(), _comp_str_lesser);
        net = get_home() + SV_MODEL_DIR + files1[0];
        kernel_cls1 = get_home() + SV_MODEL_DIR + files2[0];
        kernel_r1 = get_home() + SV_MODEL_DIR + files3[0];
    }
    std::cout << "Load: " << net << std::endl;
    std::cout << "Load: " << kernel_cls1 << std::endl;
    std::cout << "Load: " << kernel_r1 << std::endl;

    std::string backbone = get_home() + SV_MODEL_DIR + "nanotrack_backbone_sim.onnx";
    std::string neckhead = get_home() + SV_MODEL_DIR + "nanotrack_head_sim.onnx";

    std::vector<std::string> files4, files5;
    _list_dir(get_home() + SV_MODEL_DIR, files4, ".onnx", "Ocv-NanoTrack-Backbone-SIM-");
    _list_dir(get_home() + SV_MODEL_DIR, files5, ".onnx", "Ocv-NanoTrack-Head-SIM-");
    if (files4.size() > 0 && files5.size() > 0)
    {
        std::sort(files4.rbegin(), files4.rend(), _comp_str_lesser);
        std::sort(files5.rbegin(), files5.rend(), _comp_str_lesser);
        backbone = get_home() + SV_MODEL_DIR + files4[0];
        neckhead = get_home() + SV_MODEL_DIR + files5[0];
    }
    std::cout << "Load: " << backbone << std::endl;
    std::cout << "Load: " << neckhead << std::endl;

    try
    {

@ -32,7 +32,7 @@ CameraAlgorithm::CameraAlgorithm()
    // this->_allocator = NULL;
    this->_t0 = std::chrono::system_clock::now();

    this->alg_params_fn = _get_home() + SV_ROOT_DIR + "confs/sv_algorithm_params.json";
    this->alg_params_fn = _get_home() + SV_ROOT_DIR + "sv_algorithm_params.json";
    // std::cout << "CameraAlgorithm->alg_params_fn: " << this->alg_params_fn << std::endl;
    // if (_is_file_exist(params_fn))
    //     this->loadAlgorithmParams(params_fn);

@ -80,12 +80,6 @@ ArucoDetector::ArucoDetector()
}


void ArucoDetector::getIdsWithLengths(std::vector<int>& ids_, std::vector<double>& lengths_)
{
    ids_ = this->_ids_need;
    lengths_ = this->_lengths_need;
}

void ArucoDetector::_load()
{
    JsonValue all_value;

@ -916,14 +910,6 @@ void CommonObjectDetectorBase::setInputW(int w_)
{
    this->_input_w = w_;
}
std::string CommonObjectDetectorBase::getModel()
{
    return this->_model;
}
int CommonObjectDetectorBase::getBatchSize()
{
    return this->_batch_size;
}

void CommonObjectDetectorBase::warmUp()
{

@ -1010,11 +996,6 @@ void CommonObjectDetectorBase::detect(cv::Mat img_, TargetsInFrame& tgts_, Box*
    if (this->_with_segmentation)
    {
        cv::Mat mask_j = boxes_seg[j].clone();
#ifdef WITH_INTEL
        tgt.setMask(mask_j);
#endif

#ifdef WITH_CUDA
        int maskh = mask_j.rows, maskw = mask_j.cols;
        assert(maskh == maskw);

@ -1044,7 +1025,6 @@ void CommonObjectDetectorBase::detect(cv::Mat img_, TargetsInFrame& tgts_, Box*
        {
            tgt.setMask(mask_j);
        }
#endif
    }

    tgts_.targets.push_back(tgt);

@ -1090,8 +1070,6 @@ void CommonObjectDetectorBase::_load()
    this->_thrs_nms = 0.6;
    this->_thrs_conf = 0.4;
    this->_use_width_or_height = 0;
    this->_batch_size = 1;
    this->_model = "s";

    for (auto i : detector_params_value) {

@ -1099,12 +1077,6 @@ void CommonObjectDetectorBase::_load()
        this->_dataset = i->value.toString();
        std::cout << "dataset: " << this->_dataset << std::endl;
    }
    else if ("batchSize" == std::string(i->key)) {
        this->_batch_size = i->value.toNumber();
    }
    else if ("model" == std::string(i->key)) {
        this->_model = i->value.toString();
    }
    else if ("inputSize" == std::string(i->key)) {
        // std::cout << "inputSize (old, new): " << this->_input_w << ", " << i->value.toNumber() << std::endl;
        this->_input_w = i->value.toNumber();

@ -1,7 +1,6 @@
#include "veri_det_cuda_impl.h"
#include <cmath>
#include <fstream>
#include "sv_util.h"

#define SV_MODEL_DIR "/SpireCV/models/"
#define SV_ROOT_DIR "/SpireCV/"

@ -76,16 +75,6 @@ namespace sv
{
#ifdef WITH_CUDA
    std::string trt_model_fn = get_home() + SV_MODEL_DIR + "veri.engine";

    std::vector<std::string> files;
    _list_dir(get_home() + SV_MODEL_DIR, files, "-online.engine", "Nv-VERI-mobilenet_v3");
    if (files.size() > 0)
    {
        std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
        trt_model_fn = get_home() + SV_MODEL_DIR + files[0];
    }
    std::cout << "Load: " << trt_model_fn << std::endl;

    if (!is_file_exist(trt_model_fn))
    {
        throw std::runtime_error("SpireCV (104) Error loading the VeriDetector TensorRT model (File Not Exist)");
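The setup routines above all follow the same convention for picking a model file: list candidates carrying a versioned prefix, sort them in descending order, and fall back to a default name when nothing matches. A stripped-down sketch of that selection step follows; the helper name `pick_newest` is illustrative only and not part of the library.

// Sketch of the "newest versioned model wins" selection used in the setup code
// above (illustrative helper; assumes file names carry a sortable version suffix).
#include <algorithm>
#include <functional>
#include <string>
#include <vector>

std::string pick_newest(std::vector<std::string> files, const std::string &fallback)
{
    if (files.empty())
        return fallback; // e.g. "dasiamrpn_model.onnx" or "veri.engine"
    // Descending lexicographic sort, mirroring the
    // std::sort(files.rbegin(), files.rend(), _comp_str_lesser) calls above.
    std::sort(files.begin(), files.end(), std::greater<std::string>());
    return files[0];
}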
@ -1,112 +0,0 @@
#include "veri_det_intel_impl.h"
#include <cmath>
#include <fstream>

#define SV_MODEL_DIR "/SpireCV/models/"
#define SV_ROOT_DIR "/SpireCV/"

#include <iostream>
#include <cmath>
int BAT = 1;
float cosineSimilarity(float *vec1, float *vec2, int size)
{
    // compute the dot product of the two vectors
    float dotProduct = 0.0f;
    for (int i = 0; i < size; ++i)
    {
        dotProduct += vec1[i] * vec2[i];
    }

    // compute the magnitude of each vector
    float magnitudeVec1 = 0.0f;
    float magnitudeVec2 = 0.0f;
    for (int i = 0; i < size; ++i)
    {
        magnitudeVec1 += vec1[i] * vec1[i];
        magnitudeVec2 += vec2[i] * vec2[i];
    }
    magnitudeVec1 = std::sqrt(magnitudeVec1);
    magnitudeVec2 = std::sqrt(magnitudeVec2);

    // compute the cosine similarity
    float similarity = dotProduct / (magnitudeVec1 * magnitudeVec2);

    return similarity;
}

namespace sv
{
#ifdef WITH_INTEL
    using namespace cv;
    using namespace std;
    using namespace dnn;
#endif

    VeriDetectorIntelImpl::VeriDetectorIntelImpl()
    {
    }

    VeriDetectorIntelImpl::~VeriDetectorIntelImpl()
    {
    }

    bool VeriDetectorIntelImpl::intelSetup()
    {
#ifdef WITH_INTEL
        std::string onnx_model_fn = get_home() + SV_MODEL_DIR + "veri.onnx";
        if (!is_file_exist(onnx_model_fn))
        {
            throw std::runtime_error("SpireCV (104) Error loading the VeriDetector openVINO model (File Not Exist)");
        }

        // OpenVINO
        ov::Core core;
        this->compiled_model = core.compile_model(onnx_model_fn, "GPU");
        this->infer_request = compiled_model.create_infer_request();

        return true;
#endif
        return false;
    }

    void VeriDetectorIntelImpl::intelRoiCNN(
        std::vector<cv::Mat> &input_rois_,
        std::vector<float> &output_labels_)
    {
#ifdef WITH_INTEL

        Mat blobs;
        blobFromImages(input_rois_, blobs, 1 / 255.0, Size(224, 224), Scalar(0, 0, 0), true, true);

        auto input_port = compiled_model.input();
        ov::Tensor input_tensor(input_port.get_element_type(), input_port.get_shape(), blobs.ptr(0));

        infer_request.infer();

        const ov::Tensor &label_pre = infer_request.get_output_tensor(0);
        this->_p_prob1 = label_pre.data<float>();

        const ov::Tensor &proto_pre = infer_request.get_output_tensor(1);
        this->_p_prob2 = proto_pre.data<float>();

        // Find max index
        double max = 0;
        int label = 0;
        for (int i = 0; i < 576; ++i)
        {
            if (max < this->_p_prob1[i])
            {
                max = this->_p_prob1[i];
                label = i;
            }
        }

        float similarity = cosineSimilarity(this->_p_prob2, this->_p_prob2 + 1280, 1280);

        output_labels_.push_back(label);
        output_labels_.push_back(similarity);
#endif
    }

}
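The removed file above defined a small cosineSimilarity helper; for reference, a self-contained check of what it computes (purely illustrative, the helper itself is declared as above):

// Standalone check of the cosineSimilarity helper defined in the removed file.
#include <cassert>
#include <cmath>

float cosineSimilarity(float *vec1, float *vec2, int size); // as defined above

void cosine_similarity_check()
{
    float a[3] = {1.0f, 0.0f, 0.0f};
    float b[3] = {0.0f, 1.0f, 0.0f};
    float c[3] = {2.0f, 0.0f, 0.0f};
    assert(std::fabs(cosineSimilarity(a, b, 3)) < 1e-6f);        // orthogonal vectors -> 0
    assert(std::fabs(cosineSimilarity(a, c, 3) - 1.0f) < 1e-6f); // parallel vectors   -> 1
}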
@ -1,41 +0,0 @@
#ifndef __SV_VERI_DET_INTEL__
#define __SV_VERI_DET_INTEL__

#include "sv_core.h"
#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/tracking.hpp>
#include <string>
#include <chrono>

#ifdef WITH_INTEL
#include <openvino/openvino.hpp>
#endif

namespace sv
{
    class VeriDetectorIntelImpl
    {
    public:
        VeriDetectorIntelImpl();
        ~VeriDetectorIntelImpl();

        bool intelSetup();
        void intelRoiCNN(
            std::vector<cv::Mat> &input_rois_,
            std::vector<float> &output_labels_);

#ifdef WITH_INTEL

        float *_p_data;
        float *_p_prob1;
        float *_p_prob2;

        ov::Tensor input_tensor;
        ov::InferRequest infer_request;
        ov::CompiledModel compiled_model;
#endif
    };

}
#endif
@ -10,11 +10,6 @@
|
|||
#include "veri_det_cuda_impl.h"
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
#include <openvino/openvino.hpp>
|
||||
#include "veri_det_intel_impl.h"
|
||||
#endif
|
||||
|
||||
#define SV_ROOT_DIR "/SpireCV/"
|
||||
|
||||
namespace sv
|
||||
|
@ -25,10 +20,6 @@ namespace sv
|
|||
#ifdef WITH_CUDA
|
||||
this->_cuda_impl = new VeriDetectorCUDAImpl;
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
this->_intel_impl = new VeriDetectorIntelImpl;
|
||||
#endif
|
||||
}
|
||||
VeriDetector::~VeriDetector()
|
||||
{
|
||||
|
@ -59,9 +50,6 @@ namespace sv
|
|||
return this->_cuda_impl->cudaSetup();
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
return this->_intel_impl->intelSetup();
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -75,12 +63,6 @@ namespace sv
|
|||
input_rois_,
|
||||
output_labels_);
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
this->_intel_impl->intelRoiCNN(
|
||||
input_rois_,
|
||||
output_labels_);
|
||||
#endif
|
||||
}
|
||||
|
||||
void VeriDetector::detect(cv::Mat img_, const cv::Rect &bounding_box_, sv::Target &tgt)
|
||||
|
@ -112,23 +94,26 @@ namespace sv
|
|||
cv::Mat img_ground = cv::imread(img_ground_dir);
|
||||
cv::resize(img_ground, img_ground, cv::Size(224, 224));
|
||||
std::vector<cv::Mat> input_rois_ = {crop, img_ground};
|
||||
|
||||
std::vector<float> output_labels;
|
||||
#ifdef WITH_CUDA
|
||||
roiCNN(input_rois_, output_labels);
|
||||
#endif
|
||||
|
||||
#ifdef WITH_INTEL
|
||||
#ifdef WITH_CUDA
|
||||
std::vector<float> output_labels;
|
||||
roiCNN(input_rois_, output_labels);
|
||||
#endif
|
||||
|
||||
|
||||
// auto t1 = std::chrono::system_clock::now();
|
||||
// tgts_.setFPS(1000.0 / std::chrono::duration_cast<std::chrono::milliseconds>(t1 - this->_t0).count());
|
||||
// this->_t0 = std::chrono::system_clock::now();
|
||||
// tgts_.setTimeNow();
|
||||
|
||||
if (output_labels.size() > 0)
|
||||
{
|
||||
// tgt.category_id = output_labels[0];
|
||||
tgt.sim_score = output_labels[1];
|
||||
// tgts_.targets.push_back(tgt);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void VeriDetector::getSubwindow(cv::Mat &dstCrop, cv::Mat &srcImg, int originalSz, int resizeSz)
|
||||
void VeriDetector::getSubwindow(cv::Mat &dstCrop, cv::Mat &srcImg, int originalSz, int resizeSz)
|
||||
{
|
||||
cv::Scalar avgChans = mean(srcImg);
|
||||
cv::Size imgSz = srcImg.size();
|
||||
|
|
|
@ -28,7 +28,6 @@ GX40GimbalDriver::GX40GimbalDriver(amovGimbal::IOStreamBase *_IO) : amovGimbal::
|
|||
targetPos[2] = 0;
|
||||
|
||||
parserState = GX40::GIMBAL_FRAME_PARSER_STATE_IDLE;
|
||||
upDataTs = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()) - std::chrono::milliseconds(2000);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -108,8 +107,7 @@ uint32_t GX40GimbalDriver::pack(IN uint32_t cmd, uint8_t *pPayload, uint8_t payl
|
|||
// 惯导数据填充
|
||||
std::chrono::milliseconds nowTs = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch());
|
||||
// over 1s GNSS has losed
|
||||
if ((nowTs.count() - upDataTs.count()) < std::chrono::milliseconds(1500).count() &&
|
||||
cmd == GX40::GIMBAL_CMD_NOP)
|
||||
if ((nowTs.count() - upDataTs.count()) < std::chrono::milliseconds(1500).count())
|
||||
{
|
||||
primary->selfRoll = (int16_t)(-(carrierPos.roll / 0.01f));
|
||||
primary->selfPitch = (int16_t)(-(carrierPos.pitch / 0.01f));
|
||||
|
|
|
@ -20,9 +20,9 @@
|
|||
uint32_t GX40GimbalDriver::setGimabalPos(const AMOV_GIMBAL_POS_T &pos)
|
||||
{
|
||||
carrierStateMutex.lock();
|
||||
targetPos[0] = (int16_t)(pos.roll / 0.01f);
|
||||
targetPos[0] = (int16_t)(-pos.roll / 0.01f);
|
||||
targetPos[1] = (int16_t)(-pos.pitch / 0.01f);
|
||||
targetPos[2] = (int16_t)(pos.yaw / 0.01f);
|
||||
targetPos[2] = (int16_t)(-pos.yaw / 0.01f);
|
||||
carrierStateMutex.unlock();
|
||||
return pack(GX40::GIMBAL_CMD_MODE_EULER, nullptr, 0);
|
||||
}
|
||||
|
@ -38,9 +38,9 @@ uint32_t GX40GimbalDriver::setGimabalPos(const AMOV_GIMBAL_POS_T &pos)
|
|||
uint32_t GX40GimbalDriver::setGimabalSpeed(const AMOV_GIMBAL_POS_T &speed)
|
||||
{
|
||||
carrierStateMutex.lock();
|
||||
targetPos[0] = (int16_t)(speed.roll / 0.1f);
|
||||
targetPos[0] = (int16_t)(-speed.roll / 0.1f);
|
||||
targetPos[1] = (int16_t)(-speed.pitch / 0.1f);
|
||||
targetPos[2] = (int16_t)(speed.yaw / 0.1f);
|
||||
targetPos[2] = (int16_t)(-speed.yaw / 0.1f);
|
||||
carrierStateMutex.unlock();
|
||||
|
||||
return pack(GX40::GIMBAL_CMD_MODE_FOLLOW, nullptr, 0);
|
||||
|
|
|
@ -509,50 +509,6 @@ void sv::Gimbal::setAngleRateEuler(double roll_rate, double pitch_rate, double y
|
|||
temp.yaw = yaw_rate;
|
||||
pdevTemp->setGimabalSpeed(temp);
|
||||
}
|
||||
void sv::Gimbal::attitudeCorrection(const GimbalQuaternionT &quaterion,
|
||||
const GimbalVelocityT &speed,
|
||||
const GimbalVelocityT &acc, void *extenData)
|
||||
{
|
||||
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
|
||||
AMOV_GIMBAL_QUATERNION_T temp_q;
|
||||
temp_q.q0 = quaterion.q0;
|
||||
temp_q.q1 = quaterion.q1;
|
||||
temp_q.q2 = quaterion.q2;
|
||||
temp_q.q3 = quaterion.q3;
|
||||
|
||||
AMOV_GIMBAL_VELOCITY_T temp_v1, temp_v2;
|
||||
temp_v1.x = speed.x;
|
||||
temp_v1.y = speed.y;
|
||||
temp_v1.z = speed.z;
|
||||
|
||||
temp_v2.x = acc.x;
|
||||
temp_v2.y = acc.y;
|
||||
temp_v2.z = acc.z;
|
||||
|
||||
pdevTemp->attitudeCorrection(temp_q, temp_v1, temp_v2, extenData);
|
||||
}
|
||||
|
||||
void sv::Gimbal::attitudeCorrection(const GimbalPosT &pos,
|
||||
const GimbalVelocityT &speed,
|
||||
const GimbalVelocityT &acc, void *extenData)
|
||||
{
|
||||
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
|
||||
AMOV_GIMBAL_VELOCITY_T temp_v1, temp_v2;
|
||||
temp_v1.x = speed.x;
|
||||
temp_v1.y = speed.y;
|
||||
temp_v1.z = speed.z;
|
||||
|
||||
temp_v2.x = acc.x;
|
||||
temp_v2.y = acc.y;
|
||||
temp_v2.z = acc.z;
|
||||
|
||||
AMOV_GIMBAL_POS_T temp_p;
|
||||
temp_p.pitch = pos.pitch;
|
||||
temp_p.yaw = pos.yaw;
|
||||
temp_p.roll = pos.roll;
|
||||
|
||||
pdevTemp->attitudeCorrection(temp_p, temp_v1, temp_v2, extenData);
|
||||
}
|
||||
|
||||
sv::Gimbal::~Gimbal()
|
||||
{
|
||||
|
|
|
@ -46,7 +46,6 @@ class ArucoDetector : public CameraAlgorithm
|
|||
public:
|
||||
ArucoDetector();
|
||||
void detect(cv::Mat img_, TargetsInFrame& tgts_);
|
||||
void getIdsWithLengths(std::vector<int>& ids_, std::vector<double>& lengths_);
|
||||
private:
|
||||
void _load();
|
||||
bool _params_loaded;
|
||||
|
@ -140,8 +139,6 @@ public:
|
|||
double getThrsConf();
|
||||
int useWidthOrHeight();
|
||||
bool withSegmentation();
|
||||
std::string getModel();
|
||||
int getBatchSize();
|
||||
protected:
|
||||
virtual bool setupImpl();
|
||||
virtual void detectImpl(
|
||||
|
@ -168,8 +165,6 @@ protected:
|
|||
double _thrs_conf;
|
||||
int _use_width_or_height;
|
||||
bool _with_segmentation;
|
||||
std::string _model;
|
||||
int _batch_size;
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,79 @@
#ifndef __SV_CAMERA__
#define __SV_CAMERA__

#include "opencv2/opencv.hpp"

namespace sv
{

    enum class CameraType : int
    {
        // unknown camera
        NONE = 0X00001000,
        // cameras driven through the standard OpenCV capture backends
        V4L2CAM = 0X00001001,
        WEBCAM = 0X00001002,
        // video file
        VIDEOFILE = 0X00002001,
        // generic streaming camera
        STREAMING = 0X00004001,
        // plain MIPI-CSI camera
        MIPI = 0X00008001,
        // cameras adapted with transport optimizations
        G1 = 0X00010001,
        Q10 = 0X00010002,
        GX40 = 0X00010003,
        MC1 = 0X00010004,
        // virtual camera device, mainly for simulated input
        VIRTUAL = 0X00020001,
    };

    class Camera
    {
    private:
        void *dev;

    public:
        // The matching camera backend is instantiated according to the camera type at construction time.
        // timeOut: camera connection timeout in ms; if no new frame is read within this period,
        // the camera is considered disconnected.
        Camera(CameraType type, int timeOut = 500);

        // Basic configuration: one of the following must be called before open()
        bool setStream(const std::string &ip, int port);
        bool setName(const std::string &name);
        bool setIndex(int index);
        // Basic operations
        bool open(void);
        bool read(cv::Mat &image);
        bool readNoBlock(cv::Mat &image);
        void release(void);

        // Property setters
        void setWH(int width, int height);
        void setFps(int fps);
        void setBrightness(double brightness);
        void setContrast(double contrast);
        void setSaturation(double saturation);
        void setHue(double hue);
        void setExposure(double exposure);

        // Property getters
        bool isActive(void);
        CameraType getType(void);
        std::string getName(void);

        int getW(void);
        int getH(void);
        int getExpectFps(void);
        double getFps(void);
        double getBrightness(void);
        double getContrast(void);
        double getSaturation(void);
        double getHue(void);
        double getExposure(void);

        ~Camera();
    };

}
#endif
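To make the intended call order of the new interface explicit (construct with a type, configure one source, open, read), a minimal read loop is sketched below; it mirrors the demo programs later in this change, assumes a webcam at index 0, and trims error handling.

// Minimal read loop against the sv::Camera interface declared above.
#include <opencv2/opencv.hpp>
#include "sv_camera.h"

int camera_read_demo()
{
    sv::Camera cap(sv::CameraType::WEBCAM);
    cap.setIndex(0); // or cap.setStream("192.168.2.64", 554) for a streaming gimbal camera
    if (!cap.open())
        return -1;

    cv::Mat img;
    while (cap.isActive())
    {
        if (!cap.read(img)) // readNoBlock(img) is the non-blocking variant
            continue;
        cv::imshow("camera", img);
        cv::waitKey(1);
    }
    cap.release();
    return 0;
}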
@ -25,6 +25,7 @@ public:
|
|||
double line_location_a2;
|
||||
|
||||
bool is_load_parameter;
|
||||
|
||||
std::string line_color;
|
||||
|
||||
protected:
|
||||
|
@ -39,4 +40,4 @@ protected:
|
|||
void seg(cv::Mat line_area_, cv::Mat line_area_a1_, cv::Mat line_area_a2_, std::string line_color_, cv::Point ¢er_, int &area_, cv::Point ¢er_a1_, cv::Point ¢er_a2_);
|
||||
};
|
||||
}
|
||||
#endif
|
||||
#endif
|
|
@ -8,36 +8,33 @@
|
|||
#include <string>
|
||||
#include <chrono>
|
||||
|
||||
namespace sv
|
||||
{
|
||||
|
||||
namespace sv {
|
||||
|
||||
class CommonObjectDetectorCUDAImpl;
|
||||
class CommonObjectDetectorIntelImpl;
|
||||
|
||||
|
||||
class CommonObjectDetector : public CommonObjectDetectorBase
|
||||
{
|
||||
public:
|
||||
CommonObjectDetector(bool input_4k = false);
|
||||
CommonObjectDetector(bool input_4k=false);
|
||||
~CommonObjectDetector();
|
||||
|
||||
protected:
|
||||
bool setupImpl();
|
||||
void detectImpl(
|
||||
cv::Mat img_,
|
||||
std::vector<float> &boxes_x_,
|
||||
std::vector<float> &boxes_y_,
|
||||
std::vector<float> &boxes_w_,
|
||||
std::vector<float> &boxes_h_,
|
||||
std::vector<int> &boxes_label_,
|
||||
std::vector<float> &boxes_score_,
|
||||
std::vector<cv::Mat> &boxes_seg_);
|
||||
|
||||
CommonObjectDetectorCUDAImpl *_cuda_impl;
|
||||
CommonObjectDetectorIntelImpl *_intel_impl;
|
||||
std::vector<float>& boxes_x_,
|
||||
std::vector<float>& boxes_y_,
|
||||
std::vector<float>& boxes_w_,
|
||||
std::vector<float>& boxes_h_,
|
||||
std::vector<int>& boxes_label_,
|
||||
std::vector<float>& boxes_score_,
|
||||
std::vector<cv::Mat>& boxes_seg_
|
||||
);
|
||||
|
||||
CommonObjectDetectorCUDAImpl* _cuda_impl;
|
||||
bool _input_4k;
|
||||
};
|
||||
|
||||
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -78,28 +78,6 @@ namespace sv
|
|||
FLOWCONTROL_HARDWARE = 2,
|
||||
};
|
||||
|
||||
typedef struct
|
||||
{
|
||||
double q0;
|
||||
double q1;
|
||||
double q2;
|
||||
double q3;
|
||||
} GimbalQuaternionT;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
double x; // or N
|
||||
double y; // or E
|
||||
double z; // or UP
|
||||
} GimbalVelocityT;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
double yaw;
|
||||
double roll;
|
||||
double pitch;
|
||||
} GimbalPosT;
|
||||
|
||||
static inline void emptyCallback(double &frameAngleRoll, double &frameAnglePitch, double &frameAngleYaw,
|
||||
double &imuAngleRoll, double &imuAnglePitch, double &imuAngleYaw,
|
||||
double &fovX, double &fovY)
|
||||
|
@ -187,12 +165,6 @@ namespace sv
|
|||
bool takePhoto();
|
||||
bool takeVideo(int state);
|
||||
int getVideoState();
|
||||
void attitudeCorrection(const GimbalQuaternionT &quaterion,
|
||||
const GimbalVelocityT &speed,
|
||||
const GimbalVelocityT &acc, void *extenData);
|
||||
void attitudeCorrection(const GimbalPosT &pos,
|
||||
const GimbalVelocityT &speed,
|
||||
const GimbalVelocityT &acc, void *extenData);
|
||||
|
||||
//! Set gimbal angles
|
||||
/*!
|
||||
|
|
|
@ -12,7 +12,6 @@
|
|||
namespace sv {
|
||||
|
||||
class LandingMarkerDetectorCUDAImpl;
|
||||
class LandingMarkerDetectorIntelImpl;
|
||||
|
||||
class LandingMarkerDetector : public LandingMarkerDetectorBase
|
||||
{
|
||||
|
@ -26,8 +25,7 @@ protected:
|
|||
std::vector<int>& output_labels_
|
||||
);
|
||||
|
||||
LandingMarkerDetectorCUDAImpl *_cuda_impl;
|
||||
LandingMarkerDetectorIntelImpl *_intel_impl;
|
||||
LandingMarkerDetectorCUDAImpl* _cuda_impl;
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -15,7 +15,7 @@ namespace sv {


class SORT;
class BYTETracker;


class MultipleObjectTracker : public CameraAlgorithm
{

@ -24,7 +24,7 @@ public:
  ~MultipleObjectTracker();

  void init(CommonObjectDetector* detector_);
  sv::TargetsInFrame track(cv::Mat img_, TargetsInFrame& tgts_);
  void track(cv::Mat img_, TargetsInFrame& tgts_);

private:
  void _load();

@ -39,12 +39,70 @@ private:
  double _iou_thres;
  int _max_age;
  int _min_hits;
  int _frame_rate;
  int _track_buffer;

  SORT* _sort_impl;
  BYTETracker* _bytetrack_impl;
  CommonObjectDetector* _detector;
};


// define the tracklet struct to store the tracked objects.
struct Tracklet
{
  /* data */
public:
  Eigen::Vector4d bbox;  // double x, y, w, h;
  int id = 0;
  int age;
  int hits;
  int misses;
  int frame_id = 0;
  int category_id;
  bool tentative;
  std::vector<double> features;
  Eigen::Matrix<double, 8, 1> mean;
  Eigen::Matrix<double, 8, 8> covariance;
};


class KalmanFilter {
public:
  KalmanFilter();
  ~KalmanFilter();
  std::pair<Eigen::Matrix<double, 8, 1>, Eigen::Matrix<double, 8, 8> > initiate(Eigen::Vector4d &bbox);
  std::pair<Eigen::Matrix<double, 8, 1>, Eigen::Matrix<double, 8, 8> > update(Eigen::Matrix<double, 8, 1> mean, Eigen::Matrix<double, 8, 8> covariances, Box &box);
  std::pair<Eigen::Matrix<double, 8, 1>, Eigen::Matrix<double, 8, 8> > predict(Eigen::Matrix<double, 8, 1> mean, Eigen::Matrix<double, 8, 8> covariances);
  std::pair<Eigen::Matrix<double, 4, 1>, Eigen::Matrix<double, 4, 4> > project(Eigen::Matrix<double, 8, 1> mean, Eigen::Matrix<double, 8, 8> covariances);
private:
  Eigen::Matrix<double, 8, 8> _F;
  Eigen::Matrix<double, 4, 8> _H;
  Eigen::Matrix<double, 9, 1> _chi2inv95;
  double _std_weight_position;
  double _std_weight_vel;
};


class SORT {
public:
  SORT(double iou_threshold, int max_age, int min_hits): _iou_threshold(iou_threshold), _max_age(max_age), _min_hits(min_hits), _next_tracklet_id(0) {};
  ~SORT();
  void update(TargetsInFrame &tgts);
  std::vector<Tracklet> getTracklets() const;
private:
  double _iou(Tracklet &tracklet, Box &box);
  std::vector<std::pair<int,int>> _hungarian(std::vector<std::vector<double>> costMatrix);
  double _findMin(const std::vector<double>& vec);
  void _subtractMinFromRows(std::vector<std::vector<double>>& costMatrix);
  void _subtractMinFromCols(std::vector<std::vector<double>>& costMatrix);
  //bool _augment(const std::vector<std::vector<double>>& costMatrix, int row, std::vector<int>& rowMatch, std::vector<int>& colMatch, std::vector<bool>& visited);

  double _iou_threshold;
  int _max_age;
  int _min_hits;
  int _next_tracklet_id;
  std::vector <Tracklet> _tracklets;
  std::vector <Tracklet> _new_tracklets;
};


}
#endif
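A note on the 8-dimensional state used by the KalmanFilter above, inferred from how SORT::update() rebuilds boxes from the predicted mean; this is an inference from the code in this change, not an authoritative specification.

// Inferred state layout (not an authoritative spec):
//   mean = [cx, cy, a, h, vx, vy, va, vh]
// where (cx, cy) is the box center, h the box height, a = w / h the aspect
// ratio, and the last four entries are the corresponding velocities.
// SORT::update() recovers the box from the predicted mean as
//   bbox << mean(0), mean(1), mean(2) * mean(3), mean(3);
// i.e. width is obtained as a * h after each predict step.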
@ -11,34 +11,33 @@
|
|||
namespace sv
|
||||
{
|
||||
|
||||
class VeriDetectorCUDAImpl;
|
||||
class VeriDetectorIntelImpl;
|
||||
class VeriDetectorCUDAImpl;
|
||||
|
||||
class VeriDetector : public LandingMarkerDetectorBase
|
||||
{
|
||||
public:
|
||||
VeriDetector();
|
||||
~VeriDetector();
|
||||
class VeriDetector : public LandingMarkerDetectorBase
|
||||
{
|
||||
public:
|
||||
VeriDetector();
|
||||
~VeriDetector();
|
||||
|
||||
void detect(cv::Mat img_, const cv::Rect &bounding_box_, sv::Target &tgt);
|
||||
void detect(cv::Mat img_, const cv::Rect &bounding_box_, sv::Target &tgt);
|
||||
|
||||
protected:
|
||||
void _load();
|
||||
bool setupImpl();
|
||||
void roiCNN(
|
||||
std::vector<cv::Mat> &input_rois_,
|
||||
std::vector<float> &output_labels_);
|
||||
void getSubwindow(cv::Mat &dstCrop, cv::Mat &srcImg, int originalSz, int resizeSz);
|
||||
protected:
|
||||
void _load();
|
||||
bool setupImpl();
|
||||
void roiCNN(
|
||||
std::vector<cv::Mat> &input_rois_,
|
||||
std::vector<float> &output_labels_);
|
||||
void getSubwindow(cv::Mat &dstCrop, cv::Mat &srcImg, int originalSz, int resizeSz);
|
||||
|
||||
std::string vehicle_id;
|
||||
std::string vehicle_id;
|
||||
|
||||
// Save the target bounding box for each frame.
|
||||
std::vector<float> targetSz = {0, 0}; // H and W of bounding box
|
||||
std::vector<float> targetPos = {0, 0}; // center point of bounding box (x, y)
|
||||
// Save the target bounding box for each frame.
|
||||
std::vector<float> targetSz = {0, 0}; // H and W of bounding box
|
||||
std::vector<float> targetPos = {0, 0}; // center point of bounding box (x, y)
|
||||
|
||||
VeriDetectorCUDAImpl *_cuda_impl;
|
||||
VeriDetectorIntelImpl *_intel_impl;
|
||||
};
|
||||
|
||||
VeriDetectorCUDAImpl *_cuda_impl;
|
||||
};
|
||||
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -128,7 +128,6 @@ public:
|
|||
|
||||
bool getBox(Box& b);
|
||||
bool getAruco(int& id, std::vector<cv::Point2f> &corners);
|
||||
bool getAruco(int& id, std::vector<cv::Point2f> &corners, cv::Vec3d &rvecs, cv::Vec3d &tvecs);
|
||||
bool getEllipse(double& xc_, double& yc_, double& a_, double& b_, double& rad_);
|
||||
std::string getJsonStr();
|
||||
|
||||
|
@ -327,66 +326,59 @@ protected:
|
|||
};
|
||||
|
||||
|
||||
enum class CameraType {NONE, WEBCAM, V4L2CAM, MIPI, RTSP, VIDEO, G1, Q10, GX40};
|
||||
// enum class CameraType {NONE, WEBCAM, V4L2CAM, G1, Q10, MIPI, GX40};
|
||||
|
||||
class CameraBase {
|
||||
public:
|
||||
CameraBase(CameraType type=CameraType::NONE, int id=0);
|
||||
~CameraBase();
|
||||
void open(CameraType type=CameraType::V4L2CAM, int id=0);
|
||||
bool read(cv::Mat& image);
|
||||
void release();
|
||||
// class CameraBase {
|
||||
// public:
|
||||
// CameraBase(CameraType type=CameraType::NONE, int id=0);
|
||||
// ~CameraBase();
|
||||
// void open(CameraType type=CameraType::WEBCAM, int id=0);
|
||||
// bool read(cv::Mat& image);
|
||||
// void release();
|
||||
|
||||
int getW();
|
||||
int getH();
|
||||
int getFps();
|
||||
std::string getIp();
|
||||
int getPort();
|
||||
double getBrightness();
|
||||
double getContrast();
|
||||
double getSaturation();
|
||||
double getHue();
|
||||
double getExposure();
|
||||
std::string getFourcc();
|
||||
bool isRunning();
|
||||
void setFourcc(std::string fourcc);
|
||||
void setWH(int width, int height);
|
||||
void setFps(int fps);
|
||||
void setIp(std::string ip);
|
||||
void setRtspUrl(std::string rtsp_url);
|
||||
void setVideoPath(std::string video_path);
|
||||
void setPort(int port);
|
||||
void setBrightness(double brightness);
|
||||
void setContrast(double contrast);
|
||||
void setSaturation(double saturation);
|
||||
void setHue(double hue);
|
||||
void setExposure(double exposure);
|
||||
protected:
|
||||
virtual void openImpl();
|
||||
void _run();
|
||||
// int getW();
|
||||
// int getH();
|
||||
// int getFps();
|
||||
// std::string getIp();
|
||||
// int getPort();
|
||||
// double getBrightness();
|
||||
// double getContrast();
|
||||
// double getSaturation();
|
||||
// double getHue();
|
||||
// double getExposure();
|
||||
// bool isRunning();
|
||||
// void setWH(int width, int height);
|
||||
// void setFps(int fps);
|
||||
// void setIp(std::string ip);
|
||||
// void setPort(int port);
|
||||
// void setBrightness(double brightness);
|
||||
// void setContrast(double contrast);
|
||||
// void setSaturation(double saturation);
|
||||
// void setHue(double hue);
|
||||
// void setExposure(double exposure);
|
||||
// protected:
|
||||
// virtual void openImpl();
|
||||
// void _run();
|
||||
|
||||
bool _is_running;
|
||||
bool _is_updated;
|
||||
std::thread _tt;
|
||||
cv::VideoCapture _cap;
|
||||
cv::Mat _frame;
|
||||
CameraType _type;
|
||||
int _camera_id;
|
||||
// bool _is_running;
|
||||
// bool _is_updated;
|
||||
// std::thread _tt;
|
||||
// cv::VideoCapture _cap;
|
||||
// cv::Mat _frame;
|
||||
// CameraType _type;
|
||||
// int _camera_id;
|
||||
|
||||
int _width;
|
||||
int _height;
|
||||
int _fps;
|
||||
std::string _ip;
|
||||
std::string _rtsp_url;
|
||||
std::string _video_path;
|
||||
int _port;
|
||||
double _brightness;
|
||||
double _contrast;
|
||||
double _saturation;
|
||||
double _hue;
|
||||
double _exposure;
|
||||
std::string _fourcc;
|
||||
};
|
||||
// int _width;
|
||||
// int _height;
|
||||
// int _fps;
|
||||
// std::string _ip;
|
||||
// int _port;
|
||||
// double _brightness;
|
||||
// double _contrast;
|
||||
// double _saturation;
|
||||
// double _hue;
|
||||
// double _exposure;
|
||||
// };
|
||||
|
||||
|
||||
void drawTargetsInFrame(
|
||||
|
@ -401,16 +393,6 @@ void drawTargetsInFrame(
|
|||
bool with_aruco=false,
|
||||
bool with_yaw=false
|
||||
);
|
||||
cv::Mat drawTargetsInFrame(
|
||||
const cv::Mat img_,
|
||||
const TargetsInFrame& tgts_,
|
||||
const double scale,
|
||||
bool with_all=true,
|
||||
bool with_category=false,
|
||||
bool with_tid=false,
|
||||
bool with_seg=false,
|
||||
bool with_box=false
|
||||
);
|
||||
std::string get_home();
|
||||
bool is_file_exist(std::string& fn);
|
||||
void list_dir(std::string dir, std::vector<std::string>& files, std::string suffixs="", bool r=false);
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
#if 0
|
||||
#ifndef __SV_VIDEO_INPUT__
|
||||
#define __SV_VIDEO_INPUT__
|
||||
|
||||
|
@ -25,3 +26,4 @@ protected:
|
|||
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -1,13 +1,11 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": true,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": true,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -126,9 +124,7 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
"min_hits": 3
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["h"],
|
||||
|
|
|
@ -1,13 +1,11 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 1280,
|
||||
"withSegmentation": false,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": false,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -126,9 +124,7 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
"min_hits": 3
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,13 +1,11 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": true,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": true,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -125,9 +123,8 @@
|
|||
"reid_num_samples": 10,
|
||||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
"max_age": 10,
|
||||
"min_hits": 3
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,13 +1,11 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": false,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": false,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -126,9 +124,7 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
"min_hits": 3
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,13 +1,11 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 1280,
|
||||
"withSegmentation": false,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": false,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -126,9 +124,7 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
"min_hits": 3
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,13 +1,11 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": false,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": true,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -126,9 +124,7 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
"min_hits": 3
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,13 +1,11 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": true,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": true,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -126,9 +124,7 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
"min_hits": 3
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,13 +1,11 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": true,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": true,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -126,9 +124,7 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
"min_hits": 3
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,13 +1,11 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": true,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": true,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -126,9 +124,7 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
"min_hits": 3
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -1,13 +1,11 @@
|
|||
{
|
||||
"CommonObjectDetector": {
|
||||
"dataset": "COCO",
|
||||
"model": "s",
|
||||
"batchSize": 1,
|
||||
"inputSize": 640,
|
||||
"withSegmentation": true,
|
||||
"nmsThrs": 0.6,
|
||||
"scoreThrs": 0.4,
|
||||
"useWidthOrHeight": 1,
|
||||
"withSegmentation": true,
|
||||
"datasetPersonVehicle": {
|
||||
"person": [0.5, 1.8],
|
||||
"car": [4.1, 1.5],
|
||||
|
@ -126,9 +124,7 @@
|
|||
"reid_match_thres": 2.0,
|
||||
"iou_thres": 0.5,
|
||||
"max_age": 10,
|
||||
"min_hits": 3,
|
||||
"frame_rate": 30,
|
||||
"track_buffer": 30
|
||||
"min_hits": 3
|
||||
},
|
||||
"LandingMarkerDetector": {
|
||||
"labels": ["x", "h"],
|
||||
|
|
|
@ -39,6 +39,8 @@ the use of this software, even if advised of the possibility of such damage.
|
|||
#include <sv_world.h>
|
||||
#include "aruco_samples_utility.hpp"
|
||||
|
||||
#include "sv_camera.h"
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
|
@ -146,10 +148,14 @@ int main(int argc, char *argv[]) {
|
|||
}
|
||||
|
||||
// VideoCapture inputVideo;
|
||||
sv::Camera inputVideo;
|
||||
sv::Camera inputVideo(c_type);
|
||||
inputVideo.setIndex(camId);
|
||||
inputVideo.setStream("192.168.144.64",554);
|
||||
|
||||
inputVideo.setWH(imW, imH);
|
||||
inputVideo.setFps(fps);
|
||||
inputVideo.open(c_type, camId);
|
||||
|
||||
inputVideo.open();
|
||||
|
||||
int waitTime = 10;
|
||||
|
||||
|
|
|
@ -97,8 +97,7 @@ int main(int argc, char *argv[]) {
|
|||
|
||||
if (landingPadUnit > 0) {
|
||||
Mat markerImgBIG = Mat::ones(landingPadUnit * 62, landingPadUnit * 62, CV_8UC1) * 255;
|
||||
// markerId = 0;
|
||||
markerId --;
|
||||
markerId = 0;
|
||||
markerSize = landingPadUnit * 4;
|
||||
borderSize = landingPadUnit;
|
||||
int newSize = markerSize + borderSize * 2;
|
||||
|
@ -223,7 +222,7 @@ int main(int argc, char *argv[]) {
|
|||
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
|
||||
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 10, landingPadUnit * 28, newSize, newSize)));
|
||||
|
||||
// markerId = 90;
|
||||
markerId = 90;
|
||||
markerSize = landingPadUnit * 4 * 4;
|
||||
borderSize = landingPadUnit * 4;
|
||||
newSize = markerSize + borderSize * 2;
|
||||
|
|
|
@ -2,6 +2,7 @@
#include <string>
// include the SpireCV SDK header
#include <sv_world.h>
#include "sv_camera.h"

using namespace std;

@ -9,14 +10,15 @@ int main(int argc, char *argv[]) {
  // instantiate the ArUco detector class
  sv::ArucoDetector ad;
  // load camera parameters manually; with an Amov gimbal/camera such as the G1, this step can be skipped and the parameter file is downloaded automatically
  ad.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
  ad.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
  ad.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");

  // open the camera
  sv::Camera cap;
  sv::Camera cap(sv::CameraType::WEBCAM);

  cap.setIndex(0);
  cap.setWH(ad.image_width, ad.image_height);
  cap.setFps(30);
  cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
  // cap.setFps(30);
  cap.open(); // CameraID 0
  // instantiate an OpenCV Mat to hold a single frame in memory
  cv::Mat img;
  int frame_id = 0;
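The demo programs changed in this section all share the same per-frame pattern, written out once here for reference. This is a sketch using the objects declared in the demo above (`cap`, `ad`, `img`, `frame_id`); the loop body in the actual demo is not shown in this diff.

// Shared per-frame pattern of the demo programs (sketch).
while (1)
{
    // wrap this frame's results
    sv::TargetsInFrame tgts(frame_id++);
    // grab a frame and run the detector
    cap.read(img);
    ad.detect(img, tgts);
    // overlay the detections and show the frame
    sv::drawTargetsInFrame(img, tgts);
    cv::imshow("img", img);
    cv::waitKey(10);
}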
@ -2,15 +2,17 @@
#include <string>
// include the SpireCV SDK header
#include <sv_world.h>
#include "sv_camera.h"

using namespace std;

int main(int argc, char *argv[]) {
  // open the camera
  sv::Camera cap;
  cap.setWH(1280, 720);
  cap.setFps(30);
  cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
  sv::Camera cap(sv::CameraType::WEBCAM);
  cap.setIndex(0);
  // cap.setWH(640, 480);
  // cap.setFps(30);
  cap.open(); // CameraID 0
  // instantiate an OpenCV Mat to hold a single frame in memory
  cv::Mat img;
  while (1)
@ -2,6 +2,7 @@
|
|||
#include <string>
|
||||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
#include "sv_camera.h"
|
||||
|
||||
using namespace std;
|
||||
using namespace sv;
|
||||
|
@ -11,14 +12,14 @@ int main(int argc, char *argv[])
|
|||
// 实例化 color line detection 检测器类
|
||||
sv::ColorLineDetector cld;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
cld.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
cld.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
cld.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
|
||||
|
||||
// 打开摄像头
|
||||
sv::Camera cap;
|
||||
sv::Camera cap(sv::CameraType::WEBCAM);
|
||||
cap.setIndex(0);
|
||||
cap.setWH(cld.image_width, cld.image_height);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
// cap.setFps(30);
|
||||
cap.open(); // CameraID 0
|
||||
// 实例化OpenCV的Mat类,用于内存单帧图像
|
||||
cv::Mat img;
|
||||
int frame_id = 0;
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
#include <string>
|
||||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
#include "sv_camera.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
|
@ -9,14 +10,14 @@ int main(int argc, char *argv[]) {
|
|||
// 实例化 通用目标 检测器类
|
||||
sv::CommonObjectDetector cod;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
|
||||
|
||||
// 打开摄像头
|
||||
sv::Camera cap;
|
||||
sv::Camera cap(sv::CameraType::WEBCAM);
|
||||
cap.setIndex(0);
|
||||
cap.setWH(cod.image_width, cod.image_height);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
// cap.setFps(30);
|
||||
cap.open(); // CameraID 0
|
||||
// 实例化OpenCV的Mat类,用于内存单帧图像
|
||||
cv::Mat img;
|
||||
int frame_id = 0;
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
#include <string>
|
||||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
#include "sv_camera.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
|
@ -40,20 +41,19 @@ int main(int argc, char *argv[]) {
|
|||
// 实例化 框选目标跟踪类
|
||||
sv::SingleObjectTracker sot;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
sot.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
sot.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
|
||||
|
||||
|
||||
sv::CommonObjectDetector cod;
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
|
||||
|
||||
|
||||
// 打开摄像头
|
||||
sv::Camera cap;
|
||||
sv::Camera cap(sv::CameraType::WEBCAM);
|
||||
cap.setIndex(0);
|
||||
cap.setWH(cod.image_width, cod.image_height);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
// cap.setFps(30);
|
||||
cap.open(); // CameraID 0
|
||||
|
||||
// 实例化OpenCV的Mat类,用于内存单帧图像
|
||||
cv::Mat img;
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
#include <string>
|
||||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
#include "sv_camera.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
|
@ -9,14 +10,15 @@ int main(int argc, char *argv[]) {
|
|||
// 实例化 椭圆 检测器类
|
||||
sv::EllipseDetector ed;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
ed.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
ed.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
ed.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
|
||||
|
||||
// 打开摄像头
|
||||
sv::Camera cap;
|
||||
sv::Camera cap(sv::CameraType::WEBCAM);
|
||||
cap.setIndex(0);
|
||||
cap.setWH(ed.image_width, ed.image_height);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
// cap.setFps(30);
|
||||
cap.open(); // CameraID 0
|
||||
|
||||
// 实例化OpenCV的Mat类,用于内存单帧图像
|
||||
cv::Mat img;
|
||||
int frame_id = 0;
|
||||
|
|
|
@ -1,76 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <string>
|
||||
#include <experimental/filesystem>
|
||||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
|
||||
using namespace std;
|
||||
namespace fs = std::experimental::filesystem;
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
// 实例化
|
||||
sv::CommonObjectDetector cod;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
sv::MultipleObjectTracker mot;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
mot.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
mot.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
mot.init(&cod);
|
||||
|
||||
// 打开摄像头
|
||||
/*
|
||||
sv::Camera cap;
|
||||
cap.setWH(mot.image_width, mot.image_height);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
*/
|
||||
std::string mot17_folder_path = sv::get_home()+"/SpireCV/dataset/MOT17/train/";
|
||||
std::string pred_file_path = sv::get_home()+"/SpireCV/dataset/pred_mot17/data/";
|
||||
for (auto & seq_path : std::experimental::filesystem::directory_iterator(mot17_folder_path))
|
||||
{
|
||||
// mkdir pred dirs and touch pred_files
|
||||
string pred_file = pred_file_path + seq_path.path().filename().string() + ".txt";
|
||||
fs::create_directories(pred_file_path);
|
||||
std::ofstream file(pred_file);
|
||||
// listdir seqence images
|
||||
string seq_image_paths = mot17_folder_path + seq_path.path().filename().string() + "/img1";
|
||||
// cout << seq_image_paths <<endl;
|
||||
std::vector<std::string> seq_image_file_path;
|
||||
cv::glob(seq_image_paths, seq_image_file_path);
|
||||
|
||||
//eval MOT algorithms
|
||||
cv::Mat img;
|
||||
int frame_id = 0;
|
||||
while (frame_id < seq_image_file_path.size())
|
||||
{
|
||||
img = cv::imread(seq_image_file_path[frame_id]);
|
||||
// 实例化SpireCV的 单帧检测结果 接口类 TargetsInFrame
|
||||
sv::TargetsInFrame tgts(frame_id++);
|
||||
// 读取一帧图像到img
|
||||
//cap.read(img);
|
||||
//cv::resize(img, img, cv::Size(mot.image_width, mot.image_height));
|
||||
|
||||
// 执行通用目标检测
|
||||
sv::TargetsInFrame person_tgts = mot.track(img, tgts);
|
||||
// 可视化检测结果,叠加到img上
|
||||
sv::drawTargetsInFrame(img, person_tgts);
|
||||
// printf(" Frame Size (width, height) = (%d, %d)\n", tgts.width, tgts.height);
|
||||
for (auto target : person_tgts.targets)
|
||||
{
|
||||
int center_x = int(target.cx * tgts.width);
|
||||
int center_y = int(target.cy * tgts.height);
|
||||
int width = int(target.w * tgts.width);
|
||||
int height = int(target.h * tgts.height);
|
||||
double conf = target.score;
|
||||
file << frame_id << ","<< target.tracked_id << "," << center_x - width / 2 << "," << center_y - height / 2 << "," << width << "," << height << "," << conf << "," << "-1,-1,-1" << endl;
|
||||
// file << frame_id << ","<< target.tracked_id << "," << center_x << "," << center_y << "," << width << "," << height << "," << conf << "," << "-1,-1,-1" << endl;
|
||||
}
|
||||
cv::imshow("img", img);
|
||||
cv::waitKey(10);
|
||||
}
|
||||
file.close();
|
||||
}
|
||||
return 0;
|
||||
}
|
|
@ -3,6 +3,7 @@
|
|||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
#include <map>
|
||||
#include "sv_camera.h"
|
||||
|
||||
// 定义窗口名称
|
||||
static const std::string RGB_WINDOW = "Image window";
|
||||
|
@ -34,36 +35,44 @@ void gimbalNoTrack(void)
|
|||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
// 实例化 框选目标跟踪类
|
||||
sv::SingleObjectTracker sot;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
|
||||
|
||||
sv::CommonObjectDetector cod;
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
|
||||
|
||||
// 实例化吊舱
|
||||
gimbal = new sv::Gimbal(sv::GimbalType::G1, sv::GimbalLink::SERIAL);
|
||||
// 使用 /dev/ttyUSB0 作为控制串口
|
||||
gimbal->setSerialPort("/dev/ttyUSB0");
|
||||
// 以GimableCallback作为状态回调函数启用吊舱控制
|
||||
gimbal->open(GimableCallback);
|
||||
// Define the camera
|
||||
sv::Camera cap;
|
||||
// Set the camera stream address to 192.168.2.64
|
||||
cap.setIp("192.168.2.64");
|
||||
// Set the capture resolution to 720P
|
||||
cap.setWH(1280, 720);
|
||||
// Set the video frame rate to 30 FPS
|
||||
|
||||
// Open the camera
|
||||
sv::Camera cap(sv::CameraType::G1);
|
||||
cap.setStream("192.168.2.64",554);
|
||||
cap.setWH(cod.image_width, cod.image_height);
|
||||
cap.setFps(30);
|
||||
// Start the camera
|
||||
cap.open(sv::CameraType::G1);
|
||||
cap.open(); // CameraID 0
|
||||
|
||||
// // Define the camera
|
||||
// sv::Camera cap;
|
||||
// // Set the camera stream address to 192.168.2.64
|
||||
// cap.setIp("192.168.2.64");
|
||||
// // Set the capture resolution to 720P
|
||||
// cap.setWH(1280, 720);
|
||||
// // Set the video frame rate to 30 FPS
|
||||
// cap.setFps(30);
|
||||
// // Start the camera
|
||||
// cap.open(sv::CameraType::G1);
|
||||
|
||||
// Define a new window in which box selection can be performed
|
||||
cv::namedWindow(RGB_WINDOW);
|
||||
// Set the window mouse callback, which implements the entire box-selection logic
|
||||
cv::setMouseCallback(RGB_WINDOW, onMouse, 0);
|
||||
// Instantiate the box-selection single-object tracker class
|
||||
sv::SingleObjectTracker sot;
|
||||
// Manually load the camera parameters; if an Amov G1 (or similar) gimbal/camera is used, this step can be skipped and the parameter file is downloaded automatically
|
||||
sot.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
sot.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
|
||||
sv::CommonObjectDetector cod;
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
|
||||
// Instantiate OpenCV's Mat class to hold a single frame in memory
|
||||
cv::Mat img;
|
||||
|
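The hunk above is the heart of this change set: sv::Camera is now constructed with its type (sv::CameraType::G1 here), configured, and then opened with a parameterless open(); isRunning() becomes isActive() elsewhere in this diff. A minimal sketch of the new flow for an RTSP-style gimbal camera, assembled only from calls that appear in this diff (the stream address and resolution are placeholders):

#include <sv_world.h>
#include "sv_camera.h"

int main()
{
    // The camera type is fixed at construction time.
    sv::Camera cap(sv::CameraType::G1);
    cap.setStream("192.168.2.64", 554);   // RTSP address and port of the gimbal camera
    cap.setWH(1280, 720);                 // capture resolution
    cap.setFps(30);                       // requested frame rate
    cap.open();                           // no CameraType or ID argument anymore

    if (!cap.isActive())                  // bail out if the stream could not be opened
        return -1;

    cv::Mat img;
    while (1)
    {
        cap.read(img);                    // grab one frame
        cv::imshow("frame", img);
        cv::waitKey(10);
    }
    return 0;
}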
@ -258,4 +267,4 @@ void GimableCallback(double &frame_ang_r, double &frame_ang_p, double &frame_ang
|
|||
count = 0;
|
||||
}
|
||||
count++;
|
||||
}
|
||||
}
|
|
@ -3,6 +3,7 @@
|
|||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
// #include "gimbal_tools.hpp"
|
||||
#include "sv_camera.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
|
@ -33,33 +34,40 @@ void gimbalNoTrack(void)
|
|||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
{
|
||||
// 实例化 圆形降落标志 检测器类
|
||||
sv::LandingMarkerDetector lmd;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
lmd.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
|
||||
// 实例化吊舱
|
||||
gimbal = new sv::Gimbal(sv::GimbalType::G1, sv::GimbalLink::SERIAL);
|
||||
// 使用 /dev/ttyUSB0 作为控制串口
|
||||
gimbal->setSerialPort("/dev/ttyUSB0");
|
||||
// 以GimableCallback作为状态回调函数启用吊舱控制
|
||||
gimbal->open(GimableCallback);
|
||||
// 定义相机
|
||||
sv::Camera cap;
|
||||
// 设置相机流媒体地址为 192.168.2.64
|
||||
cap.setIp("192.168.2.64");
|
||||
// 设置获取画面分辨率为720P
|
||||
cap.setWH(1280, 720);
|
||||
// 设置视频帧率为30帧
|
||||
|
||||
// 打开摄像头
|
||||
sv::Camera cap(sv::CameraType::G1);
|
||||
cap.setStream("192.168.2.64", 554);
|
||||
cap.setWH(lmd.image_width, lmd.image_height);
|
||||
cap.setFps(30);
|
||||
// 开启相机
|
||||
cap.open(sv::CameraType::G1);
|
||||
cap.open(); // CameraID 0
|
||||
|
||||
// // 定义相机
|
||||
// sv::Camera cap;
|
||||
// // 设置相机流媒体地址为 192.168.2.64
|
||||
// cap.setIp("192.168.2.64");
|
||||
// // 设置获取画面分辨率为720P
|
||||
// cap.setWH(1280, 720);
|
||||
// // 设置视频帧率为30帧
|
||||
// cap.setFps(30);
|
||||
// // 开启相机
|
||||
// cap.open(sv::CameraType::G1);
|
||||
|
||||
// 定义一个窗口 才可以在上面操作
|
||||
cv::namedWindow(RGB_WINDOW);
|
||||
// 设置窗口操作回调函数,该函数实现吊舱控制
|
||||
cv::setMouseCallback(RGB_WINDOW, onMouse, 0);
|
||||
// 实例化 圆形降落标志 检测器类
|
||||
sv::LandingMarkerDetector lmd;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
lmd.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
lmd.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
|
||||
// 实例化OpenCV的Mat类,用于内存单帧图像
|
||||
cv::Mat img;
|
||||
|
@ -145,4 +153,4 @@ void GimableCallback(double &frame_ang_r, double &frame_ang_p, double &frame_ang
|
|||
count = 0;
|
||||
}
|
||||
count++;
|
||||
}
|
||||
}
|
|
@ -2,6 +2,7 @@
|
|||
#include <string>
|
||||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
#include "sv_camera.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
|
@ -42,33 +43,41 @@ static const std::string RGB_WINDOW = "Image window";
|
|||
void onMouse(int event, int x, int y, int, void *);
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
{
|
||||
// 实例化Aruco检测器类
|
||||
sv::ArucoDetector ad;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
ad.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
|
||||
// 实例化吊舱
|
||||
gimbal = new sv::Gimbal(sv::GimbalType::G1, sv::GimbalLink::SERIAL);
|
||||
// 使用 /dev/ttyUSB0 作为控制串口
|
||||
gimbal->setSerialPort("/dev/ttyUSB0");
|
||||
// 以gimableCallback作为状态回调函数启用吊舱控制
|
||||
gimbal->open(gimableCallback);
|
||||
// 定义相机
|
||||
sv::Camera cap;
|
||||
// 设置相机流媒体地址为 192.168.2.64
|
||||
cap.setIp("192.168.2.64");
|
||||
// 设置获取画面分辨率为720P
|
||||
cap.setWH(1280, 720);
|
||||
// 设置视频帧率为30帧
|
||||
|
||||
// 打开摄像头
|
||||
sv::Camera cap(sv::CameraType::G1);
|
||||
cap.setStream("192.168.2.64",554);
|
||||
cap.setWH(ad.image_width, ad.image_height);
|
||||
cap.setFps(30);
|
||||
// 开启相机
|
||||
cap.open(sv::CameraType::G1);
|
||||
cap.open(); // CameraID 0
|
||||
|
||||
// // 定义相机
|
||||
// sv::Camera cap;
|
||||
// // 设置相机流媒体地址为 192.168.2.64
|
||||
// cap.setIp("192.168.2.64");
|
||||
// // 设置获取画面分辨率为720P
|
||||
// cap.setWH(1280, 720);
|
||||
// // 设置视频帧率为30帧
|
||||
// cap.setFps(30);
|
||||
// // 开启相机
|
||||
// cap.open(sv::CameraType::G1);
|
||||
|
||||
// 定义一个窗口 才可以在上面操作
|
||||
cv::namedWindow(RGB_WINDOW);
|
||||
// 设置窗口操作回调函数,该函数实现吊舱控制
|
||||
cv::setMouseCallback(RGB_WINDOW, onMouse, 0);
|
||||
// 实例化Aruco检测器类
|
||||
sv::ArucoDetector ad;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
ad.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
ad.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
|
||||
|
||||
sv::UDPServer udp;
|
||||
// 实例化OpenCV的Mat类,用于内存单帧图像
|
||||
|
|
|
@ -2,21 +2,22 @@
|
|||
#include <string>
|
||||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
|
||||
#include "sv_camera.h"
|
||||
using namespace std;
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
// 实例化 圆形降落标志 检测器类
|
||||
sv::LandingMarkerDetector lmd;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
lmd.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
lmd.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
lmd.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
|
||||
|
||||
// 打开摄像头
|
||||
sv::Camera cap;
|
||||
// 打开摄像头
|
||||
sv::Camera cap(sv::CameraType::WEBCAM);
|
||||
cap.setIndex(0);
|
||||
cap.setWH(lmd.image_width, lmd.image_height);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
// cap.setFps(30);
|
||||
cap.open(); // CameraID 0
|
||||
|
||||
// 实例化OpenCV的Mat类,用于内存单帧图像
|
||||
cv::Mat img;
|
||||
int frame_id = 0;
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
#include <string>
|
||||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
#include "sv_camera.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
|
@ -9,19 +10,19 @@ int main(int argc, char *argv[]) {
|
|||
// 实例化
|
||||
sv::CommonObjectDetector cod;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
|
||||
sv::MultipleObjectTracker mot;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
mot.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
mot.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
mot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
|
||||
mot.init(&cod);
|
||||
|
||||
// 打开摄像头
|
||||
sv::Camera cap;
|
||||
cap.setWH(mot.image_width, mot.image_height);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
// 打开摄像头
|
||||
sv::Camera cap(sv::CameraType::WEBCAM);
|
||||
cap.setIndex(0);
|
||||
cap.setWH(cod.image_width, cod.image_height);
|
||||
// cap.setFps(30);
|
||||
cap.open(); // CameraID 0
|
||||
|
||||
// 实例化OpenCV的Mat类,用于内存单帧图像
|
||||
cv::Mat img;
|
||||
int frame_id = 0;
|
||||
|
@ -34,9 +35,9 @@ int main(int argc, char *argv[]) {
|
|||
cv::resize(img, img, cv::Size(mot.image_width, mot.image_height));
|
||||
|
||||
// 执行通用目标检测
|
||||
sv::TargetsInFrame person_tgts = mot.track(img, tgts);
|
||||
mot.track(img, tgts);
|
||||
// 可视化检测结果,叠加到img上
|
||||
sv::drawTargetsInFrame(img, person_tgts);
|
||||
sv::drawTargetsInFrame(img, tgts);
|
||||
|
||||
// 显示检测结果img
|
||||
cv::imshow("img", img);
|
||||
|
|
|
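The webcam examples follow the same pattern, with sv::CameraType::WEBCAM plus setIndex() in place of a stream address, and the tracker now writes its results into the TargetsInFrame passed to track() instead of returning a new one. A condensed sketch of the updated loop, put together from the calls shown in this diff:

#include <sv_world.h>
#include "sv_camera.h"

int main()
{
    // Detector and tracker, with parameter files under ~/SpireCV/confs
    sv::CommonObjectDetector cod;
    cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
    cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
    sv::MultipleObjectTracker mot;
    mot.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
    mot.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
    mot.init(&cod);

    // Local USB camera: type WEBCAM, device index 0, parameterless open()
    sv::Camera cap(sv::CameraType::WEBCAM);
    cap.setIndex(0);
    cap.setWH(cod.image_width, cod.image_height);
    cap.open();

    cv::Mat img;
    int frame_id = 0;
    while (1)
    {
        sv::TargetsInFrame tgts(frame_id++);
        cap.read(img);
        cv::resize(img, img, cv::Size(mot.image_width, mot.image_height));
        mot.track(img, tgts);                // results are written into tgts
        sv::drawTargetsInFrame(img, tgts);   // overlay the tracked boxes on img
        cv::imshow("img", img);
        cv::waitKey(10);
    }
    return 0;
}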
@ -2,7 +2,7 @@
|
|||
#include <string>
|
||||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
|
||||
#include "sv_camera.h"
|
||||
using namespace std;
|
||||
|
||||
// 定义窗口名称
|
||||
|
@ -28,15 +28,17 @@ int main(int argc, char *argv[]) {
|
|||
// 实例化 框选目标跟踪类
|
||||
sv::SingleObjectTracker sot;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
|
||||
// sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
|
||||
// sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1920x1080.yaml");
|
||||
sot.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
sot.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
|
||||
// 打开摄像头
|
||||
sv::Camera cap;
|
||||
// 打开摄像头
|
||||
sv::Camera cap(sv::CameraType::WEBCAM);
|
||||
cap.setIndex(0);
|
||||
cap.setWH(sot.image_width, sot.image_height);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
// cap.setFps(30);
|
||||
cap.open(); // CameraID 0
|
||||
|
||||
// cv::VideoCapture cap("/home/amov/SpireCV/test/tracking_1280x720.mp4");
|
||||
// 实例化OpenCV的Mat类,用于内存单帧图像
|
||||
cv::Mat img;
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
#include <stdio.h>
|
||||
#define SERV_PORT 20166
|
||||
|
||||
|
||||
typedef unsigned char byte;
|
||||
using namespace std;
|
||||
|
||||
|
||||
|
@ -23,9 +23,9 @@ int main(int argc, char *argv[]) {
|
|||
|
||||
|
||||
int upd_msg_len = 1024 * 6; // max_objects = 100
|
||||
unsigned char upd_msg[upd_msg_len];
|
||||
byte upd_msg[upd_msg_len];
|
||||
int msg_queue_len = 1024 * 1024; // 1M
|
||||
unsigned char msg_queue[msg_queue_len];
|
||||
byte msg_queue[msg_queue_len];
|
||||
|
||||
int addr_len = sizeof(struct sockaddr_in);
|
||||
int start_index = 0, end_index = 0;
|
||||
|
@ -63,21 +63,21 @@ cout << n << ", " << start_index << ", " << end_index << endl;
|
|||
if (end_index - i >= ilen + 2 && msg_queue[i+ilen] == 0xFB && msg_queue[i+ilen+1] == 0xFD)
|
||||
{
|
||||
cout << "FOUND 0xFAFC & 0xFBFD" << endl;
|
||||
unsigned char* msg_type = reinterpret_cast<unsigned char*>(&msg_queue[i+2]);
|
||||
byte* msg_type = reinterpret_cast<byte*>(&msg_queue[i+2]);
|
||||
cout << "Type: " << (int) *msg_type << endl;
|
||||
unsigned short* year = reinterpret_cast<unsigned short*>(&msg_queue[i+7]);
|
||||
unsigned char* month = reinterpret_cast<unsigned char*>(&msg_queue[i+9]);
|
||||
unsigned char* day = reinterpret_cast<unsigned char*>(&msg_queue[i+10]);
|
||||
unsigned char* hour = reinterpret_cast<unsigned char*>(&msg_queue[i+11]);
|
||||
unsigned char* minute = reinterpret_cast<unsigned char*>(&msg_queue[i+12]);
|
||||
unsigned char* second = reinterpret_cast<unsigned char*>(&msg_queue[i+13]);
|
||||
byte* month = reinterpret_cast<byte*>(&msg_queue[i+9]);
|
||||
byte* day = reinterpret_cast<byte*>(&msg_queue[i+10]);
|
||||
byte* hour = reinterpret_cast<byte*>(&msg_queue[i+11]);
|
||||
byte* minute = reinterpret_cast<byte*>(&msg_queue[i+12]);
|
||||
byte* second = reinterpret_cast<byte*>(&msg_queue[i+13]);
|
||||
unsigned short* millisecond = reinterpret_cast<unsigned short*>(&msg_queue[i+14]);
|
||||
cout << "Time: " << *year << "-" << (int) *month << "-" << (int) *day << " " << (int) *hour << ":" << (int) *minute << ":" << (int) *second << " " << *millisecond << endl;
|
||||
|
||||
unsigned char* index_d1 = reinterpret_cast<unsigned char*>(&msg_queue[i+16]);
|
||||
unsigned char* index_d2 = reinterpret_cast<unsigned char*>(&msg_queue[i+17]);
|
||||
unsigned char* index_d3 = reinterpret_cast<unsigned char*>(&msg_queue[i+18]);
|
||||
unsigned char* index_d4 = reinterpret_cast<unsigned char*>(&msg_queue[i+19]);
|
||||
byte* index_d1 = reinterpret_cast<byte*>(&msg_queue[i+16]);
|
||||
byte* index_d2 = reinterpret_cast<byte*>(&msg_queue[i+17]);
|
||||
byte* index_d3 = reinterpret_cast<byte*>(&msg_queue[i+18]);
|
||||
byte* index_d4 = reinterpret_cast<byte*>(&msg_queue[i+19]);
|
||||
int mp = i+20;
|
||||
if (((*index_d4) & 0x01) == 0x01)
|
||||
{
|
||||
|
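A note on the bit tests in this parser: in C and C++, == binds tighter than &, so an unparenthesized x & 0x02 == 0x02 parses as x & (0x02 == 0x02), i.e. x & 1, and silently tests bit 0 instead of bit 1 — each masked comparison therefore needs its own parentheses. A compile-time illustration (most compilers also warn about the unparenthesized form):

int main()
{
    constexpr unsigned char flags = 0x02;                                  // only bit 1 set
    static_assert((flags & 0x02 == 0x02) == 0, "parses as flags & 1");     // tests bit 0: not what was meant
    static_assert((flags & 0x02) == 0x02, "parenthesized: tests bit 1");   // the intended check
    return 0;
}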
@ -152,13 +152,13 @@ cout << n << ", " << start_index << ", " << end_index << endl;
|
|||
}
|
||||
for (int j=0; j<n_objects; j++)
|
||||
{
|
||||
unsigned char* index_f1 = reinterpret_cast<unsigned char*>(&msg_queue[mp]);
|
||||
byte* index_f1 = reinterpret_cast<byte*>(&msg_queue[mp]);
|
||||
mp++;
|
||||
unsigned char* index_f2 = reinterpret_cast<unsigned char*>(&msg_queue[mp]);
|
||||
byte* index_f2 = reinterpret_cast<byte*>(&msg_queue[mp]);
|
||||
mp++;
|
||||
unsigned char* index_f3 = reinterpret_cast<unsigned char*>(&msg_queue[mp]);
|
||||
byte* index_f3 = reinterpret_cast<byte*>(&msg_queue[mp]);
|
||||
mp++;
|
||||
unsigned char* index_f4 = reinterpret_cast<unsigned char*>(&msg_queue[mp]);
|
||||
byte* index_f4 = reinterpret_cast<byte*>(&msg_queue[mp]);
|
||||
mp++;
|
||||
if ((((*index_f4) & 0x01) == 0x01) && (((*index_f4) & 0x02) == 0x02))
|
||||
{
|
||||
|
|
|
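The parser above reinterpret_casts field pointers straight into the receive buffer; that works here because the fields are single bytes and 2-byte values read on a little-endian host, but a memcpy-based accessor sidesteps alignment and strict-aliasing questions if the message layout grows. A small sketch of that alternative (the offset matches the year field above):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Copy an unsigned 16-bit little-endian field out of buf starting at offset.
static uint16_t read_u16_le(const unsigned char* buf, int offset)
{
    uint16_t v;
    std::memcpy(&v, buf + offset, sizeof(v));   // assumes a little-endian host, like the casts above
    return v;
}

int main()
{
    unsigned char msg[16] = {0};
    msg[7] = 0xE7; msg[8] = 0x07;                        // 0x07E7 = 2023, stored little-endian at offset 7
    std::printf("year = %u\n", read_u16_le(msg, 7));     // prints: year = 2023
    return 0;
}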
@ -2,20 +2,21 @@
|
|||
#include <string>
|
||||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
|
||||
#include "sv_camera.h"
|
||||
using namespace std;
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
// 实例化Aruco检测器类
|
||||
sv::ArucoDetector ad;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
ad.loadCameraParams("/home/amov/SpireCV/calib_webcam_1280x720.yaml");
|
||||
ad.loadCameraParams("/home/amov/SpireCV/calib_webcam_640x480.yaml");
|
||||
|
||||
// 打开摄像头
|
||||
sv::Camera cap;
|
||||
// 打开摄像头
|
||||
sv::Camera cap(sv::CameraType::WEBCAM);
|
||||
cap.setIndex(0);
|
||||
cap.setWH(ad.image_width, ad.image_height);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
// cap.setFps(30);
|
||||
cap.open(); // CameraID 0
|
||||
|
||||
sv::UDPServer udp;
|
||||
// 实例化OpenCV的Mat类,用于内存单帧图像
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
#include <string>
|
||||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
|
||||
#include "sv_camera.h"
|
||||
using namespace std;
|
||||
|
||||
struct node
|
||||
|
@ -20,18 +20,17 @@ int main(int argc, char *argv[])
|
|||
// 实例化 框选目标跟踪类
|
||||
sv::VeriDetector veri;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
veri.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
veri.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
veri.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
|
||||
|
||||
sv::CommonObjectDetector cod;
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
|
||||
|
||||
// 打开摄像头
|
||||
sv::Camera cap;
|
||||
// 打开摄像头
|
||||
sv::Camera cap(sv::CameraType::WEBCAM);
|
||||
cap.setIndex(0);
|
||||
cap.setWH(cod.image_width, cod.image_height);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
// cap.setFps(30);
|
||||
cap.open(); // CameraID 0
|
||||
// 实例化OpenCV的Mat类,用于内存单帧图像
|
||||
cv::Mat img;
|
||||
int frame_id = 0;
|
||||
|
|
|
@ -2,21 +2,22 @@
|
|||
#include <string>
|
||||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
|
||||
#include "sv_camera.h"
|
||||
using namespace std;
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
// 实例化 通用目标 检测器类
|
||||
sv::CommonObjectDetector cod;
|
||||
// 手动导入相机参数,如果使用Amov的G1等吊舱或相机,则可以忽略该步骤,将自动下载相机参数文件
|
||||
cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
|
||||
cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
|
||||
cod.loadCameraParams("/home/amov/SpireCV/calib_webcam_640x480.yaml");
|
||||
|
||||
// 打开摄像头
|
||||
sv::Camera cap;
|
||||
// 打开摄像头
|
||||
sv::Camera cap(sv::CameraType::WEBCAM);
|
||||
cap.setIndex(0);
|
||||
cap.setWH(cod.image_width, cod.image_height);
|
||||
cap.setFps(30);
|
||||
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
|
||||
// cap.setFps(30);
|
||||
cap.open(); // CameraID 0
|
||||
// 实例化OpenCV的Mat类,用于内存单帧图像
|
||||
cv::Mat img;
|
||||
int frame_id = 0;
|
||||
|
@ -24,7 +25,7 @@ int main(int argc, char *argv[]) {
|
|||
// 实例化视频保存类
|
||||
sv::VideoWriter vw;
|
||||
// 设置保存路径"/home/amov/Videos",保存图像尺寸(640,480),帧频25Hz,同步保存检测结果(.svj)
|
||||
vw.setup(sv::get_home() + "/Videos", cv::Size(cod.image_width, cod.image_height), 25, true);
|
||||
vw.setup("/home/amov/Videos", cv::Size(640, 480), 25, true);
|
||||
|
||||
while (1)
|
||||
{
|
||||
|
@ -32,7 +33,7 @@ int main(int argc, char *argv[]) {
|
|||
sv::TargetsInFrame tgts(frame_id++);
|
||||
// 读取一帧图像到img
|
||||
cap.read(img);
|
||||
cv::resize(img, img, cv::Size(cod.image_width, cod.image_height));
|
||||
cv::resize(img, img, cv::Size(640, 480));
|
||||
|
||||
// 执行通用目标检测
|
||||
cod.detect(img, tgts);
|
||||
|
|
|
@ -2,15 +2,17 @@
|
|||
#include <string>
|
||||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
#include "sv_camera.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
// 打开摄像头
|
||||
sv::Camera cap;
|
||||
// cap.setWH(1280, 720);
|
||||
// cap.setFps(30);
|
||||
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
|
||||
// 打开摄像头
|
||||
sv::Camera cap(sv::CameraType::WEBCAM);
|
||||
cap.setIndex(0);
|
||||
cap.setWH(1280, 720);
|
||||
cap.setFps(30);
|
||||
cap.open(); // CameraID 0
|
||||
|
||||
// 实例化视频推流类sv::VideoStreamer
|
||||
sv::VideoStreamer streamer;
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
// 包含SpireCV SDK头文件
|
||||
#include <sv_world.h>
|
||||
#include <chrono>
|
||||
#include "sv_camera.h"
|
||||
|
||||
// yaw roll pitch
|
||||
double gimbalEulerAngle[3];
|
||||
|
@ -143,14 +144,20 @@ int main(int argc, char *argv[])
|
|||
std::cout << " pass... " << std::endl;
|
||||
std::cout << " start image test " << std::endl;
|
||||
|
||||
sv::Camera cap;
|
||||
cap.setIp(argv[2]);
|
||||
// 打开摄像头
|
||||
sv::Camera cap(sv::CameraType::G1);
|
||||
cap.setStream(argv[2], 554);
|
||||
cap.setWH(1280, 720);
|
||||
cap.setFps(30);
|
||||
cap.open(); // CameraID 0
|
||||
|
||||
cap.open(sv::CameraType::G1);
|
||||
// sv::Camera cap;
|
||||
// cap.setIp(argv[2]);
|
||||
// cap.setWH(1280, 720);
|
||||
// cap.setFps(30);
|
||||
// cap.open(sv::CameraType::G1);
|
||||
|
||||
if (!cap.isRunning())
|
||||
if (!cap.isActive())
|
||||
{
|
||||
std::cout << " gimbal image error , failed !!!!!" << std::endl;
|
||||
return -1;
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
python3 run_mot_challenge.py \
|
||||
--BENCHMARK MOT17 \
|
||||
--GT_FOLDER ../../../dataset/MOT17/train \
|
||||
--TRACKERS_FOLDER ../../../dataset/pred_mot17 \
|
||||
--SKIP_SPLIT_FOL True \
|
||||
--SEQMAP_FILE ../../../dataset/MOT17/val_seqmap.txt \
|
||||
--TRACKERS_TO_EVAL '' \
|
||||
--METRICS HOTA CLEAR Identity \
|
||||
#--show
|
|
@ -1,93 +0,0 @@
|
|||
|
||||
""" run_mot_challenge.py
|
||||
|
||||
Run example:
|
||||
run_mot_challenge.py --USE_PARALLEL False --METRICS Hota --TRACKERS_TO_EVAL Lif_T
|
||||
|
||||
Command Line Arguments: Defaults, # Comments
|
||||
Eval arguments:
|
||||
'USE_PARALLEL': False,
|
||||
'NUM_PARALLEL_CORES': 8,
|
||||
'BREAK_ON_ERROR': True,
|
||||
'PRINT_RESULTS': True,
|
||||
'PRINT_ONLY_COMBINED': False,
|
||||
'PRINT_CONFIG': True,
|
||||
'TIME_PROGRESS': True,
|
||||
'OUTPUT_SUMMARY': True,
|
||||
'OUTPUT_DETAILED': True,
|
||||
'PLOT_CURVES': True,
|
||||
Dataset arguments:
|
||||
'GT_FOLDER': os.path.join(code_path, 'data/gt/mot_challenge/'), # Location of GT data
|
||||
'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/mot_challenge/'), # Trackers location
|
||||
'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER)
|
||||
'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder)
|
||||
'CLASSES_TO_EVAL': ['pedestrian'], # Valid: ['pedestrian']
|
||||
'BENCHMARK': 'MOT17', # Valid: 'MOT17', 'MOT16', 'MOT20', 'MOT15'
|
||||
'SPLIT_TO_EVAL': 'train', # Valid: 'train', 'test', 'all'
|
||||
'INPUT_AS_ZIP': False, # Whether tracker input files are zipped
|
||||
'PRINT_CONFIG': True, # Whether to print current config
|
||||
'DO_PREPROC': True, # Whether to perform preprocessing (never done for 2D_MOT_2015)
|
||||
'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
|
||||
'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
|
||||
Metric arguments:
|
||||
'METRICS': ['HOTA', 'CLEAR', 'Identity', 'VACE']
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import argparse
|
||||
from multiprocessing import freeze_support
|
||||
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
import trackeval # noqa: E402
|
||||
|
||||
if __name__ == '__main__':
|
||||
freeze_support()
|
||||
|
||||
# Command line interface:
|
||||
default_eval_config = trackeval.Evaluator.get_default_eval_config()
|
||||
default_eval_config['DISPLAY_LESS_PROGRESS'] = False
|
||||
default_dataset_config = trackeval.datasets.MotChallenge2DBox.get_default_dataset_config()
|
||||
default_metrics_config = {'METRICS': ['HOTA', 'CLEAR', 'Identity'], 'THRESHOLD': 0.5}
|
||||
config = {**default_eval_config, **default_dataset_config, **default_metrics_config} # Merge default configs
|
||||
parser = argparse.ArgumentParser()
|
||||
for setting in config.keys():
|
||||
if type(config[setting]) == list or type(config[setting]) == type(None):
|
||||
parser.add_argument("--" + setting, nargs='+')
|
||||
else:
|
||||
parser.add_argument("--" + setting)
|
||||
args = parser.parse_args().__dict__
|
||||
args['SEQMAP_FILE'] = args['SEQMAP_FILE'][0]
|
||||
# args['SEQ_INFO']={'MOT17-02-DPM':600}
|
||||
for setting in args.keys():
|
||||
if args[setting] is not None:
|
||||
if type(config[setting]) == type(True):
|
||||
if args[setting] == 'True':
|
||||
x = True
|
||||
elif args[setting] == 'False':
|
||||
x = False
|
||||
else:
|
||||
raise Exception('Command line parameter ' + setting + ' must be True or False')
|
||||
elif type(config[setting]) == type(1):
|
||||
x = int(args[setting])
|
||||
elif type(args[setting]) == type(None):
|
||||
x = None
|
||||
elif setting == 'SEQ_INFO':
|
||||
x = dict(zip(args[setting], [None]*len(args[setting])))
|
||||
else:
|
||||
x = args[setting]
|
||||
config[setting] = x
|
||||
eval_config = {k: v for k, v in config.items() if k in default_eval_config.keys()}
|
||||
dataset_config = {k: v for k, v in config.items() if k in default_dataset_config.keys()}
|
||||
metrics_config = {k: v for k, v in config.items() if k in default_metrics_config.keys()}
|
||||
|
||||
# Run code
|
||||
evaluator = trackeval.Evaluator(eval_config)
|
||||
dataset_list = [trackeval.datasets.MotChallenge2DBox(dataset_config)]
|
||||
metrics_list = []
|
||||
for metric in [trackeval.metrics.HOTA, trackeval.metrics.CLEAR, trackeval.metrics.Identity, trackeval.metrics.VACE]:
|
||||
if metric.get_name() in metrics_config['METRICS']:
|
||||
metrics_list.append(metric(metrics_config))
|
||||
if len(metrics_list) == 0:
|
||||
raise Exception('No metrics selected for evaluation')
|
||||
evaluator.evaluate(dataset_list, metrics_list)
|
|
@ -1,23 +0,0 @@
|
|||
import os
|
||||
import os.path as osp
|
||||
|
||||
|
||||
|
||||
def makedirs(path):
|
||||
if not osp.exists(path):
|
||||
os.makedirs(path)
|
||||
else:
|
||||
print('this directory already exists.')
|
||||
|
||||
|
||||
if __name__=="__main__":
|
||||
dst_path = '/home/bitwrj/SpireCV/dataset/MOT17'
|
||||
mode = 'train'
|
||||
dst_file_name = 'val_seqmap.txt'
|
||||
dst_file = osp.join(dst_path,dst_file_name)
|
||||
test_files = os.listdir(osp.join(dst_path,mode))
|
||||
test_files.sort()
|
||||
with open(dst_file, 'w+') as f:
|
||||
f.writelines('name\n')
|
||||
for test_file in test_files:
|
||||
f.writelines(test_file+'\n')
|
|
@ -17,7 +17,6 @@ cd nv-codec-headers
|
|||
git checkout n11.1.5.0
|
||||
sudo make install
|
||||
cd ..
|
||||
sudo rm -r nv-codec-headers
|
||||
|
||||
wget https://ffmpeg.org/releases/ffmpeg-4.2.5.tar.bz2
|
||||
tar -xjf ffmpeg-4.2.5.tar.bz2
|
||||
|
|
|
@ -3,6 +3,12 @@
|
|||
root_dir=${HOME}"/SpireCV/models"
|
||||
root_server="https://download.amovlab.com/model"
|
||||
|
||||
sv_params1=${HOME}"/SpireCV/sv_algorithm_params.json"
|
||||
sv_params2=${HOME}"/SpireCV/sv_algorithm_params_coco_640.json"
|
||||
sv_params3=${HOME}"/SpireCV/sv_algorithm_params_coco_1280.json"
|
||||
camera_params1=${HOME}"/SpireCV/calib_webcam_640x480.yaml"
|
||||
camera_params2=${HOME}"/SpireCV/calib_webcam_1280x720.yaml"
|
||||
|
||||
coco_model1="COCO-yolov5s.wts"
|
||||
coco_model2="COCO-yolov5s6.wts"
|
||||
coco_model3="COCO-yolov5s-seg.wts"
|
||||
|
@ -40,6 +46,28 @@ if [ ! -d ${root_dir} ]; then
|
|||
mkdir -p ${root_dir}
|
||||
fi
|
||||
|
||||
if [ ! -f ${sv_params1} ]; then
|
||||
echo -e "\033[32m[INFO]: ${sv_params1} not exist, downloading ... \033[0m"
|
||||
wget -O ${sv_params1} ${root_server}/install/a-params/sv_algorithm_params.json
|
||||
fi
|
||||
if [ ! -f ${sv_params2} ]; then
|
||||
echo -e "\033[32m[INFO]: ${sv_params2} not exist, downloading ... \033[0m"
|
||||
wget -O ${sv_params2} ${root_server}/install/a-params/sv_algorithm_params_coco_640.json
|
||||
fi
|
||||
if [ ! -f ${sv_params3} ]; then
|
||||
echo -e "\033[32m[INFO]: ${sv_params3} not exist, downloading ... \033[0m"
|
||||
wget -O ${sv_params3} ${root_server}/install/a-params/sv_algorithm_params_coco_1280.json
|
||||
fi
|
||||
|
||||
if [ ! -f ${camera_params1} ]; then
|
||||
echo -e "\033[32m[INFO]: ${camera_params1} not exist, downloading ... \033[0m"
|
||||
wget -O ${camera_params1} ${root_server}/install/c-params/calib_webcam_640x480.yaml
|
||||
fi
|
||||
if [ ! -f ${camera_params2} ]; then
|
||||
echo -e "\033[32m[INFO]: ${camera_params2} not exist, downloading ... \033[0m"
|
||||
wget -O ${camera_params2} ${root_server}/install/c-params/calib_webcam_1280x720.yaml
|
||||
fi
|
||||
|
||||
if [ ! -f ${coco_model1_fn} ]; then
|
||||
echo -e "\033[32m[INFO]: ${coco_model1_fn} not exist, downloading ... \033[0m"
|
||||
wget -O ${coco_model1_fn} ${root_server}/install/${coco_model1}
|
||||
|
|
|
@ -1,39 +0,0 @@
|
|||
#!/bin/bash -e
|
||||
|
||||
root_dir=${HOME}"/SpireCV/models"
|
||||
root_server="https://download.amovlab.com/model"
|
||||
|
||||
sv_params1=${HOME}"/SpireCV/sv_algorithm_params.json"
|
||||
sv_params2=${HOME}"/SpireCV/sv_algorithm_params_coco_640.json"
|
||||
sv_params3=${HOME}"/SpireCV/sv_algorithm_params_coco_1280.json"
|
||||
camera_params1=${HOME}"/SpireCV/calib_webcam_640x480.yaml"
|
||||
camera_params2=${HOME}"/SpireCV/calib_webcam_1280x720.yaml"
|
||||
|
||||
|
||||
if [ ! -d ${root_dir} ]; then
|
||||
echo -e "\033[32m[INFO]: ${root_dir} not exist, creating it ... \033[0m"
|
||||
mkdir -p ${root_dir}
|
||||
fi
|
||||
|
||||
if [ ! -f ${sv_params1} ]; then
|
||||
echo -e "\033[32m[INFO]: ${sv_params1} not exist, downloading ... \033[0m"
|
||||
wget -O ${sv_params1} ${root_server}/install/a-params/sv_algorithm_params.json
|
||||
fi
|
||||
if [ ! -f ${sv_params2} ]; then
|
||||
echo -e "\033[32m[INFO]: ${sv_params2} not exist, downloading ... \033[0m"
|
||||
wget -O ${sv_params2} ${root_server}/install/a-params/sv_algorithm_params_coco_640.json
|
||||
fi
|
||||
if [ ! -f ${sv_params3} ]; then
|
||||
echo -e "\033[32m[INFO]: ${sv_params3} not exist, downloading ... \033[0m"
|
||||
wget -O ${sv_params3} ${root_server}/install/a-params/sv_algorithm_params_coco_1280.json
|
||||
fi
|
||||
|
||||
if [ ! -f ${camera_params1} ]; then
|
||||
echo -e "\033[32m[INFO]: ${camera_params1} not exist, downloading ... \033[0m"
|
||||
wget -O ${camera_params1} ${root_server}/install/c-params/calib_webcam_640x480.yaml
|
||||
fi
|
||||
if [ ! -f ${camera_params2} ]; then
|
||||
echo -e "\033[32m[INFO]: ${camera_params2} not exist, downloading ... \033[0m"
|
||||
wget -O ${camera_params2} ${root_server}/install/c-params/calib_webcam_1280x720.yaml
|
||||
fi
|
||||
|
|
@ -1,24 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
sudo apt install -y libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev
|
||||
sudo apt install -y libgstreamer-plugins-bad1.0-dev gstreamer1.0-plugins-base
|
||||
sudo apt install -y gstreamer1.0-plugins-good gstreamer1.0-plugins-bad
|
||||
sudo apt install -y gstreamer1.0-plugins-ugly gstreamer1.0-libav gstreamer1.0-doc
|
||||
sudo apt install -y gstreamer1.0-tools gstreamer1.0-x gstreamer1.0-alsa
|
||||
sudo apt install -y gstreamer1.0-gl gstreamer1.0-gtk3 gstreamer1.0-qt5
|
||||
sudo apt install -y gstreamer1.0-pulseaudio
|
||||
sudo apt install -y gtk-doc-tools
|
||||
sudo apt install -y libeigen3-dev libfmt-dev v4l-utils
|
||||
|
||||
sudo apt -y install autotools-dev automake m4 perl
|
||||
sudo apt -y install libtool
|
||||
autoreconf -ivf
|
||||
|
||||
git clone https://gitee.com/jario-jin/gst-rtsp-server-b18.git
|
||||
cd gst-rtsp-server-b18
|
||||
./autogen.sh
|
||||
make
|
||||
sudo make install
|
||||
cd ..
|
||||
sudo rm -r gst-rtsp-server-b18
|
||||
|
|
@ -1,20 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
sudo apt install -y libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev
|
||||
sudo apt install -y libgstreamer-plugins-bad1.0-dev gstreamer1.0-plugins-base
|
||||
sudo apt install -y gstreamer1.0-plugins-good gstreamer1.0-plugins-bad
|
||||
sudo apt install -y gstreamer1.0-plugins-ugly gstreamer1.0-libav gstreamer1.0-doc
|
||||
sudo apt install -y gstreamer1.0-tools gstreamer1.0-x gstreamer1.0-alsa
|
||||
sudo apt install -y gstreamer1.0-gl gstreamer1.0-gtk3 gstreamer1.0-qt5
|
||||
sudo apt install -y gstreamer1.0-pulseaudio
|
||||
sudo apt install -y gtk-doc-tools
|
||||
sudo apt install -y libeigen3-dev libfmt-dev v4l-utils
|
||||
|
||||
git clone https://gitee.com/jario-jin/gst-rtsp-server-b18.git
|
||||
cd gst-rtsp-server-b18
|
||||
./autogen.sh
|
||||
make
|
||||
sudo make install
|
||||
cd ..
|
||||
sudo rm -r gst-rtsp-server-b18
|
||||
|
|
@ -1,70 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
|
||||
wget https://download.amovlab.com/model/deps/opencv-4.7.0.zip
|
||||
wget https://download.amovlab.com/model/deps/opencv_contrib-4.7.0.zip
|
||||
wget https://download.amovlab.com/model/deps/opencv_cache_x86-4.7.0.zip
|
||||
|
||||
current_dir=$(pwd)
|
||||
package_dir="."
|
||||
mkdir ~/opencv_build
|
||||
|
||||
|
||||
if [ ! -d ""$package_dir"" ];then
|
||||
echo "\033[31m[ERROR]: $package_dir not exist!: \033[0m"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# sudo add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main"
|
||||
# sudo add-apt-repository "deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu-ports/ xenial main multiverse restricted universe"
|
||||
sudo apt update
|
||||
sudo apt install -y build-essential
|
||||
sudo apt install -y cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev
|
||||
|
||||
sudo add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main"
|
||||
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3B4FE6ACC0B21F32
|
||||
sudo apt update
|
||||
sudo apt install -y libjasper1 libjasper-dev
|
||||
|
||||
sudo apt install -y python3-dev python3-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev
|
||||
sudo apt install -y libdc1394-22-dev
|
||||
sudo apt install -y libcurl4 build-essential pkg-config cmake libopenblas-dev libeigen3-dev \
|
||||
libtbb-dev libavcodec-dev libavformat-dev libgstreamer-plugins-base1.0-dev \
|
||||
libgstreamer1.0-dev libswscale-dev libgtk-3-dev libpng-dev libjpeg-dev \
|
||||
libcanberra-gtk-module libcanberra-gtk3-module
|
||||
|
||||
|
||||
echo "\033[32m[INFO]:\033[0m unzip opencv-4.7.0.zip ..."
|
||||
unzip -q -o $package_dir/opencv-4.7.0.zip -d ~/opencv_build
|
||||
|
||||
echo "\033[32m[INFO]:\033[0m unzip opencv_contrib-4.7.0.zip ..."
|
||||
unzip -q -o $package_dir/opencv_contrib-4.7.0.zip -d ~/opencv_build
|
||||
|
||||
echo "\033[32m[INFO]:\033[0m unzip opencv_cache_x86-4.7.0.zip ..."
|
||||
unzip -q -o $package_dir/opencv_cache_x86-4.7.0.zip -d ~/opencv_build
|
||||
|
||||
|
||||
sudo rm opencv-4.7.0.zip
|
||||
sudo rm opencv_contrib-4.7.0.zip
|
||||
sudo rm opencv_cache_x86-4.7.0.zip
|
||||
|
||||
cd ~/opencv_build/opencv-4.7.0
|
||||
mkdir .cache
|
||||
|
||||
cp -r ~/opencv_build/opencv_cache_x86-4.7.0/* ~/opencv_build/opencv-4.7.0/.cache/
|
||||
|
||||
mkdir build
|
||||
cd build
|
||||
|
||||
cmake -D CMAKE_BUILD_TYPE=Release \
|
||||
-D WITH_CUDA=OFF \
|
||||
-D OPENCV_ENABLE_NONFREE=ON \
|
||||
-D CMAKE_INSTALL_PREFIX=/usr/local \
|
||||
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-4.7.0/modules ..
|
||||
|
||||
make -j2
|
||||
sudo make install
|
||||
|
||||
cd
|
||||
sudo rm -r ~/opencv_build
|
||||
cd ${current_dir}
|
|
@ -5,7 +5,7 @@ wget https://download.amovlab.com/model/deps/opencv-4.7.0.zip
|
|||
wget https://download.amovlab.com/model/deps/opencv_contrib-4.7.0.zip
|
||||
wget https://download.amovlab.com/model/deps/opencv_cache-4.7.0.zip
|
||||
|
||||
current_dir=$(pwd)
|
||||
|
||||
package_dir="."
|
||||
mkdir ~/opencv_build
|
||||
|
||||
|
@ -52,4 +52,3 @@ sudo make install
|
|||
|
||||
cd
|
||||
sudo rm -r ~/opencv_build
|
||||
cd ${current_dir}
|
||||
|
|
|
@ -5,7 +5,7 @@ wget https://download.amovlab.com/model/deps/opencv-4.7.0.zip
|
|||
wget https://download.amovlab.com/model/deps/opencv_contrib-4.7.0.zip
|
||||
wget https://download.amovlab.com/model/deps/opencv_cache-4.7.0.zip
|
||||
|
||||
current_dir=$(pwd)
|
||||
|
||||
package_dir="."
|
||||
mkdir ~/opencv_build
|
||||
|
||||
|
@ -52,4 +52,3 @@ sudo make install
|
|||
|
||||
cd
|
||||
sudo rm -r ~/opencv_build
|
||||
cd ${current_dir}
|
||||
|
|
|
@ -1,119 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding:utf-8 -*-
|
||||
import os
|
||||
import requests
|
||||
import argparse
|
||||
|
||||
|
||||
root_url = "https://download.amovlab.com/model/SpireCV-models/"
|
||||
model_list_url = root_url + "model-list.txt"
|
||||
root_path = os.path.expanduser("~") + "/SpireCV/models"
|
||||
print("MODEL PATH:", root_path)
|
||||
if not os.path.exists(root_path):
|
||||
os.makedirs(root_path)
|
||||
list_file = os.path.join(root_path, "model-list.txt")
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="SpireCV Model SYNC")
|
||||
parser.add_argument(
|
||||
"-p", "--platform",
|
||||
type=str,
|
||||
required=True,
|
||||
help="Supported Platforms: nv (Nvidia), int (Intel)",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.platform in ['nv', 'nvidia', 'Nv', 'Nvidia']:
|
||||
prefix = 'Nv'
|
||||
elif args.platform in ['int', 'intel', 'Int', 'Intel']:
|
||||
prefix = 'Int'
|
||||
else:
|
||||
raise Exception("Platform NOT Support!")
|
||||
|
||||
r = requests.get(model_list_url)
|
||||
with open(list_file, "wb") as f:
|
||||
f.write(r.content)
|
||||
|
||||
with open(list_file, 'r') as file:
|
||||
lines = file.readlines()
|
||||
|
||||
need_switch = False
|
||||
for line in lines:
|
||||
line = line.strip()
|
||||
if len(line) > 0:
|
||||
model_file = os.path.join(root_path, line)
|
||||
if not os.path.exists(model_file) and (line.startswith(prefix) or line.startswith('Ocv')):
|
||||
print("[1] Downloading Model:", line, "...")
|
||||
r = requests.get(root_url + line)
|
||||
with open(model_file, "wb") as f:
|
||||
f.write(r.content)
|
||||
need_switch = True
|
||||
|
||||
if os.path.exists(model_file):
|
||||
print("[1] Model:", line, "EXIST!")
|
||||
if line.startswith('Nv'):
|
||||
net = line.split('-')[2]
|
||||
if net.startswith("yolov5"):
|
||||
if len(net.split('_')) == 3:
|
||||
name, seg, ncls = net.split('_')
|
||||
engine_fn = os.path.splitext(model_file)[0].replace(net, name + "_" + seg + '_b1_' + ncls) + '.engine'
|
||||
online_fn = os.path.splitext(model_file)[0].replace(net, name + "_" + seg + '_b1_' + ncls) + '-online.engine'
|
||||
else:
|
||||
name, ncls = net.split('_')
|
||||
engine_fn = os.path.splitext(model_file)[0].replace(net, name + '_b1_' + ncls) + '.engine'
|
||||
online_fn = os.path.splitext(model_file)[0].replace(net, name + '_b1_' + ncls) + '-online.engine'
|
||||
else:
|
||||
engine_fn = os.path.splitext(model_file)[0] + '.engine'
|
||||
online_fn = os.path.splitext(model_file)[0] + '-online.engine'
|
||||
if not os.path.exists(engine_fn) and not os.path.exists(online_fn):
|
||||
if net.startswith("yolov5"):
|
||||
if len(net.split('_')) == 3:
|
||||
name, seg, ncls = net.split('_')
|
||||
cmd = "SpireCVSeg -s {} {} {} {}".format(
|
||||
model_file, engine_fn, ncls[1:], name[6:]
|
||||
)
|
||||
else:
|
||||
name, ncls = net.split('_')
|
||||
cmd = "SpireCVDet -s {} {} {} {}".format(
|
||||
model_file, engine_fn, ncls[1:], name[6:]
|
||||
)
|
||||
elif line.endswith("onnx"):
|
||||
cmd = ("/usr/src/tensorrt/bin/trtexec --explicitBatch --onnx={} "
|
||||
"--saveEngine={} --fp16").format(
|
||||
model_file, engine_fn
|
||||
)
|
||||
print(" [2] Converting Model:", line, "->", engine_fn, "...")
|
||||
result = os.popen(cmd).read()
|
||||
need_switch = True
|
||||
else:
|
||||
print(" [2] Model Converting FINISH!")
|
||||
model_file = engine_fn
|
||||
|
||||
if not line.startswith('Ocv') and need_switch:
|
||||
ext = os.path.splitext(model_file)[1]
|
||||
fn_prefix = '-'.join(os.path.basename(model_file).split('-')[:3])
|
||||
file_names = os.listdir(root_path)
|
||||
selected = []
|
||||
for file_name in file_names:
|
||||
if file_name.startswith(fn_prefix) and file_name.endswith(ext):
|
||||
selected.append(file_name)
|
||||
if len(selected) > 0:
|
||||
for i, sel in enumerate(selected):
|
||||
if sel.endswith('-online' + ext):
|
||||
os.rename(
|
||||
os.path.join(root_path, sel),
|
||||
os.path.join(root_path, '-'.join(sel.split('-')[:4])) + ext
|
||||
)
|
||||
selected[i] = '-'.join(sel.split('-')[:4]) + ext
|
||||
selected.sort(reverse=True)
|
||||
os.rename(
|
||||
os.path.join(root_path, selected[0]),
|
||||
os.path.join(root_path, os.path.splitext(selected[0])[0] + "-online" + ext)
|
||||
)
|
||||
online_model = os.path.splitext(selected[0])[0] + "-online" + ext
|
||||
print(" [3] Model {} ONLINE *".format(online_model))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -1,39 +0,0 @@
|
|||
#!/bin/bash -e
|
||||
|
||||
root_dir=${HOME}"/SpireCV/models"
|
||||
root_server="https://download.amovlab.com/model"
|
||||
|
||||
sv_params1=${HOME}"/SpireCV/sv_algorithm_params.json"
|
||||
sv_params2=${HOME}"/SpireCV/sv_algorithm_params_coco_640.json"
|
||||
sv_params3=${HOME}"/SpireCV/sv_algorithm_params_coco_1280.json"
|
||||
camera_params1=${HOME}"/SpireCV/calib_webcam_640x480.yaml"
|
||||
camera_params2=${HOME}"/SpireCV/calib_webcam_1280x720.yaml"
|
||||
|
||||
|
||||
if [ ! -d ${root_dir} ]; then
|
||||
echo -e "\033[32m[INFO]: ${root_dir} not exist, creating it ... \033[0m"
|
||||
mkdir -p ${root_dir}
|
||||
fi
|
||||
|
||||
if [ ! -f ${sv_params1} ]; then
|
||||
echo -e "\033[32m[INFO]: ${sv_params1} not exist, downloading ... \033[0m"
|
||||
wget -O ${sv_params1} ${root_server}/install/a-params/sv_algorithm_params.json
|
||||
fi
|
||||
if [ ! -f ${sv_params2} ]; then
|
||||
echo -e "\033[32m[INFO]: ${sv_params2} not exist, downloading ... \033[0m"
|
||||
wget -O ${sv_params2} ${root_server}/install/a-params/sv_algorithm_params_coco_640.json
|
||||
fi
|
||||
if [ ! -f ${sv_params3} ]; then
|
||||
echo -e "\033[32m[INFO]: ${sv_params3} not exist, downloading ... \033[0m"
|
||||
wget -O ${sv_params3} ${root_server}/install/a-params/sv_algorithm_params_coco_1280.json
|
||||
fi
|
||||
|
||||
if [ ! -f ${camera_params1} ]; then
|
||||
echo -e "\033[32m[INFO]: ${camera_params1} not exist, downloading ... \033[0m"
|
||||
wget -O ${camera_params1} ${root_server}/install/c-params/calib_webcam_640x480.yaml
|
||||
fi
|
||||
if [ ! -f ${camera_params2} ]; then
|
||||
echo -e "\033[32m[INFO]: ${camera_params2} not exist, downloading ... \033[0m"
|
||||
wget -O ${camera_params2} ${root_server}/install/c-params/calib_webcam_1280x720.yaml
|
||||
fi
|
||||
|
|
@ -1,66 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
|
||||
sudo apt install -y \
|
||||
build-essential yasm cmake libtool libc6 libc6-dev unzip wget libeigen3-dev libfmt-dev \
|
||||
libnuma1 libnuma-dev libx264-dev libx265-dev libfaac-dev libssl-dev v4l-utils
|
||||
|
||||
current_dir=$(pwd)
|
||||
root_dir=${HOME}"/SpireCV"
|
||||
if [ ! -d ${root_dir} ]; then
|
||||
echo -e "\033[32m[INFO]: ${root_dir} not exist, creating it ... \033[0m"
|
||||
mkdir -p ${root_dir}
|
||||
fi
|
||||
cd ${root_dir}
|
||||
|
||||
git clone https://gitee.com/jario-jin/nv-codec-headers.git
|
||||
cd nv-codec-headers
|
||||
git checkout n11.1.5.0
|
||||
sudo make install
|
||||
cd ..
|
||||
sudo rm -r nv-codec-headers
|
||||
|
||||
wget https://ffmpeg.org/releases/ffmpeg-4.2.5.tar.bz2
|
||||
tar -xjf ffmpeg-4.2.5.tar.bz2
|
||||
cd ffmpeg-4.2.5
|
||||
export PATH=$PATH:/usr/local/cuda/bin
|
||||
sed -i 's#_30#_75#' configure; sed -i 's#_30#_75#' configure
|
||||
./configure \
|
||||
--enable-nonfree \
|
||||
--enable-gpl \
|
||||
--enable-shared \
|
||||
--enable-ffmpeg \
|
||||
--enable-ffplay \
|
||||
--enable-ffprobe \
|
||||
--enable-libx264 \
|
||||
--enable-libx265 \
|
||||
--enable-cuda-nvcc \
|
||||
--enable-nvenc \
|
||||
--enable-cuda \
|
||||
--enable-cuvid \
|
||||
--enable-libnpp \
|
||||
--extra-libs="-lpthread -lm" \
|
||||
--extra-cflags=-I/usr/local/cuda/include \
|
||||
--extra-ldflags=-L/usr/local/cuda/lib64
|
||||
make -j8
|
||||
sudo make install
|
||||
cd ..
|
||||
|
||||
git clone https://gitee.com/jario-jin/ZLMediaKit.git
|
||||
cd ZLMediaKit
|
||||
git submodule update --init
|
||||
mkdir build
|
||||
cd build
|
||||
cmake ..
|
||||
make -j4
|
||||
cd ..
|
||||
cd ..
|
||||
|
||||
mkdir ZLM
|
||||
cd ZLM
|
||||
cp ../ZLMediaKit/release/linux/Debug/MediaServer .
|
||||
cp ../ZLMediaKit/release/linux/Debug/config.ini .
|
||||
cd ..
|
||||
|
||||
cd ${current_dir}
|
||||
|
|
@ -1,95 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
echo "\033[32m[INFO]:\033[0m Please enter the folder path of the installation package: "
|
||||
# package_dir="/home/jario/Downloads/nv"
|
||||
|
||||
wget https://download.amovlab.com/model/install/x86-nvidia/cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb
|
||||
wget https://download.amovlab.com/model/install/x86-nvidia/cudnn-11.3-linux-x64-v8.2.1.32.tgz
|
||||
wget https://download.amovlab.com/model/install/x86-nvidia/nv-tensorrt-repo-ubuntu1804-cuda11.3-trt8.0.1.6-ga-20210626_1-1_amd64.deb
|
||||
wget https://download.amovlab.com/model/install/x86-nvidia/cuda-ubuntu1804.pin
|
||||
|
||||
package_dir="."
|
||||
|
||||
cuda_fn=$package_dir"/cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb"
|
||||
cudnn_fn=$package_dir"/cudnn-11.3-linux-x64-v8.2.1.32.tgz"
|
||||
tensorrt_fn=$package_dir"/nv-tensorrt-repo-ubuntu1804-cuda11.3-trt8.0.1.6-ga-20210626_1-1_amd64.deb"
|
||||
tmp_dir="/tmp"
|
||||
|
||||
echo "\033[32m[INFO]: CUDA_PKG: \033[0m"$cuda_fn
|
||||
echo "\033[32m[INFO]: CUDNN_PKG: \033[0m"$cudnn_fn
|
||||
echo "\033[32m[INFO]: TENSORRT_PKG: \033[0m"$tensorrt_fn
|
||||
|
||||
# 所有文件都存在时,才会继续执行脚本
|
||||
if [ ! -f "$cuda_fn" ]; then
|
||||
echo "\033[31m[ERROR]: CUDA_PKG not exist!: \033[0m"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f "$cudnn_fn" ]; then
|
||||
echo "\033[31m[ERROR]: CUDNN_PKG not exist!: \033[0m"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f "$tensorrt_fn" ]; then
|
||||
echo "\033[31m[ERROR]: TENSORRT_PKG not exist!: \033[0m"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# 删除显卡驱动
|
||||
# sudo apt-get remove nvidia-*
|
||||
|
||||
# 安装显卡驱动
|
||||
# echo "\033[32m[INFO]: Nvidia Driver installing ...\033[0m"
|
||||
# ubuntu-drivers devices
|
||||
# sudo ubuntu-drivers autoinstall
|
||||
|
||||
# 删除已安装CUDA
|
||||
# --purge选项会将配置文件、数据库等删除
|
||||
# sudo apt-get autoremove --purge cuda
|
||||
# 查看安装了哪些cuda相关的库,可以用以下指令
|
||||
# sudo dpkg -l |grep cuda
|
||||
# sudo dpkg -P cuda-repo-ubuntu1804-10-2-local-10.2.89-440.33.01
|
||||
# sudo dpkg -P cuda-repo-ubuntu1804-11-1-local
|
||||
# sudo dpkg -P nv-tensorrt-repo-ubuntu1804-cuda10.2-trt8.0.1.6-ga-20210626
|
||||
# 这个key值是官网文档查到的,当然通过apt-key list也能查看
|
||||
# sudo apt-key list
|
||||
# sudo apt-key del 7fa2af80
|
||||
|
||||
# 安装CUDA
|
||||
echo "\033[32m[INFO]: CUDA installing ...\033[0m"
|
||||
# wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-ubuntu1804.pin
|
||||
sudo cp $package_dir/cuda-ubuntu1804.pin /etc/apt/preferences.d/cuda-repository-pin-600
|
||||
sudo dpkg -i $cuda_fn
|
||||
sudo apt-key add /var/cuda-repo-ubuntu1804-11-1-local/7fa2af80.pub
|
||||
sudo apt-get update
|
||||
sudo apt-get -y install cuda
|
||||
|
||||
# 安装CUDNN
|
||||
echo "\033[32m[INFO]: CUDNN installing ...\033[0m"
|
||||
tar zxvf $cudnn_fn -C $tmp_dir
|
||||
sudo cp $tmp_dir/cuda/include/cudnn* /usr/local/cuda/include/
|
||||
sudo cp $tmp_dir/cuda/lib64/libcudnn* /usr/local/cuda/lib64/
|
||||
sudo chmod a+r /usr/local/cuda/include/cudnn* /usr/local/cuda/lib64/libcudnn*
|
||||
|
||||
sudo ln -sf /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_cnn_infer.so.8.2.1 /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_cnn_infer.so.8
|
||||
sudo ln -sf /usr/local/cuda/targets/x86_64-linux/lib/libcudnn.so.8.2.1 /usr/local/cuda/targets/x86_64-linux/lib/libcudnn.so.8
|
||||
sudo ln -sf /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_ops_train.so.8.2.1 /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_ops_train.so.8
|
||||
sudo ln -sf /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_adv_train.so.8.2.1 /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_adv_train.so.8
|
||||
sudo ln -sf /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_ops_infer.so.8.2.1 /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_ops_infer.so.8
|
||||
sudo ln -sf /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_cnn_train.so.8.2.1 /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_cnn_train.so.8
|
||||
sudo ln -sf /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_adv_infer.so.8.2.1 /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_adv_infer.so.8
|
||||
|
||||
# 安装TensorRT
|
||||
sudo dpkg -i $tensorrt_fn
|
||||
sudo apt-key add /var/nv-tensorrt-repo-cuda11.3-trt8.0.1.6-ga-20210626/7fa2af80.pub
|
||||
sudo apt-get update
|
||||
sudo apt-get install tensorrt -y
|
||||
sudo apt-get install python3-libnvinfer-dev -y
|
||||
|
||||
sudo rm $cuda_fn
|
||||
sudo rm $cudnn_fn
|
||||
sudo rm $tensorrt_fn
|
||||
sudo rm cuda-ubuntu1804.pin
|
||||
|
||||
|
||||
|
|
@ -5,7 +5,7 @@ wget https://download.amovlab.com/model/deps/opencv-4.7.0.zip
|
|||
wget https://download.amovlab.com/model/deps/opencv_contrib-4.7.0.zip
|
||||
wget https://download.amovlab.com/model/deps/opencv_cache_x86-4.7.0.zip
|
||||
|
||||
current_dir=$(pwd)
|
||||
|
||||
package_dir="."
|
||||
mkdir ~/opencv_build
|
||||
|
||||
|
@ -67,4 +67,3 @@ sudo make install
|
|||
|
||||
cd
|
||||
sudo rm -r ~/opencv_build
|
||||
cd ${current_dir}
|
|
@ -5,7 +5,7 @@ wget https://download.amovlab.com/model/deps/opencv-4.7.0.zip
|
|||
wget https://download.amovlab.com/model/deps/opencv_contrib-4.7.0.zip
|
||||
wget https://download.amovlab.com/model/deps/opencv_cache_x86-4.7.0.zip
|
||||
|
||||
current_dir=$(pwd)
|
||||
|
||||
package_dir="."
|
||||
mkdir ~/opencv_build
|
||||
|
||||
|
@ -67,4 +67,3 @@ sudo make install
|
|||
|
||||
cd
|
||||
sudo rm -r ~/opencv_build
|
||||
cd ${current_dir}
|