5 Commits

Author SHA1 Message Date
AiYangSky
fe18e3e0b6 build done 2023-12-21 18:46:16 +08:00
AiYangSky
5d2e5b0250 code done 2023-12-21 17:33:06 +08:00
AiYangSky
9f35bf476e add G1 camera 2023-12-20 19:05:47 +08:00
AiYangSky
173acc7036 add G1 camera 2023-12-20 18:55:51 +08:00
AiYangSky
a0d3e258d9 camerabase done 2023-12-19 18:44:25 +08:00
113 changed files with 1724 additions and 4015 deletions

12
.gitignore vendored
View File

@@ -1,13 +1,6 @@
# Distribution / packaging
.Python
build/
models/
confs/
ZLM/
ZLMediaKit/
ffmpeg-4.2.5/
nv-codec-headers/
*.bz2
develop-eggs/
dist/
eggs/
@@ -22,11 +15,6 @@ share/python-wheels/
*.egg
MANIFEST
.idea/
calib_webcam_1280x720.yaml
calib_webcam_640x480.yaml
sv_algorithm_params.json
sv_algorithm_params_coco_1280.json
sv_algorithm_params_coco_640.json
# Prerequisites
*.d

View File

@@ -24,7 +24,6 @@ else()
option(USE_FFMPEG "BUILD WITH FFMPEG." ON)
elseif(PLATFORM STREQUAL "X86_INTEL")
add_definitions(-DPLATFORM_X86_INTEL)
option(USE_INTEL "BUILD WITH INTEL." ON)
option(USE_FFMPEG "BUILD WITH FFMPEG." ON)
else()
message(FATAL_ERROR "UNSUPPORTED PLATFORM!")
@@ -39,10 +38,6 @@ if(USE_CUDA)
message(STATUS "CUDA: ON")
endif()
if(USE_INTEL)
add_definitions(-DWITH_INTEL)
message(STATUS "INTEL: ON")
endif()
if(USE_GSTREAMER)
add_definitions(-DWITH_GSTREAMER)
@@ -77,9 +72,6 @@ include_directories(
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/GX40
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/intel
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/landing_det/intel
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/veri/intel
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/cuda
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/landing_det/cuda
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/sot/ocv470
@@ -127,7 +119,8 @@ set(
include/sv_mot.h
include/sv_color_line.h
include/sv_veri_det.h
include/sv_video_input.h
# include/sv_video_input.h
include/sv_camera.h
include/sv_video_output.h
include/sv_world.h
)
@@ -195,23 +188,7 @@ if(USE_CUDA)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
endif()
if(USE_INTEL)
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/intel/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/landing_det/intel/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/veri/intel/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
endif()
if(USE_FFMPEG)
if(USE_INTEL)
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/ffmpeg/x86_intel/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
elseif(USE_CUDA)
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/ffmpeg/x86_cuda/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
endif()
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/ffmpeg/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
endif()
@@ -255,22 +232,10 @@ if(USE_CUDA) # PLATFORM_X86_CUDA & PLATFORM_JETSON
target_link_libraries(SpireCVSeg sv_world)
elseif(PLATFORM STREQUAL "X86_INTEL") # Links to Intel-OpenVINO libraries here
# Intel-Openvino
include_directories(
PUBLIC /opt/intel/openvino_2022/runtime/include/
PUBLIC /opt/intel/openvino_2022/runtime/include/ie/
)
link_directories(
${InferenceEngine_LIBRARIES}
/opt/intel/openvino_2022/runtime/lib/intel64/libopenvino.so
)
add_library(sv_world SHARED ${spirecv_SRCS})
target_link_libraries(
sv_world ${OpenCV_LIBS}
sv_gimbal
${InferenceEngine_LIBRARIES}
/opt/intel/openvino_2022/runtime/lib/intel64/libopenvino.so
)
endif()
@@ -364,14 +329,12 @@ if(PLATFORM STREQUAL "JETSON")
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/build/${PROJECT_NAME}Config.cmake.in [[
@PACKAGE_INIT@
find_package(OpenCV 4 REQUIRED)
find_package(Eigen3 REQUIRED)
link_directories(/usr/local/cuda/lib64)
set(SV_INCLUDE_DIRS
@SV_INSTALL_PREFIX@/include
/usr/include/x86_64-linux-gnu
/usr/local/cuda/include
${OpenCV_INCLUDE_DIRS}
${EIGEN3_INCLUDE_DIRS}
/usr/include/gstreamer-1.0
/usr/local/include/gstreamer-1.0
/usr/include/glib-2.0
@@ -390,7 +353,6 @@ elseif(PLATFORM STREQUAL "X86_CUDA")
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/build/${PROJECT_NAME}Config.cmake.in [[
@PACKAGE_INIT@
find_package(OpenCV 4 REQUIRED)
find_package(Eigen3 REQUIRED)
find_package(fmt REQUIRED)
link_directories(/usr/local/cuda/lib64)
set(SV_INCLUDE_DIRS
@@ -398,7 +360,6 @@ set(SV_INCLUDE_DIRS
/usr/include/x86_64-linux-gnu
/usr/local/cuda/include
${OpenCV_INCLUDE_DIRS}
${EIGEN3_INCLUDE_DIRS}
)
set(SV_LIBRARIES
@SV_INSTALL_PREFIX@/lib/libsv_yoloplugins.so
@@ -413,13 +374,11 @@ elseif(PLATFORM STREQUAL "X86_INTEL")
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/build/${PROJECT_NAME}Config.cmake.in [[
@PACKAGE_INIT@
find_package(OpenCV 4 REQUIRED)
find_package(Eigen3 REQUIRED)
find_package(fmt REQUIRED)
set(SV_INCLUDE_DIRS
@SV_INSTALL_PREFIX@/include
/usr/include/x86_64-linux-gnu
${OpenCV_INCLUDE_DIRS}
${EIGEN3_INCLUDE_DIRS}
)
set(SV_LIBRARIES
@SV_INSTALL_PREFIX@/lib/libsv_world.so

View File

@@ -38,9 +38,9 @@ SpireCV is an **real-time edge perception SDK** built for **intelligent unmanned
- **Platform level**
- [x] X86 + Nvidia GPUs (10 series, 20 series, and 30 series graphics cards recommended)
- [x] Jetson (AGX Orin/Xavier, Orin NX/Nano, Xavier NX)
- [x] Intel CPU
- [ ] HUAWEI Ascend (coming soon)
- [ ] Intel CPU (coming soon)
- [ ] Rockchip (coming soon)
- [ ] HUAWEI Ascend (coming soon)
## Demos
- **QR code detection**

View File

@@ -38,9 +38,9 @@ SpireCV是一个专为**智能无人系统**打造的**边缘实时感知SDK**
- **平台层**
- [x] X86+Nvidia GPU推荐10系、20系、30系显卡
- [x] JetsonAGX Orin/Xavier、Orin NX/Nano、Xavier NX
- [x] Intel CPU
- [ ] HUAWEI Ascend推进中
- [ ] Intel CPU(推进中)
- [ ] Rockchip推进中
- [ ] HUAWEI Ascend推进中
## 功能展示
- **二维码检测**

View File

@@ -1,8 +1,6 @@
#include "common_det_cuda_impl.h"
#include <cmath>
#include <fstream>
#include <iostream>
#include "sv_util.h"
#define SV_MODEL_DIR "/SpireCV/models/"
#define SV_ROOT_DIR "/SpireCV/"
@@ -364,51 +362,17 @@ bool CommonObjectDetectorCUDAImpl::cudaSetup(CommonObjectDetectorBase* base_, bo
bool with_segmentation = base_->withSegmentation();
double thrs_conf = base_->getThrsConf();
double thrs_nms = base_->getThrsNms();
std::string model = base_->getModel();
int bs = base_->getBatchSize();
char bs_c[8];
sprintf(bs_c, "%d", bs);
std::string bs_s(bs_c);
std::string engine_fn = get_home() + SV_MODEL_DIR + dataset + ".engine";
std::vector<std::string> files;
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.engine", "Nv-" + dataset + "-yolov5" + model + "_b" + bs_s + "_c");
if (files.size() > 0)
{
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
engine_fn = get_home() + SV_MODEL_DIR + files[0];
}
if (input_w == 1280)
{
files.clear();
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.engine", "Nv-" + dataset + "-yolov5" + model + "6_b" + bs_s + "_c");
if (files.size() > 0)
{
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
engine_fn = get_home() + SV_MODEL_DIR + files[0];
}
else
{
engine_fn = get_home() + SV_MODEL_DIR + dataset + "_HD.engine";
}
engine_fn = get_home() + SV_MODEL_DIR + dataset + "_HD.engine";
}
if (with_segmentation)
{
base_->setInputH(640);
base_->setInputW(640);
files.clear();
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.engine", "Nv-" + dataset + "-yolov5" + model + "_seg_b" + bs_s + "_c");
if (files.size() > 0)
{
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
engine_fn = get_home() + SV_MODEL_DIR + files[0];
}
else
{
engine_fn = get_home() + SV_MODEL_DIR + dataset + "_SEG.engine";
}
engine_fn = get_home() + SV_MODEL_DIR + dataset + "_SEG.engine";
}
std::cout << "Load: " << engine_fn << std::endl;
if (!is_file_exist(engine_fn))
@@ -450,5 +414,17 @@ bool CommonObjectDetectorCUDAImpl::cudaSetup(CommonObjectDetectorBase* base_, bo
return false;
}
}

View File

@@ -1,479 +0,0 @@
#include "common_det_intel_impl.h"
#include <cmath>
#include <fstream>
#include "sv_util.h"
#define SV_MODEL_DIR "/SpireCV/models/"
#define SV_ROOT_DIR "/SpireCV/"
namespace sv
{
#ifdef WITH_INTEL
using namespace cv;
using namespace std;
using namespace dnn;
#endif
// Logistic sigmoid: maps any real input into the open interval (0, 1).
// Used to squash raw mask-coefficient logits during segmentation decoding.
float sigmoid_function(float a)
{
    const float e = std::exp(-a);
    return 1.0f / (1.0f + e);
}
// Resize img_ to 640x640 with aspect ratio preserved, padding the borders
// with gray (114,114,114) as in YOLOv5 preprocessing.
// paddings must have at least 3 elements; on return:
//   paddings[0] = scale factor applied to the image,
//   paddings[1] = vertical padding per side (half_h),
//   paddings[2] = horizontal padding per side (half_w).
// Returns the letterboxed image.
cv::Mat letterbox(cv::Mat &img_, std::vector<float> &paddings)
{
std::vector<int> new_shape = {640, 640};
// Get current image shape [height, width]
int img_h = img_.rows;
int img_w = img_.cols;
// Compute scale ratio(new / old) and target resized shape
float scale = std::min(new_shape[1] * 1.0 / img_h, new_shape[0] * 1.0 / img_w);
int resize_h = int(round(img_h * scale));
int resize_w = int(round(img_w * scale));
paddings[0] = scale;
// Compute padding
int pad_h = new_shape[1] - resize_h;
int pad_w = new_shape[0] - resize_w;
// Resize and pad image while meeting stride-multiple constraints
cv::Mat resized_img;
cv::resize(img_, resized_img, cv::Size(resize_w, resize_h));
// divide padding into 2 sides
float half_h = pad_h * 1.0 / 2;
float half_w = pad_w * 1.0 / 2;
paddings[1] = half_h;
paddings[2] = half_w;
// Compute padding border; the +/-0.1 rounding bias guarantees
// top+bottom == pad_h and left+right == pad_w for odd paddings.
int top = int(round(half_h - 0.1));
int bottom = int(round(half_h + 0.1));
int left = int(round(half_w - 0.1));
int right = int(round(half_w + 0.1));
// Add border (borderType 0 == cv::BORDER_CONSTANT)
cv::copyMakeBorder(resized_img, resized_img, top, bottom, left, right, 0, cv::Scalar(114, 114, 114));
return resized_img;
}
// Default constructor: all members are initialized later in intelSetup().
CommonObjectDetectorIntelImpl::CommonObjectDetectorIntelImpl()
{
}
// Destructor: OpenVINO handles (compiled_model, infer_request) clean up
// themselves via RAII, so nothing to release explicitly.
CommonObjectDetectorIntelImpl::~CommonObjectDetectorIntelImpl()
{
}
// Run one synchronous detection pass on img_ and append results to the
// output vectors (box coordinates, class labels, scores and, for the
// segmentation model, per-target masks).
// Dispatches to the segmentation or plain-detection pre/post-processing
// depending on the base detector's withSegmentation() flag.
// NOTE(review): input_4k_ is accepted for interface symmetry with the CUDA
// implementation but is not used on this code path.
void CommonObjectDetectorIntelImpl::intelDetect(
    CommonObjectDetectorBase *base_,
    cv::Mat img_,
    std::vector<float> &boxes_x_,
    std::vector<float> &boxes_y_,
    std::vector<float> &boxes_w_,
    std::vector<float> &boxes_h_,
    std::vector<int> &boxes_label_,
    std::vector<float> &boxes_score_,
    std::vector<cv::Mat> &boxes_seg_,
    bool input_4k_)
{
#ifdef WITH_INTEL
    bool with_segmentation = base_->withSegmentation();
    double thrs_conf = base_->getThrsConf();
    double thrs_nms = base_->getThrsNms();
    if (with_segmentation)
    {
        std::vector<float> paddings(3); // scale, half_h, half_w
        this->preprocess_img_seg(img_, paddings);
        // start_async() + wait() makes the call synchronous overall.
        infer_request.start_async();
        infer_request.wait();
        // Postprocess
        this->postprocess_img_seg(img_, paddings, boxes_x_, boxes_y_, boxes_w_, boxes_h_, boxes_label_, boxes_score_, boxes_seg_, thrs_conf, thrs_nms);
    }
    else
    {
        // Preprocess
        this->preprocess_img(img_);
        // Run inference
        infer_request.start_async();
        infer_request.wait();
        // Postprocess
        this->postprocess_img(boxes_x_, boxes_y_, boxes_w_, boxes_h_, boxes_label_, boxes_score_, thrs_conf, thrs_nms);
    }
#endif
}
// Configure the OpenVINO runtime for common-object detection.
// Picks the model file from <home>/SpireCV/models/ based on the dataset
// name, input width (1280 selects the "_HD" / "...6" variant) and the
// segmentation flag, preferring the newest "-online.onnx" snapshot when one
// exists, then compiles it for the "GPU" device.
// Returns true on success; returns false when built without WITH_INTEL.
// Throws std::runtime_error when the model file is missing (104) or when
// 4K input is combined with segmentation (106).
bool CommonObjectDetectorIntelImpl::intelSetup(CommonObjectDetectorBase *base_, bool input_4k_)
{
#ifdef WITH_INTEL
    ov::Core core;
    std::string dataset = base_->getDataset();
    inpHeight = base_->getInputH();
    inpWidth = base_->getInputW();
    with_segmentation = base_->withSegmentation();
    std::string model = base_->getModel();

    // Default model path; overridden below when a newer online snapshot or a
    // resolution/segmentation-specific variant applies.
    std::string openvino_fn = get_home() + SV_MODEL_DIR + dataset + ".onnx";
    std::vector<std::string> files;
    _list_dir(get_home() + SV_MODEL_DIR, files, "-online.onnx", "Int-" + dataset + "-yolov5" + model + "_c");
    if (files.size() > 0)
    {
        // Reverse-sorted so files[0] is the lexicographically newest snapshot.
        std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
        openvino_fn = get_home() + SV_MODEL_DIR + files[0];
    }
    if (inpWidth == 1280)
    {
        // HD input uses the yolov5*6 model family.
        files.clear();
        _list_dir(get_home() + SV_MODEL_DIR, files, "-online.onnx", "Int-" + dataset + "-yolov5" + model + "6_c");
        if (files.size() > 0)
        {
            std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
            openvino_fn = get_home() + SV_MODEL_DIR + files[0];
        }
        else
        {
            openvino_fn = get_home() + SV_MODEL_DIR + dataset + "_HD.onnx";
        }
    }
    if (with_segmentation)
    {
        // Segmentation models are fixed at 640x640.
        base_->setInputH(640);
        base_->setInputW(640);
        files.clear();
        _list_dir(get_home() + SV_MODEL_DIR, files, "-online.onnx", "Int-" + dataset + "-yolov5" + model + "_seg_c");
        if (files.size() > 0)
        {
            std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
            openvino_fn = get_home() + SV_MODEL_DIR + files[0];
        }
        else
        {
            openvino_fn = get_home() + SV_MODEL_DIR + dataset + "_SEG.onnx";
        }
    }
    std::cout << "Load: " << openvino_fn << std::endl;
    if (!is_file_exist(openvino_fn))
    {
        throw std::runtime_error("SpireCV (104) Error loading the CommonObject OpenVINO model (File Not Exist)");
    }
    if (input_4k_ && with_segmentation)
    {
        throw std::runtime_error("SpireCV (106) Resolution 4K DO NOT Support Segmentation!");
    }
    if (with_segmentation)
    {
        // Segmentation path: compile as-is; preprocessing is done on the CPU
        // in preprocess_img_seg() via blobFromImage.
        this->compiled_model = core.compile_model(openvino_fn, "GPU");
        this->infer_request = compiled_model.create_infer_request();
    }
    else
    {
        // Detection path: bake u8/NHWC/RGB -> f32/NCHW conversion and 1/255
        // scaling into the compiled model so raw image bytes can be fed in.
        std::shared_ptr<ov::Model> model_ = core.read_model(openvino_fn);
        ov::preprocess::PrePostProcessor Pre_P = ov::preprocess::PrePostProcessor(model_);
        Pre_P.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC").set_color_format(ov::preprocess::ColorFormat::RGB);
        Pre_P.input().preprocess().convert_element_type(ov::element::f32).convert_color(ov::preprocess::ColorFormat::RGB).scale({255, 255, 255}); // .scale({ 112, 112, 112 });
        Pre_P.input().model().set_layout("NCHW");
        Pre_P.output().tensor().set_element_type(ov::element::f32);
        model_ = Pre_P.build();
        this->compiled_model = core.compile_model(model_, "GPU");
        this->infer_request = compiled_model.create_infer_request();
    }
    return true;
#endif
    return false;
}
// Resize img_ (aspect preserved, gray right/bottom padding) into
// resize.resized_image and bind it as the network input tensor.
// Also records this->rx / this->ry, the ratios used by postprocess_img()
// to map network-space boxes back to the original image.
void CommonObjectDetectorIntelImpl::preprocess_img(cv::Mat &img_)
{
#ifdef WITH_INTEL
    float width = img_.cols;
    float height = img_.rows;
    // NOTE(review): cv::Size is (width, height); passing (inpHeight, inpWidth)
    // is only correct when the input is square -- confirm if W != H is used.
    cv::Size new_shape = cv::Size(inpHeight, inpWidth);
    float r = float(new_shape.width / max(width, height));
    int new_unpadW = int(round(width * r));
    int new_unpadH = int(round(height * r));
    cv::resize(img_, resize.resized_image, cv::Size(new_unpadW, new_unpadH), 0, 0, cv::INTER_AREA);
    // Pad only on the right/bottom up to the network input size.
    resize.dw = new_shape.width - new_unpadW;
    resize.dh = new_shape.height - new_unpadH;
    cv::Scalar color = cv::Scalar(100, 100, 100);
    cv::copyMakeBorder(resize.resized_image, resize.resized_image, 0, resize.dh, 0, resize.dw, cv::BORDER_CONSTANT, color);
    this->rx = (float)img_.cols / (float)(resize.resized_image.cols - resize.dw);
    this->ry = (float)img_.rows / (float)(resize.resized_image.rows - resize.dh);
    if (with_segmentation)
    {
        cv::Mat blob = cv::dnn::blobFromImage(resize.resized_image, 1 / 255.0, cv::Size(640, 640), cv::Scalar(0, 0, 0), true);
        auto input_port = compiled_model.input();
        ov::Tensor input_tensor(input_port.get_element_type(), input_port.get_shape(), blob.ptr(0));
        infer_request.set_input_tensor(input_tensor);
    }
    else
    {
        // The compiled model was built with u8/NHWC preprocessing baked in
        // (see intelSetup), so the image buffer is handed over directly.
        float *input_data = (float *)resize.resized_image.data;
        input_tensor = ov::Tensor(compiled_model.input().get_element_type(), compiled_model.input().get_shape(), input_data);
        infer_request.set_input_tensor(input_tensor);
    }
#endif
}
// Segmentation preprocessing: letterbox img_ to 640x640 and bind the blob
// as the model input. paddings receives {scale, half_h, half_w} (filled by
// letterbox) so postprocess_img_seg() can undo the transform.
void CommonObjectDetectorIntelImpl::preprocess_img_seg(cv::Mat &img_, std::vector<float> &paddings)
{
#ifdef WITH_INTEL
    cv::Mat resized_img = letterbox(img_, paddings); // resize to (640,640) by letterbox
    // BGR->RGB, u8(0-255)->f32(0.0-1.0), HWC->NCHW
    cv::Mat blob = cv::dnn::blobFromImage(resized_img, 1 / 255.0, cv::Size(640, 640), cv::Scalar(0, 0, 0), true);
    // Get input port for model with one input
    auto input_port = compiled_model.input();
    // Create tensor from external memory (no copy; blob must outlive the call)
    ov::Tensor input_tensor(input_port.get_element_type(), input_port.get_shape(), blob.ptr(0));
    // Set input tensor for model with one input
    infer_request.set_input_tensor(input_tensor);
#endif
}
// Decode YOLOv5-seg outputs into boxes, labels, scores and per-target masks.
// Output tensor 0 holds per-candidate rows: 4 box values (cx,cy,w,h), an
// objectness score (col 4), class scores (cols 5..84) and mask coefficients
// (cols 85..116) -- layout implies an 80-class, 32-coefficient model; the
// prototype masks in output tensor 1 are 160x160 (reshaped from 1x25600).
// paddings = {scale, half_h, half_w} from letterbox(); used to map
// network-space coordinates back to the original image.
// Results are appended to the boxes_* output vectors; boxes_seg_ receives a
// full-image-size CV_8UC1 mask per kept detection.
void CommonObjectDetectorIntelImpl::postprocess_img_seg(cv::Mat &img_,
std::vector<float> &paddings,
std::vector<float> &boxes_x_,
std::vector<float> &boxes_y_,
std::vector<float> &boxes_w_,
std::vector<float> &boxes_h_,
std::vector<int> &boxes_label_,
std::vector<float> &boxes_score_,
std::vector<cv::Mat> &boxes_seg_,
double &thrs_conf,
double &thrs_nms)
{
#ifdef WITH_INTEL
const ov::Tensor &detect = infer_request.get_output_tensor(0);
ov::Shape detect_shape = detect.get_shape();
const ov::Tensor &proto = infer_request.get_output_tensor(1);
ov::Shape proto_shape = proto.get_shape();
// Wrap tensor memory as cv::Mat headers (no copy).
cv::Mat detect_buffer(detect_shape[1], detect_shape[2], CV_32F, detect.data());
cv::Mat proto_buffer(proto_shape[1], proto_shape[2] * proto_shape[3], CV_32F, proto.data());
cv::RNG rng;
float conf_threshold = thrs_conf;
float nms_threshold = thrs_nms;
std::vector<cv::Rect> boxes;
std::vector<int> class_ids;
std::vector<float> class_scores;
std::vector<float> confidences;
std::vector<cv::Mat> masks;
float scale = paddings[0];
// Pass 1: gather candidates above the objectness threshold.
for (int i = 0; i < detect_buffer.rows; i++)
{
float confidence = detect_buffer.at<float>(i, 4);
if (confidence < conf_threshold)
{
continue;
}
cv::Mat classes_scores = detect_buffer.row(i).colRange(5, 85);
cv::Point class_id;
double score;
cv::minMaxLoc(classes_scores, NULL, &score, NULL, &class_id);
// class score: 0~1
// NOTE(review): 0.25 is a hard-coded class-score gate, independent of
// thrs_conf -- confirm this is intentional.
if (score > 0.25)
{
cv::Mat mask = detect_buffer.row(i).colRange(85, 117);
float cx = detect_buffer.at<float>(i, 0);
float cy = detect_buffer.at<float>(i, 1);
float w = detect_buffer.at<float>(i, 2);
float h = detect_buffer.at<float>(i, 3);
// Undo letterbox: remove padding, divide by scale, convert to x,y,w,h.
int left = static_cast<int>((cx - 0.5 * w - paddings[2]) / scale);
int top = static_cast<int>((cy - 0.5 * h - paddings[1]) / scale);
int width = static_cast<int>(w / scale);
int height = static_cast<int>(h / scale);
cv::Rect box;
box.x = left;
box.y = top;
box.width = width;
box.height = height;
boxes.push_back(box);
class_ids.push_back(class_id.x);
class_scores.push_back(score);
confidences.push_back(confidence);
masks.push_back(mask);
}
}
// NMS
std::vector<int> indices;
cv::dnn::NMSBoxes(boxes, confidences, thrs_conf, thrs_nms, indices);
// cv::Mat rgb_mask;
// NOTE(review): rgb_mask is accumulated below but never read afterwards --
// appears to be leftover visualization code.
cv::Mat rgb_mask = cv::Mat::zeros(img_.size(), img_.type());
// Pass 2: build a full-size binary mask for each NMS survivor.
for (size_t i = 0; i < indices.size(); i++)
{
int index = indices[i];
int class_id = class_ids[index];
cv::Rect box = boxes[index];
int x1 = std::max(0, box.x);
int y1 = std::max(0, box.y);
int x2 = std::max(0, box.br().x);
int y2 = std::max(0, box.br().y);
// Mask logits = coefficients (1x32) x prototypes (32x25600).
cv::Mat m = masks[index] * proto_buffer;
for (int col = 0; col < m.cols; col++)
{
m.at<float>(0, col) = sigmoid_function(m.at<float>(0, col));
}
cv::Mat m1 = m.reshape(1, 160); // 1x25600 -> 160x160
// Crop the box region in mask space; 0.25 == 160/640 (mask/input ratio).
int mx1 = std::max(0, int((x1 * scale + paddings[2]) * 0.25));
int mx2 = std::max(0, int((x2 * scale + paddings[2]) * 0.25));
int my1 = std::max(0, int((y1 * scale + paddings[1]) * 0.25));
int my2 = std::max(0, int((y2 * scale + paddings[1]) * 0.25));
cv::Mat mask_roi = m1(cv::Range(my1, my2), cv::Range(mx1, mx2));
cv::Mat rm, det_mask;
cv::resize(mask_roi, rm, cv::Size(x2 - x1, y2 - y1));
// Binarize at 0.5.
for (int r = 0; r < rm.rows; r++)
{
for (int c = 0; c < rm.cols; c++)
{
float pv = rm.at<float>(r, c);
if (pv > 0.5)
{
rm.at<float>(r, c) = 1.0;
}
else
{
rm.at<float>(r, c) = 0.0;
}
}
}
rm = rm * rng.uniform(0, 255);
rm.convertTo(det_mask, CV_8UC1);
// Clamp the paste region so it stays inside the image.
if ((y1 + det_mask.rows) >= img_.rows)
{
y2 = img_.rows - 1;
}
if ((x1 + det_mask.cols) >= img_.cols)
{
x2 = img_.cols - 1;
}
cv::Mat mask = cv::Mat::zeros(cv::Size(img_.cols, img_.rows), CV_8UC1);
det_mask(cv::Range(0, y2 - y1), cv::Range(0, x2 - x1)).copyTo(mask(cv::Range(y1, y2), cv::Range(x1, x2)));
add(rgb_mask, cv::Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)), rgb_mask, mask);
boxes_x_.push_back(box.x);
boxes_y_.push_back(box.y);
boxes_w_.push_back(box.width);
boxes_h_.push_back(box.height);
boxes_label_.push_back((int)class_id);
boxes_score_.push_back(class_scores[index]);
cv::Mat mask_j = mask.clone();
boxes_seg_.push_back(mask_j);
}
#endif
}
// Decode plain-detection output into boxes, labels and scores.
// Each output row is: cx, cy, w, h, objectness, then per-class scores.
// Candidates must pass thrs_conf on both objectness and best class score;
// survivors of NMS (thrs_nms) are scaled back to original-image coordinates
// via rx/ry (set in preprocess_img) and appended to the output vectors.
void CommonObjectDetectorIntelImpl::postprocess_img(std::vector<float> &boxes_x_,
                                                    std::vector<float> &boxes_y_,
                                                    std::vector<float> &boxes_w_,
                                                    std::vector<float> &boxes_h_,
                                                    std::vector<int> &boxes_label_,
                                                    std::vector<float> &boxes_score_,
                                                    double &thrs_conf,
                                                    double &thrs_nms)
{
#ifdef WITH_INTEL
    const ov::Tensor &output_tensor = infer_request.get_output_tensor();
    ov::Shape output_shape = output_tensor.get_shape();
    float *detections = output_tensor.data<float>();
    std::vector<cv::Rect> boxes;
    std::vector<int> class_ids;
    std::vector<float> confidences;
    for (int i = 0; i < output_shape[1]; i++)
    {
        float *detection = &detections[i * output_shape[2]];
        float confidence = detection[4];
        if (confidence >= thrs_conf)
        {
            float *classes_scores = &detection[5];
            cv::Mat scores(1, output_shape[2] - 5, CV_32FC1, classes_scores);
            cv::Point class_id;
            double max_class_score;
            cv::minMaxLoc(scores, 0, &max_class_score, 0, &class_id);
            if (max_class_score > thrs_conf)
            {
                confidences.push_back(confidence);
                class_ids.push_back(class_id.x);
                // Convert center-format box to top-left format.
                float x = detection[0];
                float y = detection[1];
                float w = detection[2];
                float h = detection[3];
                float xmin = x - (w / 2);
                float ymin = y - (h / 2);
                boxes.push_back(cv::Rect(xmin, ymin, w, h));
            }
        }
    }
    std::vector<int> nms_result;
    cv::dnn::NMSBoxes(boxes, confidences, thrs_conf, thrs_nms, nms_result);
    std::vector<Detection> output;
    for (int i = 0; i < nms_result.size(); i++)
    {
        Detection result;
        int idx = nms_result[i];
        result.class_id = class_ids[idx];
        result.confidence = confidences[idx];
        result.box = boxes[idx];
        output.push_back(result);
    }
    for (int i = 0; i < output.size(); i++)
    {
        auto detection = output[i];
        auto box = detection.box;
        // Scale network-space coordinates back to the original image.
        boxes_x_.push_back(this->rx * box.x);
        boxes_y_.push_back(this->ry * box.y);
        boxes_w_.push_back(this->rx * box.width);
        boxes_h_.push_back(this->ry * box.height);
        boxes_label_.push_back((int)detection.class_id);
        boxes_score_.push_back(detection.confidence);
    }
#endif
}
}

View File

@@ -1,89 +0,0 @@
#ifndef __SV_COMMON_DET_INTEL__
#define __SV_COMMON_DET_INTEL__
#include "sv_core.h"
#include <string>
#include <iostream>
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <string>
#include <chrono>
#ifdef WITH_INTEL
#include <openvino/openvino.hpp>
#endif
// Holds the letterboxed image plus the right/bottom padding (in pixels)
// added to reach the network input size; filled by preprocess_img().
struct Resize
{
cv::Mat resized_image;
int dw;
int dh;
};
// One decoded detection: class index, confidence and box in network space.
struct Detection
{
int class_id;
float confidence;
cv::Rect box;
};
namespace sv
{
// OpenVINO backend for CommonObjectDetector (YOLOv5 detection and
// YOLOv5-seg segmentation). Call intelSetup() once, then intelDetect()
// per frame; results are appended to the caller-supplied vectors.
class CommonObjectDetectorIntelImpl
{
public:
CommonObjectDetectorIntelImpl();
~CommonObjectDetectorIntelImpl();
// Load and compile the model; returns false when built without WITH_INTEL.
bool intelSetup(CommonObjectDetectorBase *base_, bool input_4k_);
// Run one detection pass and append boxes/labels/scores (and masks when
// segmentation is enabled) to the output vectors.
void intelDetect(
CommonObjectDetectorBase *base_,
cv::Mat img_,
std::vector<float> &boxes_x_,
std::vector<float> &boxes_y_,
std::vector<float> &boxes_w_,
std::vector<float> &boxes_h_,
std::vector<int> &boxes_label_,
std::vector<float> &boxes_score_,
std::vector<cv::Mat> &boxes_seg_,
bool input_4k_);
// Bind img_ (resized/padded) as the network input (detection path).
void preprocess_img(cv::Mat &img_);
// Letterbox img_ and bind it as input; fills paddings (segmentation path).
void preprocess_img_seg(cv::Mat &img_, std::vector<float> &paddings);
// Decode segmentation outputs into boxes, labels, scores and masks.
void postprocess_img_seg(cv::Mat &img_,
std::vector<float> &paddings,
std::vector<float> &boxes_x_,
std::vector<float> &boxes_y_,
std::vector<float> &boxes_w_,
std::vector<float> &boxes_h_,
std::vector<int> &boxes_label_,
std::vector<float> &boxes_score_,
std::vector<cv::Mat> &boxes_seg_,
double &thrs_conf,
double &thrs_nms);
// Decode plain-detection output into boxes, labels and scores.
void postprocess_img(std::vector<float> &boxes_x_,
std::vector<float> &boxes_y_,
std::vector<float> &boxes_w_,
std::vector<float> &boxes_h_,
std::vector<int> &boxes_label_,
std::vector<float> &boxes_score_,
double &thrs_conf,
double &thrs_nms);
#ifdef WITH_INTEL
int inpWidth;
int inpHeight;
bool with_segmentation;
float rx; // the width ratio of original image and resized image
float ry; // the height ratio of original image and resized image
Resize resize;
ov::Tensor input_tensor;
ov::InferRequest infer_request;
ov::CompiledModel compiled_model;
#endif
};
}
#endif

View File

@@ -8,10 +8,6 @@
#include "common_det_cuda_impl.h"
#endif
#ifdef WITH_INTEL
#include <openvino/openvino.hpp>
#include "common_det_intel_impl.h"
#endif
namespace sv {
@@ -22,10 +18,6 @@ CommonObjectDetector::CommonObjectDetector(bool input_4k)
#ifdef WITH_CUDA
this->_cuda_impl = new CommonObjectDetectorCUDAImpl;
#endif
#ifdef WITH_INTEL
this->_intel_impl = new CommonObjectDetectorIntelImpl;
#endif
}
CommonObjectDetector::~CommonObjectDetector()
{
@@ -36,10 +28,6 @@ bool CommonObjectDetector::setupImpl()
#ifdef WITH_CUDA
return this->_cuda_impl->cudaSetup(this, this->_input_4k);
#endif
#ifdef WITH_INTEL
return this->_intel_impl->intelSetup(this, this->_input_4k);
#endif
return false;
}
@@ -65,23 +53,14 @@ void CommonObjectDetector::detectImpl(
boxes_label_,
boxes_score_,
boxes_seg_,
this->_input_4k);
#endif
#ifdef WITH_INTEL
this->_intel_impl->intelDetect(
this,
img_,
boxes_x_,
boxes_y_,
boxes_w_,
boxes_h_,
boxes_label_,
boxes_score_,
boxes_seg_,
this->_input_4k);
this->_input_4k
);
#endif
}
}

View File

@@ -1,7 +1,6 @@
#include "landing_det_cuda_impl.h"
#include <cmath>
#include <fstream>
#include "sv_util.h"
#define SV_MODEL_DIR "/SpireCV/models/"
#define SV_ROOT_DIR "/SpireCV/"
@@ -51,16 +50,6 @@ bool LandingMarkerDetectorCUDAImpl::cudaSetup()
{
#ifdef WITH_CUDA
std::string trt_model_fn = get_home() + SV_MODEL_DIR + "LandingMarker.engine";
std::vector<std::string> files;
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.engine", "Nv-LandingMarker-resnet34");
if (files.size() > 0)
{
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
trt_model_fn = get_home() + SV_MODEL_DIR + files[0];
}
std::cout << "Load: " << trt_model_fn << std::endl;
if (!is_file_exist(trt_model_fn))
{
throw std::runtime_error("SpireCV (104) Error loading the LandingMarker TensorRT model (File Not Exist)");

View File

@@ -1,104 +0,0 @@
#include "landing_det_intel_impl.h"
#include <cmath>
#include <fstream>
#include "sv_util.h"
#define SV_MODEL_DIR "/SpireCV/models/"
#define SV_ROOT_DIR "/SpireCV/"
namespace sv
{
#ifdef WITH_INTEL
using namespace cv;
using namespace std;
using namespace dnn;
#endif
// Default constructor: the OpenVINO pipeline is built later in intelSetup().
LandingMarkerDetectorIntelImpl::LandingMarkerDetectorIntelImpl()
{
}
// Destructor: OpenVINO members release themselves via RAII.
LandingMarkerDetectorIntelImpl::~LandingMarkerDetectorIntelImpl()
{
}
// Load the landing-marker classifier (ResNet34 ONNX), bake u8/NHWC/RGB ->
// f32/NCHW conversion plus 1/255 scaling into the model, and compile it for
// the "GPU" device. Prefers the newest "-online.onnx" snapshot when present.
// Returns true on success; returns false when built without WITH_INTEL.
// Throws std::runtime_error (104) when the model file does not exist.
bool LandingMarkerDetectorIntelImpl::intelSetup()
{
#ifdef WITH_INTEL
    std::string onnx_model_fn = get_home() + SV_MODEL_DIR + "LandingMarker.onnx";
    std::vector<std::string> files;
    // Fixed: was `list_dir`, which does not match the `_list_dir` helper used
    // by every other setup routine in this SDK.
    _list_dir(get_home() + SV_MODEL_DIR, files, "-online.onnx", "Int-LandingMarker-resnet34");
    if (files.size() > 0)
    {
        // Reverse-sorted so files[0] is the lexicographically newest snapshot.
        std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
        onnx_model_fn = get_home() + SV_MODEL_DIR + files[0];
    }
    if (!is_file_exist(onnx_model_fn))
    {
        throw std::runtime_error("SpireCV (104) Error loading the LandingMarker ONNX model (File Not Exist)");
    }
    // OpenVINO
    ov::Core core;
    std::shared_ptr<ov::Model> model = core.read_model(onnx_model_fn);
    ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);
    ppp.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC").set_color_format(ov::preprocess::ColorFormat::RGB);
    ppp.input().preprocess().convert_element_type(ov::element::f32).convert_color(ov::preprocess::ColorFormat::RGB).scale({255, 255, 255}); // .scale({ 112, 112, 112 });
    ppp.input().model().set_layout("NCHW");
    ppp.output().tensor().set_element_type(ov::element::f32);
    model = ppp.build();
    this->compiled_model = core.compile_model(model, "GPU");
    this->infer_request = compiled_model.create_infer_request();
    return true;
#endif
    return false;
}
// Classify each ROI crop and append the argmax class index (0..10) to
// output_labels_ (cleared first; one label per input ROI).
// NOTE(review): the ROI buffers are assumed to already match the model's
// input shape/type -- confirm against the caller's ROI preparation.
void LandingMarkerDetectorIntelImpl::intelRoiCNN(
    std::vector<cv::Mat> &input_rois_,
    std::vector<int> &output_labels_)
{
#ifdef WITH_INTEL
    output_labels_.clear();
    for (int i = 0; i < input_rois_.size(); i++)
    {
        cv::Mat e_roi = input_rois_[i];
        // Wrap the ROI's memory as the input tensor (no copy).
        auto input_port = compiled_model.input();
        ov::Tensor input_tensor(input_port.get_element_type(), input_port.get_shape(), e_roi.ptr(0));
        infer_request.set_input_tensor(input_tensor);
        // start_async() + wait() makes each inference synchronous.
        infer_request.start_async();
        infer_request.wait();
        const ov::Tensor &output_tensor = infer_request.get_output_tensor();
        this->_p_prob = output_tensor.data<float>();
        // Argmax over the 11 marker classes. (Inner index renamed from `i`,
        // which shadowed the outer loop variable.)
        double best_prob = 0;
        int label = 0;
        for (int k = 0; k < 11; ++k)
        {
            if (best_prob < this->_p_prob[k])
            {
                best_prob = this->_p_prob[k];
                label = k;
            }
        }
        output_labels_.push_back(label);
    }
#endif
}
}

View File

@@ -1,37 +0,0 @@
#ifndef __SV_LANDING_DET_INTEL__
#define __SV_LANDING_DET_INTEL__
#include "sv_core.h"
#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/tracking.hpp>
#include <string>
#include <chrono>
#ifdef WITH_INTEL
#include <openvino/openvino.hpp>
#endif
namespace sv
{
// OpenVINO backend for LandingMarkerDetector: an 11-class ROI classifier.
// Call intelSetup() once, then intelRoiCNN() with prepared ROI crops.
class LandingMarkerDetectorIntelImpl
{
public:
LandingMarkerDetectorIntelImpl();
~LandingMarkerDetectorIntelImpl();
// Load and compile the classifier; false when built without WITH_INTEL.
bool intelSetup();
// Classify each ROI; appends one label (0..10) per ROI to output_labels_.
void intelRoiCNN(
std::vector<cv::Mat> &input_rois_,
std::vector<int> &output_labels_);
#ifdef WITH_INTEL
float *_p_prob; // points at the last inference's output buffer
ov::Tensor input_tensor;
ov::InferRequest infer_request;
ov::CompiledModel compiled_model;
#endif
};
}
#endif

View File

@@ -7,10 +7,6 @@
#include "landing_det_cuda_impl.h"
#endif
#ifdef WITH_INTEL
#include <openvino/openvino.hpp>
#include "landing_det_intel_impl.h"
#endif
namespace sv {
@@ -20,10 +16,6 @@ LandingMarkerDetector::LandingMarkerDetector()
#ifdef WITH_CUDA
this->_cuda_impl = new LandingMarkerDetectorCUDAImpl;
#endif
#ifdef WITH_INTEL
this->_intel_impl = new LandingMarkerDetectorIntelImpl;
#endif
}
LandingMarkerDetector::~LandingMarkerDetector()
{
@@ -34,10 +26,6 @@ bool LandingMarkerDetector::setupImpl()
#ifdef WITH_CUDA
return this->_cuda_impl->cudaSetup();
#endif
#ifdef WITH_INTEL
return this->_intel_impl->intelSetup();
#endif
return false;
}
@@ -52,13 +40,11 @@ void LandingMarkerDetector::roiCNN(
output_labels_
);
#endif
#ifdef WITH_INTEL
this->_intel_impl->intelRoiCNN(
input_rois_,
output_labels_
);
#endif
}
}
}

View File

@@ -1,7 +1,6 @@
#include "sot_ocv470_impl.h"
#include <cmath>
#include <fstream>
#include "sv_util.h"
#define SV_MODEL_DIR "/SpireCV/models/"
#define SV_ROOT_DIR "/SpireCV/"
@@ -31,39 +30,9 @@ bool SingleObjectTrackerOCV470Impl::ocv470Setup(SingleObjectTrackerBase* base_)
std::string net = get_home() + SV_MODEL_DIR + "dasiamrpn_model.onnx";
std::string kernel_cls1 = get_home() + SV_MODEL_DIR + "dasiamrpn_kernel_cls1.onnx";
std::string kernel_r1 = get_home() + SV_MODEL_DIR + "dasiamrpn_kernel_r1.onnx";
std::vector<std::string> files1, files2, files3;
_list_dir(get_home() + SV_MODEL_DIR, files1, ".onnx", "Ocv-DaSiamRPN-Model-");
_list_dir(get_home() + SV_MODEL_DIR, files2, ".onnx", "Ocv-DaSiamRPN-Kernel-CLS1-");
_list_dir(get_home() + SV_MODEL_DIR, files3, ".onnx", "Ocv-DaSiamRPN-Kernel-R1-");
if (files1.size() > 0 && files2.size() > 0 && files3.size() > 0)
{
std::sort(files1.rbegin(), files1.rend(), _comp_str_lesser);
std::sort(files2.rbegin(), files2.rend(), _comp_str_lesser);
std::sort(files3.rbegin(), files3.rend(), _comp_str_lesser);
net = get_home() + SV_MODEL_DIR + files1[0];
kernel_cls1 = get_home() + SV_MODEL_DIR + files2[0];
kernel_r1 = get_home() + SV_MODEL_DIR + files3[0];
}
std::cout << "Load: " << net << std::endl;
std::cout << "Load: " << kernel_cls1 << std::endl;
std::cout << "Load: " << kernel_r1 << std::endl;
std::string backbone = get_home() + SV_MODEL_DIR + "nanotrack_backbone_sim.onnx";
std::string neckhead = get_home() + SV_MODEL_DIR + "nanotrack_head_sim.onnx";
std::vector<std::string> files4, files5;
_list_dir(get_home() + SV_MODEL_DIR, files4, ".onnx", "Ocv-NanoTrack-Backbone-SIM-");
_list_dir(get_home() + SV_MODEL_DIR, files5, ".onnx", "Ocv-NanoTrack-Head-SIM-");
if (files4.size() > 0 && files5.size() > 0)
{
std::sort(files4.rbegin(), files4.rend(), _comp_str_lesser);
std::sort(files5.rbegin(), files5.rend(), _comp_str_lesser);
backbone = get_home() + SV_MODEL_DIR + files4[0];
neckhead = get_home() + SV_MODEL_DIR + files5[0];
}
std::cout << "Load: " << backbone << std::endl;
std::cout << "Load: " << neckhead << std::endl;
try
{

View File

@@ -32,7 +32,7 @@ CameraAlgorithm::CameraAlgorithm()
// this->_allocator = NULL;
this->_t0 = std::chrono::system_clock::now();
this->alg_params_fn = _get_home() + SV_ROOT_DIR + "confs/sv_algorithm_params.json";
this->alg_params_fn = _get_home() + SV_ROOT_DIR + "sv_algorithm_params.json";
// std::cout << "CameraAlgorithm->alg_params_fn: " << this->alg_params_fn << std::endl;
// if (_is_file_exist(params_fn))
// this->loadAlgorithmParams(params_fn);
@@ -80,12 +80,6 @@ ArucoDetector::ArucoDetector()
}
void ArucoDetector::getIdsWithLengths(std::vector<int>& ids_, std::vector<double>& lengths_)
{
ids_ = this->_ids_need;
lengths_ = this->_lengths_need;
}
void ArucoDetector::_load()
{
JsonValue all_value;
@@ -916,14 +910,6 @@ void CommonObjectDetectorBase::setInputW(int w_)
{
this->_input_w = w_;
}
std::string CommonObjectDetectorBase::getModel()
{
return this->_model;
}
int CommonObjectDetectorBase::getBatchSize()
{
return this->_batch_size;
}
void CommonObjectDetectorBase::warmUp()
{
@@ -1010,11 +996,6 @@ void CommonObjectDetectorBase::detect(cv::Mat img_, TargetsInFrame& tgts_, Box*
if (this->_with_segmentation)
{
cv::Mat mask_j = boxes_seg[j].clone();
#ifdef WITH_INTEL
tgt.setMask(mask_j);
#endif
#ifdef WITH_CUDA
int maskh = mask_j.rows, maskw = mask_j.cols;
assert(maskh == maskw);
@@ -1044,7 +1025,6 @@ void CommonObjectDetectorBase::detect(cv::Mat img_, TargetsInFrame& tgts_, Box*
{
tgt.setMask(mask_j);
}
#endif
}
tgts_.targets.push_back(tgt);
@@ -1090,8 +1070,6 @@ void CommonObjectDetectorBase::_load()
this->_thrs_nms = 0.6;
this->_thrs_conf = 0.4;
this->_use_width_or_height = 0;
this->_batch_size = 1;
this->_model = "s";
for (auto i : detector_params_value) {
@@ -1099,12 +1077,6 @@ void CommonObjectDetectorBase::_load()
this->_dataset = i->value.toString();
std::cout << "dataset: " << this->_dataset << std::endl;
}
else if ("batchSize" == std::string(i->key)) {
this->_batch_size = i->value.toNumber();
}
else if ("model" == std::string(i->key)) {
this->_model = i->value.toString();
}
else if ("inputSize" == std::string(i->key)) {
// std::cout << "inputSize (old, new): " << this->_input_w << ", " << i->value.toNumber() << std::endl;
this->_input_w = i->value.toNumber();

View File

@@ -1,7 +1,6 @@
#include "veri_det_cuda_impl.h"
#include <cmath>
#include <fstream>
#include "sv_util.h"
#define SV_MODEL_DIR "/SpireCV/models/"
#define SV_ROOT_DIR "/SpireCV/"
@@ -76,16 +75,6 @@ namespace sv
{
#ifdef WITH_CUDA
std::string trt_model_fn = get_home() + SV_MODEL_DIR + "veri.engine";
std::vector<std::string> files;
_list_dir(get_home() + SV_MODEL_DIR, files, "-online.engine", "Nv-VERI-mobilenet_v3");
if (files.size() > 0)
{
std::sort(files.rbegin(), files.rend(), _comp_str_lesser);
trt_model_fn = get_home() + SV_MODEL_DIR + files[0];
}
std::cout << "Load: " << trt_model_fn << std::endl;
if (!is_file_exist(trt_model_fn))
{
throw std::runtime_error("SpireCV (104) Error loading the VeriDetector TensorRT model (File Not Exist)");

View File

@@ -1,112 +0,0 @@
#include "veri_det_intel_impl.h"
#include <cmath>
#include <fstream>
#define SV_MODEL_DIR "/SpireCV/models/"
#define SV_ROOT_DIR "/SpireCV/"
#include <iostream>
#include <cmath>
int BAT = 1;
float cosineSimilarity(float *vec1, float *vec2, int size)
{
// 计算向量的点积
float dotProduct = 0.0f;
for (int i = 0; i < size; ++i)
{
dotProduct += vec1[i] * vec2[i];
}
// 计算向量的模长
float magnitudeVec1 = 0.0f;
float magnitudeVec2 = 0.0f;
for (int i = 0; i < size; ++i)
{
magnitudeVec1 += vec1[i] * vec1[i];
magnitudeVec2 += vec2[i] * vec2[i];
}
magnitudeVec1 = std::sqrt(magnitudeVec1);
magnitudeVec2 = std::sqrt(magnitudeVec2);
// 计算余弦相似性
float similarity = dotProduct / (magnitudeVec1 * magnitudeVec2);
return similarity;
}
namespace sv
{
#ifdef WITH_INTEL
using namespace cv;
using namespace std;
using namespace dnn;
#endif
VeriDetectorIntelImpl::VeriDetectorIntelImpl()
{
}
VeriDetectorIntelImpl::~VeriDetectorIntelImpl()
{
}
bool VeriDetectorIntelImpl::intelSetup()
{
#ifdef WITH_INTEL
std::string onnx_model_fn = get_home() + SV_MODEL_DIR + "veri.onnx";
if (!is_file_exist(onnx_model_fn))
{
throw std::runtime_error("SpireCV (104) Error loading the VeriDetector openVINO model (File Not Exist)");
}
// OpenVINO
ov::Core core;
this->compiled_model = core.compile_model(onnx_model_fn, "GPU");
this->infer_request = compiled_model.create_infer_request();
return true;
#endif
return false;
}
void VeriDetectorIntelImpl::intelRoiCNN(
std::vector<cv::Mat> &input_rois_,
std::vector<float> &output_labels_)
{
#ifdef WITH_INTEL
Mat blobs;
blobFromImages(input_rois_, blobs, 1 / 255.0, Size(224, 224), Scalar(0, 0, 0), true, true);
auto input_port = compiled_model.input();
ov::Tensor input_tensor(input_port.get_element_type(), input_port.get_shape(), blobs.ptr(0));
infer_request.infer();
const ov::Tensor &label_pre = infer_request.get_output_tensor(0);
this->_p_prob1 = label_pre.data<float>();
const ov::Tensor &proto_pre = infer_request.get_output_tensor(1);
this->_p_prob2 = proto_pre.data<float>();
// Find max index
double max = 0;
int label = 0;
for (int i = 0; i < 576; ++i)
{
if (max < this->_p_prob1[i])
{
max = this->_p_prob1[i];
label = i;
}
}
float similarity = cosineSimilarity(this->_p_prob2, this->_p_prob2 + 1280, 1280);
output_labels_.push_back(label);
output_labels_.push_back(similarity);
#endif
}
}

View File

@@ -1,41 +0,0 @@
#ifndef __SV_VERI_DET_INTEL__
#define __SV_VERI_DET_INTEL__
#include "sv_core.h"
#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/tracking.hpp>
#include <string>
#include <chrono>
#ifdef WITH_INTEL
#include <openvino/openvino.hpp>
#endif
namespace sv
{
class VeriDetectorIntelImpl
{
public:
VeriDetectorIntelImpl();
~VeriDetectorIntelImpl();
bool intelSetup();
void intelRoiCNN(
std::vector<cv::Mat> &input_rois_,
std::vector<float> &output_labels_);
#ifdef WITH_INTEL
float *_p_data;
float *_p_prob1;
float *_p_prob2;
ov::Tensor input_tensor;
ov::InferRequest infer_request;
ov::CompiledModel compiled_model;
#endif
};
}
#endif

View File

@@ -10,11 +10,6 @@
#include "veri_det_cuda_impl.h"
#endif
#ifdef WITH_INTEL
#include <openvino/openvino.hpp>
#include "veri_det_intel_impl.h"
#endif
#define SV_ROOT_DIR "/SpireCV/"
namespace sv
@@ -25,10 +20,6 @@ namespace sv
#ifdef WITH_CUDA
this->_cuda_impl = new VeriDetectorCUDAImpl;
#endif
#ifdef WITH_INTEL
this->_intel_impl = new VeriDetectorIntelImpl;
#endif
}
VeriDetector::~VeriDetector()
{
@@ -59,9 +50,6 @@ namespace sv
return this->_cuda_impl->cudaSetup();
#endif
#ifdef WITH_INTEL
return this->_intel_impl->intelSetup();
#endif
return false;
}
@@ -75,12 +63,6 @@ namespace sv
input_rois_,
output_labels_);
#endif
#ifdef WITH_INTEL
this->_intel_impl->intelRoiCNN(
input_rois_,
output_labels_);
#endif
}
void VeriDetector::detect(cv::Mat img_, const cv::Rect &bounding_box_, sv::Target &tgt)
@@ -112,23 +94,26 @@ namespace sv
cv::Mat img_ground = cv::imread(img_ground_dir);
cv::resize(img_ground, img_ground, cv::Size(224, 224));
std::vector<cv::Mat> input_rois_ = {crop, img_ground};
std::vector<float> output_labels;
#ifdef WITH_CUDA
roiCNN(input_rois_, output_labels);
#endif
#ifdef WITH_INTEL
#ifdef WITH_CUDA
std::vector<float> output_labels;
roiCNN(input_rois_, output_labels);
#endif
// auto t1 = std::chrono::system_clock::now();
// tgts_.setFPS(1000.0 / std::chrono::duration_cast<std::chrono::milliseconds>(t1 - this->_t0).count());
// this->_t0 = std::chrono::system_clock::now();
// tgts_.setTimeNow();
if (output_labels.size() > 0)
{
// tgt.category_id = output_labels[0];
tgt.sim_score = output_labels[1];
// tgts_.targets.push_back(tgt);
}
#endif
}
void VeriDetector::getSubwindow(cv::Mat &dstCrop, cv::Mat &srcImg, int originalSz, int resizeSz)
void VeriDetector::getSubwindow(cv::Mat &dstCrop, cv::Mat &srcImg, int originalSz, int resizeSz)
{
cv::Scalar avgChans = mean(srcImg);
cv::Size imgSz = srcImg.size();

View File

@@ -28,7 +28,6 @@ GX40GimbalDriver::GX40GimbalDriver(amovGimbal::IOStreamBase *_IO) : amovGimbal::
targetPos[2] = 0;
parserState = GX40::GIMBAL_FRAME_PARSER_STATE_IDLE;
upDataTs = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()) - std::chrono::milliseconds(2000);
}
/**
@@ -108,8 +107,7 @@ uint32_t GX40GimbalDriver::pack(IN uint32_t cmd, uint8_t *pPayload, uint8_t payl
// 惯导数据填充
std::chrono::milliseconds nowTs = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch());
// over 1s GNSS has losed
if ((nowTs.count() - upDataTs.count()) < std::chrono::milliseconds(1500).count() &&
cmd == GX40::GIMBAL_CMD_NOP)
if ((nowTs.count() - upDataTs.count()) < std::chrono::milliseconds(1500).count())
{
primary->selfRoll = (int16_t)(-(carrierPos.roll / 0.01f));
primary->selfPitch = (int16_t)(-(carrierPos.pitch / 0.01f));

View File

@@ -46,7 +46,6 @@ class ArucoDetector : public CameraAlgorithm
public:
ArucoDetector();
void detect(cv::Mat img_, TargetsInFrame& tgts_);
void getIdsWithLengths(std::vector<int>& ids_, std::vector<double>& lengths_);
private:
void _load();
bool _params_loaded;
@@ -140,8 +139,6 @@ public:
double getThrsConf();
int useWidthOrHeight();
bool withSegmentation();
std::string getModel();
int getBatchSize();
protected:
virtual bool setupImpl();
virtual void detectImpl(
@@ -168,8 +165,6 @@ protected:
double _thrs_conf;
int _use_width_or_height;
bool _with_segmentation;
std::string _model;
int _batch_size;
};

79
include/sv_camera.h Normal file
View File

@@ -0,0 +1,79 @@
#ifndef __SV_CAMERA__
#define __SV_CAMERA__
#include "opencv2/opencv.hpp"
namespace sv
{
enum class CameraType : int
{
// 未知相机
NONE = 0X00001000,
// 标准opencv框架下的相机
V4L2CAM = 0X00001001,
WEBCAM = 0X00001002,
// 视频文件
VIDEOFILE = 0X00002001,
// 普通流媒体相机
STREAMING = 0X00004001,
// 普通的MIPI-CSI相机
MIPI = 0X00008001,
// 进行了传输优化适配的相机
G1 = 0X00010001,
Q10 = 0X00010002,
GX40 = 0X00010003,
MC1 = 0X00010004,
// 虚拟的相机设备 主要仿真接入
VIRTUAL = 0X00020001,
};
class Camera
{
private:
void *dev;
public:
// 构建时根据相机类型实例化对应的相机类
// timeOut: 相机连接超时时间;超过该时间没有读取到新的帧,则认为相机失去连接 单位ms
Camera(CameraType type, int timeOut = 500);
// 基本配置 必须调用下述的一个接口后才能open
bool setStream(const std::string &ip, int port);
bool setName(const std::string &name);
bool setIndex(int index);
// 基础功能
bool open(void);
bool read(cv::Mat &image);
bool readNoBlock(cv::Mat &image);
void release(void);
// 属性配置
void setWH(int width, int height);
void setFps(int fps);
void setBrightness(double brightness);
void setContrast(double contrast);
void setSaturation(double saturation);
void setHue(double hue);
void setExposure(double exposure);
// 获取属性
bool isActive(void);
CameraType getType(void);
std::string getName(void);
int getW(void);
int getH(void);
int getExpectFps(void);
double getFps(void);
double getBrightness(void);
double getContrast(void);
double getSaturation(void);
double getHue(void);
double getExposure(void);
~Camera();
};
}
#endif

View File

@@ -25,6 +25,7 @@ public:
double line_location_a2;
bool is_load_parameter;
std::string line_color;
protected:
@@ -39,4 +40,4 @@ protected:
void seg(cv::Mat line_area_, cv::Mat line_area_a1_, cv::Mat line_area_a2_, std::string line_color_, cv::Point &center_, int &area_, cv::Point &center_a1_, cv::Point &center_a2_);
};
}
#endif
#endif

View File

@@ -8,36 +8,33 @@
#include <string>
#include <chrono>
namespace sv
{
namespace sv {
class CommonObjectDetectorCUDAImpl;
class CommonObjectDetectorIntelImpl;
class CommonObjectDetector : public CommonObjectDetectorBase
{
public:
CommonObjectDetector(bool input_4k = false);
CommonObjectDetector(bool input_4k=false);
~CommonObjectDetector();
protected:
bool setupImpl();
void detectImpl(
cv::Mat img_,
std::vector<float> &boxes_x_,
std::vector<float> &boxes_y_,
std::vector<float> &boxes_w_,
std::vector<float> &boxes_h_,
std::vector<int> &boxes_label_,
std::vector<float> &boxes_score_,
std::vector<cv::Mat> &boxes_seg_);
CommonObjectDetectorCUDAImpl *_cuda_impl;
CommonObjectDetectorIntelImpl *_intel_impl;
std::vector<float>& boxes_x_,
std::vector<float>& boxes_y_,
std::vector<float>& boxes_w_,
std::vector<float>& boxes_h_,
std::vector<int>& boxes_label_,
std::vector<float>& boxes_score_,
std::vector<cv::Mat>& boxes_seg_
);
CommonObjectDetectorCUDAImpl* _cuda_impl;
bool _input_4k;
};
}
#endif

View File

@@ -12,7 +12,6 @@
namespace sv {
class LandingMarkerDetectorCUDAImpl;
class LandingMarkerDetectorIntelImpl;
class LandingMarkerDetector : public LandingMarkerDetectorBase
{
@@ -26,8 +25,7 @@ protected:
std::vector<int>& output_labels_
);
LandingMarkerDetectorCUDAImpl *_cuda_impl;
LandingMarkerDetectorIntelImpl *_intel_impl;
LandingMarkerDetectorCUDAImpl* _cuda_impl;
};

View File

@@ -8,8 +8,8 @@
#include <opencv2/tracking.hpp>
#include <string>
#include <chrono>
//#include <Eigen/Dense>
#include <eigen3/Eigen/Dense>
#include <Eigen/Dense>
namespace sv {

View File

@@ -11,34 +11,33 @@
namespace sv
{
class VeriDetectorCUDAImpl;
class VeriDetectorIntelImpl;
class VeriDetectorCUDAImpl;
class VeriDetector : public LandingMarkerDetectorBase
{
public:
VeriDetector();
~VeriDetector();
class VeriDetector : public LandingMarkerDetectorBase
{
public:
VeriDetector();
~VeriDetector();
void detect(cv::Mat img_, const cv::Rect &bounding_box_, sv::Target &tgt);
void detect(cv::Mat img_, const cv::Rect &bounding_box_, sv::Target &tgt);
protected:
void _load();
bool setupImpl();
void roiCNN(
std::vector<cv::Mat> &input_rois_,
std::vector<float> &output_labels_);
void getSubwindow(cv::Mat &dstCrop, cv::Mat &srcImg, int originalSz, int resizeSz);
protected:
void _load();
bool setupImpl();
void roiCNN(
std::vector<cv::Mat> &input_rois_,
std::vector<float> &output_labels_);
void getSubwindow(cv::Mat &dstCrop, cv::Mat &srcImg, int originalSz, int resizeSz);
std::string vehicle_id;
std::string vehicle_id;
// Save the target bounding box for each frame.
std::vector<float> targetSz = {0, 0}; // H and W of bounding box
std::vector<float> targetPos = {0, 0}; // center point of bounding box (x, y)
// Save the target bounding box for each frame.
std::vector<float> targetSz = {0, 0}; // H and W of bounding box
std::vector<float> targetPos = {0, 0}; // center point of bounding box (x, y)
VeriDetectorCUDAImpl *_cuda_impl;
VeriDetectorIntelImpl *_intel_impl;
};
VeriDetectorCUDAImpl *_cuda_impl;
};
}
#endif

View File

@@ -128,7 +128,6 @@ public:
bool getBox(Box& b);
bool getAruco(int& id, std::vector<cv::Point2f> &corners);
bool getAruco(int& id, std::vector<cv::Point2f> &corners, cv::Vec3d &rvecs, cv::Vec3d &tvecs);
bool getEllipse(double& xc_, double& yc_, double& a_, double& b_, double& rad_);
std::string getJsonStr();
@@ -327,66 +326,59 @@ protected:
};
enum class CameraType {NONE, WEBCAM, V4L2CAM, MIPI, RTSP, VIDEO, G1, Q10, GX40};
// enum class CameraType {NONE, WEBCAM, V4L2CAM, G1, Q10, MIPI, GX40};
class CameraBase {
public:
CameraBase(CameraType type=CameraType::NONE, int id=0);
~CameraBase();
void open(CameraType type=CameraType::V4L2CAM, int id=0);
bool read(cv::Mat& image);
void release();
// class CameraBase {
// public:
// CameraBase(CameraType type=CameraType::NONE, int id=0);
// ~CameraBase();
// void open(CameraType type=CameraType::WEBCAM, int id=0);
// bool read(cv::Mat& image);
// void release();
int getW();
int getH();
int getFps();
std::string getIp();
int getPort();
double getBrightness();
double getContrast();
double getSaturation();
double getHue();
double getExposure();
std::string getFourcc();
bool isRunning();
void setFourcc(std::string fourcc);
void setWH(int width, int height);
void setFps(int fps);
void setIp(std::string ip);
void setRtspUrl(std::string rtsp_url);
void setVideoPath(std::string video_path);
void setPort(int port);
void setBrightness(double brightness);
void setContrast(double contrast);
void setSaturation(double saturation);
void setHue(double hue);
void setExposure(double exposure);
protected:
virtual void openImpl();
void _run();
// int getW();
// int getH();
// int getFps();
// std::string getIp();
// int getPort();
// double getBrightness();
// double getContrast();
// double getSaturation();
// double getHue();
// double getExposure();
// bool isRunning();
// void setWH(int width, int height);
// void setFps(int fps);
// void setIp(std::string ip);
// void setPort(int port);
// void setBrightness(double brightness);
// void setContrast(double contrast);
// void setSaturation(double saturation);
// void setHue(double hue);
// void setExposure(double exposure);
// protected:
// virtual void openImpl();
// void _run();
bool _is_running;
bool _is_updated;
std::thread _tt;
cv::VideoCapture _cap;
cv::Mat _frame;
CameraType _type;
int _camera_id;
// bool _is_running;
// bool _is_updated;
// std::thread _tt;
// cv::VideoCapture _cap;
// cv::Mat _frame;
// CameraType _type;
// int _camera_id;
int _width;
int _height;
int _fps;
std::string _ip;
std::string _rtsp_url;
std::string _video_path;
int _port;
double _brightness;
double _contrast;
double _saturation;
double _hue;
double _exposure;
std::string _fourcc;
};
// int _width;
// int _height;
// int _fps;
// std::string _ip;
// int _port;
// double _brightness;
// double _contrast;
// double _saturation;
// double _hue;
// double _exposure;
// };
void drawTargetsInFrame(
@@ -401,16 +393,6 @@ void drawTargetsInFrame(
bool with_aruco=false,
bool with_yaw=false
);
cv::Mat drawTargetsInFrame(
const cv::Mat img_,
const TargetsInFrame& tgts_,
const double scale,
bool with_all=true,
bool with_category=false,
bool with_tid=false,
bool with_seg=false,
bool with_box=false
);
std::string get_home();
bool is_file_exist(std::string& fn);
void list_dir(std::string dir, std::vector<std::string>& files, std::string suffixs="", bool r=false);

View File

@@ -1,3 +1,4 @@
#if 0
#ifndef __SV_VIDEO_INPUT__
#define __SV_VIDEO_INPUT__
@@ -25,3 +26,4 @@ protected:
}
#endif
#endif

View File

@@ -1,13 +1,11 @@
{
"CommonObjectDetector": {
"dataset": "COCO",
"model": "s",
"batchSize": 1,
"inputSize": 640,
"withSegmentation": true,
"nmsThrs": 0.6,
"scoreThrs": 0.4,
"useWidthOrHeight": 1,
"withSegmentation": true,
"datasetPersonVehicle": {
"person": [0.5, 1.8],
"car": [4.1, 1.5],

View File

@@ -1,13 +1,11 @@
{
"CommonObjectDetector": {
"dataset": "COCO",
"model": "s",
"batchSize": 1,
"inputSize": 1280,
"withSegmentation": false,
"nmsThrs": 0.6,
"scoreThrs": 0.4,
"useWidthOrHeight": 1,
"withSegmentation": false,
"datasetPersonVehicle": {
"person": [0.5, 1.8],
"car": [4.1, 1.5],

View File

@@ -1,13 +1,11 @@
{
"CommonObjectDetector": {
"dataset": "COCO",
"model": "s",
"batchSize": 1,
"inputSize": 640,
"withSegmentation": true,
"nmsThrs": 0.6,
"scoreThrs": 0.4,
"useWidthOrHeight": 1,
"withSegmentation": true,
"datasetPersonVehicle": {
"person": [0.5, 1.8],
"car": [4.1, 1.5],

View File

@@ -1,13 +1,11 @@
{
"CommonObjectDetector": {
"dataset": "COCO",
"model": "s",
"batchSize": 1,
"inputSize": 640,
"withSegmentation": false,
"nmsThrs": 0.6,
"scoreThrs": 0.4,
"useWidthOrHeight": 1,
"withSegmentation": false,
"datasetPersonVehicle": {
"person": [0.5, 1.8],
"car": [4.1, 1.5],

View File

@@ -1,13 +1,11 @@
{
"CommonObjectDetector": {
"dataset": "COCO",
"model": "s",
"batchSize": 1,
"inputSize": 1280,
"withSegmentation": false,
"nmsThrs": 0.6,
"scoreThrs": 0.4,
"useWidthOrHeight": 1,
"withSegmentation": false,
"datasetPersonVehicle": {
"person": [0.5, 1.8],
"car": [4.1, 1.5],

View File

@@ -1,13 +1,11 @@
{
"CommonObjectDetector": {
"dataset": "COCO",
"model": "s",
"batchSize": 1,
"inputSize": 640,
"withSegmentation": false,
"nmsThrs": 0.6,
"scoreThrs": 0.4,
"useWidthOrHeight": 1,
"withSegmentation": true,
"datasetPersonVehicle": {
"person": [0.5, 1.8],
"car": [4.1, 1.5],

View File

@@ -1,13 +1,11 @@
{
"CommonObjectDetector": {
"dataset": "COCO",
"model": "s",
"batchSize": 1,
"inputSize": 640,
"withSegmentation": true,
"nmsThrs": 0.6,
"scoreThrs": 0.4,
"useWidthOrHeight": 1,
"withSegmentation": true,
"datasetPersonVehicle": {
"person": [0.5, 1.8],
"car": [4.1, 1.5],

View File

@@ -1,13 +1,11 @@
{
"CommonObjectDetector": {
"dataset": "COCO",
"model": "s",
"batchSize": 1,
"inputSize": 640,
"withSegmentation": true,
"nmsThrs": 0.6,
"scoreThrs": 0.4,
"useWidthOrHeight": 1,
"withSegmentation": true,
"datasetPersonVehicle": {
"person": [0.5, 1.8],
"car": [4.1, 1.5],

View File

@@ -1,13 +1,11 @@
{
"CommonObjectDetector": {
"dataset": "COCO",
"model": "s",
"batchSize": 1,
"inputSize": 640,
"withSegmentation": true,
"nmsThrs": 0.6,
"scoreThrs": 0.4,
"useWidthOrHeight": 1,
"withSegmentation": true,
"datasetPersonVehicle": {
"person": [0.5, 1.8],
"car": [4.1, 1.5],

View File

@@ -1,13 +1,11 @@
{
"CommonObjectDetector": {
"dataset": "COCO",
"model": "s",
"batchSize": 1,
"inputSize": 640,
"withSegmentation": true,
"nmsThrs": 0.6,
"scoreThrs": 0.4,
"useWidthOrHeight": 1,
"withSegmentation": true,
"datasetPersonVehicle": {
"person": [0.5, 1.8],
"car": [4.1, 1.5],

View File

@@ -39,6 +39,8 @@ the use of this software, even if advised of the possibility of such damage.
#include <sv_world.h>
#include "aruco_samples_utility.hpp"
#include "sv_camera.h"
using namespace std;
using namespace cv;
@@ -146,10 +148,14 @@ int main(int argc, char *argv[]) {
}
// VideoCapture inputVideo;
sv::Camera inputVideo;
sv::Camera inputVideo(c_type);
inputVideo.setIndex(camId);
inputVideo.setStream("192.168.144.64",554);
inputVideo.setWH(imW, imH);
inputVideo.setFps(fps);
inputVideo.open(c_type, camId);
inputVideo.open();
int waitTime = 10;

View File

@@ -97,8 +97,7 @@ int main(int argc, char *argv[]) {
if (landingPadUnit > 0) {
Mat markerImgBIG = Mat::ones(landingPadUnit * 62, landingPadUnit * 62, CV_8UC1) * 255;
// markerId = 0;
markerId --;
markerId = 0;
markerSize = landingPadUnit * 4;
borderSize = landingPadUnit;
int newSize = markerSize + borderSize * 2;
@@ -223,7 +222,7 @@ int main(int argc, char *argv[]) {
markerImg = create_marker_with_borders(dictionary, markerId, markerSize, borderBits, borderSize);
markerImg.copyTo(markerImgBIG(Rect(landingPadUnit * 10, landingPadUnit * 28, newSize, newSize)));
// markerId = 90;
markerId = 90;
markerSize = landingPadUnit * 4 * 4;
borderSize = landingPadUnit * 4;
newSize = markerSize + borderSize * 2;

View File

@@ -2,6 +2,7 @@
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include "sv_camera.h"
using namespace std;
@@ -9,14 +10,15 @@ int main(int argc, char *argv[]) {
// 实例化Aruco检测器类
sv::ArucoDetector ad;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
ad.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
ad.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
ad.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// 打开摄像头
sv::Camera cap;
sv::Camera cap(sv::CameraType::WEBCAM);
cap.setIndex(0);
cap.setWH(ad.image_width, ad.image_height);
cap.setFps(30);
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
// cap.setFps(30);
cap.open(); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
int frame_id = 0;

View File

@@ -2,15 +2,17 @@
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include "sv_camera.h"
using namespace std;
int main(int argc, char *argv[]) {
// 打开摄像头
sv::Camera cap;
cap.setWH(1280, 720);
cap.setFps(30);
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
sv::Camera cap(sv::CameraType::WEBCAM);
cap.setIndex(0);
// cap.setWH(640, 480);
// cap.setFps(30);
cap.open(); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
while (1)

View File

@@ -2,6 +2,7 @@
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include "sv_camera.h"
using namespace std;
using namespace sv;
@@ -11,14 +12,14 @@ int main(int argc, char *argv[])
// 实例化 color line detection 检测器类
sv::ColorLineDetector cld;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
cld.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
cld.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
cld.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// 打开摄像头
sv::Camera cap;
sv::Camera cap(sv::CameraType::WEBCAM);
cap.setIndex(0);
cap.setWH(cld.image_width, cld.image_height);
cap.setFps(30);
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
// cap.setFps(30);
cap.open(); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
int frame_id = 0;

View File

@@ -2,6 +2,7 @@
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include "sv_camera.h"
using namespace std;
@@ -9,14 +10,14 @@ int main(int argc, char *argv[]) {
// 实例化 通用目标 检测器类
sv::CommonObjectDetector cod;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// 打开摄像头
sv::Camera cap;
sv::Camera cap(sv::CameraType::WEBCAM);
cap.setIndex(0);
cap.setWH(cod.image_width, cod.image_height);
cap.setFps(30);
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
// cap.setFps(30);
cap.open(); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
int frame_id = 0;

View File

@@ -2,6 +2,7 @@
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include "sv_camera.h"
using namespace std;
@@ -40,20 +41,19 @@ int main(int argc, char *argv[]) {
// 实例化 框选目标跟踪类
sv::SingleObjectTracker sot;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
sot.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
sot.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
sv::CommonObjectDetector cod;
cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// 打开摄像头
sv::Camera cap;
sv::Camera cap(sv::CameraType::WEBCAM);
cap.setIndex(0);
cap.setWH(cod.image_width, cod.image_height);
cap.setFps(30);
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
// cap.setFps(30);
cap.open(); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;

View File

@@ -2,6 +2,7 @@
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include "sv_camera.h"
using namespace std;
@@ -9,14 +10,15 @@ int main(int argc, char *argv[]) {
// 实例化 椭圆 检测器类
sv::EllipseDetector ed;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
ed.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
ed.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
ed.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// 打开摄像头
sv::Camera cap;
sv::Camera cap(sv::CameraType::WEBCAM);
cap.setIndex(0);
cap.setWH(ed.image_width, ed.image_height);
cap.setFps(30);
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
// cap.setFps(30);
cap.open(); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
int frame_id = 0;

View File

@@ -3,6 +3,7 @@
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include <map>
#include "sv_camera.h"
// 定义窗口名称
static const std::string RGB_WINDOW = "Image window";
@@ -34,36 +35,44 @@ void gimbalNoTrack(void)
int main(int argc, char *argv[])
{
// 实例化 框选目标跟踪类
sv::SingleObjectTracker sot;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
sv::CommonObjectDetector cod;
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
// 实例化吊舱
gimbal = new sv::Gimbal(sv::GimbalType::G1, sv::GimbalLink::SERIAL);
// 使用 /dev/ttyUSB0 作为控制串口
gimbal->setSerialPort("/dev/ttyUSB0");
// 以GimableCallback作为状态回调函数启用吊舱控制
gimbal->open(GimableCallback);
// 定义相机
sv::Camera cap;
// 设置相机流媒体地址为 192.168.2.64
cap.setIp("192.168.2.64");
// 设置获取画面分辨率为720P
cap.setWH(1280, 720);
// 设置视频帧率为30帧
// 打开摄像头
sv::Camera cap(sv::CameraType::G1);
cap.setStream("192.168.2.64",554);
cap.setWH(cod.image_width, cod.image_height);
cap.setFps(30);
// 开启相机
cap.open(sv::CameraType::G1);
cap.open(); // CameraID 0
// // 定义相机
// sv::Camera cap;
// // 设置相机流媒体地址为 192.168.2.64
// cap.setIp("192.168.2.64");
// // 设置获取画面分辨率为720P
// cap.setWH(1280, 720);
// // 设置视频帧率为30帧
// cap.setFps(30);
// // 开启相机
// cap.open(sv::CameraType::G1);
// 定义一个新的窗口,可在上面进行框选操作
cv::namedWindow(RGB_WINDOW);
// 设置窗口操作回调函数,该函数实现整个框选逻辑
cv::setMouseCallback(RGB_WINDOW, onMouse, 0);
// 实例化 框选目标跟踪类
sv::SingleObjectTracker sot;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
sot.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
sot.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
sv::CommonObjectDetector cod;
cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
@@ -258,4 +267,4 @@ void GimableCallback(double &frame_ang_r, double &frame_ang_p, double &frame_ang
count = 0;
}
count++;
}
}

View File

@@ -3,6 +3,7 @@
// 包含SpireCV SDK头文件
#include <sv_world.h>
// #include "gimbal_tools.hpp"
#include "sv_camera.h"
using namespace std;
@@ -33,33 +34,40 @@ void gimbalNoTrack(void)
}
int main(int argc, char *argv[])
{
{
// 实例化 圆形降落标志 检测器类
sv::LandingMarkerDetector lmd;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
lmd.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
// 实例化吊舱
gimbal = new sv::Gimbal(sv::GimbalType::G1, sv::GimbalLink::SERIAL);
// 使用 /dev/ttyUSB0 作为控制串口
gimbal->setSerialPort("/dev/ttyUSB0");
// 以GimableCallback作为状态回调函数启用吊舱控制
gimbal->open(GimableCallback);
// 定义相机
sv::Camera cap;
// 设置相机流媒体地址为 192.168.2.64
cap.setIp("192.168.2.64");
// 设置获取画面分辨率为720P
cap.setWH(1280, 720);
// 设置视频帧率为30帧
// 打开摄像头
sv::Camera cap(sv::CameraType::G1);
cap.setStream("192.168.2.64", 554);
cap.setWH(lmd.image_width, lmd.image_height);
cap.setFps(30);
// 开启相机
cap.open(sv::CameraType::G1);
cap.open(); // CameraID 0
// // 定义相机
// sv::Camera cap;
// // 设置相机流媒体地址为 192.168.2.64
// cap.setIp("192.168.2.64");
// // 设置获取画面分辨率为720P
// cap.setWH(1280, 720);
// // 设置视频帧率为30帧
// cap.setFps(30);
// // 开启相机
// cap.open(sv::CameraType::G1);
// 定义一个窗口 才可以在上面操作
cv::namedWindow(RGB_WINDOW);
// 设置窗口操作回调函数,该函数实现吊舱控制
cv::setMouseCallback(RGB_WINDOW, onMouse, 0);
// 实例化 圆形降落标志 检测器类
sv::LandingMarkerDetector lmd;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
lmd.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
lmd.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
@@ -145,4 +153,4 @@ void GimableCallback(double &frame_ang_r, double &frame_ang_p, double &frame_ang
count = 0;
}
count++;
}
}

View File

@@ -2,6 +2,7 @@
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include "sv_camera.h"
using namespace std;
@@ -42,33 +43,41 @@ static const std::string RGB_WINDOW = "Image window";
void onMouse(int event, int x, int y, int, void *);
int main(int argc, char *argv[])
{
{
// 实例化Aruco检测器类
sv::ArucoDetector ad;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
ad.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
// 实例化吊舱
gimbal = new sv::Gimbal(sv::GimbalType::G1, sv::GimbalLink::SERIAL);
// 使用 /dev/ttyUSB0 作为控制串口
gimbal->setSerialPort("/dev/ttyUSB0");
// 以gimableCallback作为状态回调函数启用吊舱控制
gimbal->open(gimableCallback);
// 定义相机
sv::Camera cap;
// 设置相机流媒体地址为 192.168.2.64
cap.setIp("192.168.2.64");
// 设置获取画面分辨率为720P
cap.setWH(1280, 720);
// 设置视频帧率为30帧
// 打开摄像头
sv::Camera cap(sv::CameraType::G1);
cap.setStream("192.168.2.64",554);
cap.setWH(ad.image_width, ad.image_height);
cap.setFps(30);
// 开启相机
cap.open(sv::CameraType::G1);
cap.open(); // CameraID 0
// // 定义相机
// sv::Camera cap;
// // 设置相机流媒体地址为 192.168.2.64
// cap.setIp("192.168.2.64");
// // 设置获取画面分辨率为720P
// cap.setWH(1280, 720);
// // 设置视频帧率为30帧
// cap.setFps(30);
// // 开启相机
// cap.open(sv::CameraType::G1);
// 定义一个窗口 才可以在上面操作
cv::namedWindow(RGB_WINDOW);
// 设置窗口操作回调函数,该函数实现吊舱控制
cv::setMouseCallback(RGB_WINDOW, onMouse, 0);
// 实例化Aruco检测器类
sv::ArucoDetector ad;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
ad.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
ad.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
sv::UDPServer udp;
// 实例化OpenCV的Mat类用于内存单帧图像

View File

@@ -2,21 +2,22 @@
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include "sv_camera.h"
using namespace std;
int main(int argc, char *argv[]) {
// 实例化 圆形降落标志 检测器类
sv::LandingMarkerDetector lmd;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
lmd.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
lmd.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
lmd.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// 打开摄像头
sv::Camera cap;
// 打开摄像头
sv::Camera cap(sv::CameraType::WEBCAM);
cap.setIndex(0);
cap.setWH(lmd.image_width, lmd.image_height);
cap.setFps(30);
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
// cap.setFps(30);
cap.open(); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
int frame_id = 0;

View File

@@ -2,6 +2,7 @@
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include "sv_camera.h"
using namespace std;
@@ -9,19 +10,19 @@ int main(int argc, char *argv[]) {
// 实例化
sv::CommonObjectDetector cod;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
sv::MultipleObjectTracker mot;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
mot.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
mot.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
mot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
mot.init(&cod);
// 打开摄像头
sv::Camera cap;
cap.setWH(mot.image_width, mot.image_height);
cap.setFps(30);
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
// 打开摄像头
sv::Camera cap(sv::CameraType::WEBCAM);
cap.setIndex(0);
cap.setWH(cod.image_width, cod.image_height);
// cap.setFps(30);
cap.open(); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
int frame_id = 0;

View File

@@ -2,7 +2,7 @@
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include "sv_camera.h"
using namespace std;
// 定义窗口名称
@@ -28,15 +28,17 @@ int main(int argc, char *argv[]) {
// 实例化 框选目标跟踪类
sv::SingleObjectTracker sot;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
// sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1920x1080.yaml");
sot.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
sot.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
// 打开摄像头
sv::Camera cap;
// 打开摄像头
sv::Camera cap(sv::CameraType::WEBCAM);
cap.setIndex(0);
cap.setWH(sot.image_width, sot.image_height);
cap.setFps(30);
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
// cap.setFps(30);
cap.open(); // CameraID 0
// cv::VideoCapture cap("/home/amov/SpireCV/test/tracking_1280x720.mp4");
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;

View File

@@ -5,7 +5,7 @@
#include <stdio.h>
#define SERV_PORT 20166
typedef unsigned char byte;
using namespace std;
@@ -23,9 +23,9 @@ int main(int argc, char *argv[]) {
int upd_msg_len = 1024 * 6; // max_objects = 100
unsigned char upd_msg[upd_msg_len];
byte upd_msg[upd_msg_len];
int msg_queue_len = 1024 * 1024; // 1M
unsigned char msg_queue[msg_queue_len];
byte msg_queue[msg_queue_len];
int addr_len = sizeof(struct sockaddr_in);
int start_index = 0, end_index = 0;
@@ -63,21 +63,21 @@ cout << n << ", " << start_index << ", " << end_index << endl;
if (end_index - i >= ilen + 2 && msg_queue[i+ilen] == 0xFB && msg_queue[i+ilen+1] == 0xFD)
{
cout << "FOUND 0xFAFC & 0xFBFD" << endl;
unsigned char* msg_type = reinterpret_cast<unsigned char*>(&msg_queue[i+2]);
byte* msg_type = reinterpret_cast<byte*>(&msg_queue[i+2]);
cout << "Type: " << (int) *msg_type << endl;
unsigned short* year = reinterpret_cast<unsigned short*>(&msg_queue[i+7]);
unsigned char* month = reinterpret_cast<unsigned char*>(&msg_queue[i+9]);
unsigned char* day = reinterpret_cast<unsigned char*>(&msg_queue[i+10]);
unsigned char* hour = reinterpret_cast<unsigned char*>(&msg_queue[i+11]);
unsigned char* minute = reinterpret_cast<unsigned char*>(&msg_queue[i+12]);
unsigned char* second = reinterpret_cast<unsigned char*>(&msg_queue[i+13]);
byte* month = reinterpret_cast<byte*>(&msg_queue[i+9]);
byte* day = reinterpret_cast<byte*>(&msg_queue[i+10]);
byte* hour = reinterpret_cast<byte*>(&msg_queue[i+11]);
byte* minute = reinterpret_cast<byte*>(&msg_queue[i+12]);
byte* second = reinterpret_cast<byte*>(&msg_queue[i+13]);
unsigned short* millisecond = reinterpret_cast<unsigned short*>(&msg_queue[i+14]);
cout << "Time: " << *year << "-" << (int) *month << "-" << (int) *day << " " << (int) *hour << ":" << (int) *minute << ":" << (int) *second << " " << *millisecond << endl;
unsigned char* index_d1 = reinterpret_cast<unsigned char*>(&msg_queue[i+16]);
unsigned char* index_d2 = reinterpret_cast<unsigned char*>(&msg_queue[i+17]);
unsigned char* index_d3 = reinterpret_cast<unsigned char*>(&msg_queue[i+18]);
unsigned char* index_d4 = reinterpret_cast<unsigned char*>(&msg_queue[i+19]);
byte* index_d1 = reinterpret_cast<byte*>(&msg_queue[i+16]);
byte* index_d2 = reinterpret_cast<byte*>(&msg_queue[i+17]);
byte* index_d3 = reinterpret_cast<byte*>(&msg_queue[i+18]);
byte* index_d4 = reinterpret_cast<byte*>(&msg_queue[i+19]);
int mp = i+20;
if ((*index_d4) & 0x01 == 0x01)
{
@@ -152,13 +152,13 @@ cout << n << ", " << start_index << ", " << end_index << endl;
}
for (int j=0; j<n_objects; j++)
{
unsigned char* index_f1 = reinterpret_cast<unsigned char*>(&msg_queue[mp]);
byte* index_f1 = reinterpret_cast<byte*>(&msg_queue[mp]);
mp++;
unsigned char* index_f2 = reinterpret_cast<unsigned char*>(&msg_queue[mp]);
byte* index_f2 = reinterpret_cast<byte*>(&msg_queue[mp]);
mp++;
unsigned char* index_f3 = reinterpret_cast<unsigned char*>(&msg_queue[mp]);
byte* index_f3 = reinterpret_cast<byte*>(&msg_queue[mp]);
mp++;
unsigned char* index_f4 = reinterpret_cast<unsigned char*>(&msg_queue[mp]);
byte* index_f4 = reinterpret_cast<byte*>(&msg_queue[mp]);
mp++;
if ((*index_f4) & 0x01 == 0x01 && (*index_f4) & 0x02 == 0x02)
{

View File

@@ -2,20 +2,21 @@
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include "sv_camera.h"
using namespace std;
int main(int argc, char *argv[]) {
// 实例化Aruco检测器类
sv::ArucoDetector ad;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
ad.loadCameraParams("/home/amov/SpireCV/calib_webcam_1280x720.yaml");
ad.loadCameraParams("/home/amov/SpireCV/calib_webcam_640x480.yaml");
// 打开摄像头
sv::Camera cap;
// 打开摄像头
sv::Camera cap(sv::CameraType::WEBCAM);
cap.setIndex(0);
cap.setWH(ad.image_width, ad.image_height);
cap.setFps(30);
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
// cap.setFps(30);
cap.open(); // CameraID 0
sv::UDPServer udp;
// 实例化OpenCV的Mat类用于内存单帧图像

View File

@@ -2,7 +2,7 @@
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include "sv_camera.h"
using namespace std;
struct node
@@ -20,18 +20,17 @@ int main(int argc, char *argv[])
// 实例化 框选目标跟踪类
sv::VeriDetector veri;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
veri.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
veri.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
veri.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
sv::CommonObjectDetector cod;
cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// 打开摄像头
sv::Camera cap;
// 打开摄像头
sv::Camera cap(sv::CameraType::WEBCAM);
cap.setIndex(0);
cap.setWH(cod.image_width, cod.image_height);
cap.setFps(30);
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
// cap.setFps(30);
cap.open(); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
int frame_id = 0;

View File

@@ -2,21 +2,22 @@
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include "sv_camera.h"
using namespace std;
int main(int argc, char *argv[]) {
// 实例化 通用目标 检测器类
sv::CommonObjectDetector cod;
// 手动导入相机参数如果使用Amov的G1等吊舱或相机则可以忽略该步骤将自动下载相机参数文件
cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
cod.loadCameraParams("/home/amov/SpireCV/calib_webcam_640x480.yaml");
// 打开摄像头
sv::Camera cap;
// 打开摄像头
sv::Camera cap(sv::CameraType::WEBCAM);
cap.setIndex(0);
cap.setWH(cod.image_width, cod.image_height);
cap.setFps(30);
cap.open(sv::CameraType::V4L2CAM, 0); // CameraID 0
// cap.setFps(30);
cap.open(); // CameraID 0
// 实例化OpenCV的Mat类用于内存单帧图像
cv::Mat img;
int frame_id = 0;
@@ -24,7 +25,7 @@ int main(int argc, char *argv[]) {
// 实例化视频保存类
sv::VideoWriter vw;
// 设置保存路径"/home/amov/Videos"保存图像尺寸640480帧频25Hz同步保存检测结果.svj
vw.setup(sv::get_home() + "/Videos", cv::Size(cod.image_width, cod.image_height), 25, true);
vw.setup("/home/amov/Videos", cv::Size(640, 480), 25, true);
while (1)
{
@@ -32,7 +33,7 @@ int main(int argc, char *argv[]) {
sv::TargetsInFrame tgts(frame_id++);
// 读取一帧图像到img
cap.read(img);
cv::resize(img, img, cv::Size(cod.image_width, cod.image_height));
cv::resize(img, img, cv::Size(640, 480));
// 执行通用目标检测
cod.detect(img, tgts);

View File

@@ -2,15 +2,17 @@
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include "sv_camera.h"
using namespace std;
int main(int argc, char *argv[]) {
// 打开摄像头
sv::Camera cap;
// cap.setWH(1280, 720);
// cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
// 打开摄像头
sv::Camera cap(sv::CameraType::WEBCAM);
cap.setIndex(0);
cap.setWH(1280, 720);
cap.setFps(30);
cap.open(); // CameraID 0
// 实例化视频推流类sv::VideoStreamer
sv::VideoStreamer streamer;

View File

@@ -3,6 +3,7 @@
// 包含SpireCV SDK头文件
#include <sv_world.h>
#include <chrono>
#include "sv_camera.h"
// yaw roll pitch
double gimbalEulerAngle[3];
@@ -143,14 +144,20 @@ int main(int argc, char *argv[])
std::cout << " pass... " << std::endl;
std::cout << " start image test " << std::endl;
sv::Camera cap;
cap.setIp(argv[2]);
// 打开摄像头
sv::Camera cap(sv::CameraType::G1);
cap.setStream(argv[2], 554);
cap.setWH(1280, 720);
cap.setFps(30);
cap.open(); // CameraID 0
cap.open(sv::CameraType::G1);
// sv::Camera cap;
// cap.setIp(argv[2]);
// cap.setWH(1280, 720);
// cap.setFps(30);
// cap.open(sv::CameraType::G1);
if (!cap.isRunning())
if (!cap.isActive())
{
std::cout << " gimbal image error , failed !!!!!" << std::endl;
return -1;

0
scripts/common/configs-downloading.sh Executable file → Normal file
View File

0
scripts/common/download_test_videos.sh Executable file → Normal file
View File

1
scripts/common/ffmpeg425-install.sh Executable file → Normal file
View File

@@ -17,7 +17,6 @@ cd nv-codec-headers
git checkout n11.1.5.0
sudo make install
cd ..
sudo rm -r nv-codec-headers
wget https://ffmpeg.org/releases/ffmpeg-4.2.5.tar.bz2
tar -xjf ffmpeg-4.2.5.tar.bz2

0
scripts/common/gst-install-orin.sh Executable file → Normal file
View File

0
scripts/common/gst-install.sh Executable file → Normal file
View File

0
scripts/common/models-converting.sh Executable file → Normal file
View File

28
scripts/common/models-downloading.sh Executable file → Normal file
View File

@@ -3,6 +3,12 @@
root_dir=${HOME}"/SpireCV/models"
root_server="https://download.amovlab.com/model"
sv_params1=${HOME}"/SpireCV/sv_algorithm_params.json"
sv_params2=${HOME}"/SpireCV/sv_algorithm_params_coco_640.json"
sv_params3=${HOME}"/SpireCV/sv_algorithm_params_coco_1280.json"
camera_params1=${HOME}"/SpireCV/calib_webcam_640x480.yaml"
camera_params2=${HOME}"/SpireCV/calib_webcam_1280x720.yaml"
coco_model1="COCO-yolov5s.wts"
coco_model2="COCO-yolov5s6.wts"
coco_model3="COCO-yolov5s-seg.wts"
@@ -40,6 +46,28 @@ if [ ! -d ${root_dir} ]; then
mkdir -p ${root_dir}
fi
if [ ! -f ${sv_params1} ]; then
echo -e "\033[32m[INFO]: ${sv_params1} not exist, downloading ... \033[0m"
wget -O ${sv_params1} ${root_server}/install/a-params/sv_algorithm_params.json
fi
if [ ! -f ${sv_params2} ]; then
echo -e "\033[32m[INFO]: ${sv_params2} not exist, downloading ... \033[0m"
wget -O ${sv_params2} ${root_server}/install/a-params/sv_algorithm_params_coco_640.json
fi
if [ ! -f ${sv_params3} ]; then
echo -e "\033[32m[INFO]: ${sv_params3} not exist, downloading ... \033[0m"
wget -O ${sv_params3} ${root_server}/install/a-params/sv_algorithm_params_coco_1280.json
fi
if [ ! -f ${camera_params1} ]; then
echo -e "\033[32m[INFO]: ${camera_params1} not exist, downloading ... \033[0m"
wget -O ${camera_params1} ${root_server}/install/c-params/calib_webcam_640x480.yaml
fi
if [ ! -f ${camera_params2} ]; then
echo -e "\033[32m[INFO]: ${camera_params2} not exist, downloading ... \033[0m"
wget -O ${camera_params2} ${root_server}/install/c-params/calib_webcam_1280x720.yaml
fi
if [ ! -f ${coco_model1_fn} ]; then
echo -e "\033[32m[INFO]: ${coco_model1_fn} not exist, downloading ... \033[0m"
wget -O ${coco_model1_fn} ${root_server}/install/${coco_model1}

0
scripts/common/opencv470-install.sh Executable file → Normal file
View File

View File

@@ -1,39 +0,0 @@
#!/bin/bash -e
root_dir=${HOME}"/SpireCV/models"
root_server="https://download.amovlab.com/model"
sv_params1=${HOME}"/SpireCV/sv_algorithm_params.json"
sv_params2=${HOME}"/SpireCV/sv_algorithm_params_coco_640.json"
sv_params3=${HOME}"/SpireCV/sv_algorithm_params_coco_1280.json"
camera_params1=${HOME}"/SpireCV/calib_webcam_640x480.yaml"
camera_params2=${HOME}"/SpireCV/calib_webcam_1280x720.yaml"
if [ ! -d ${root_dir} ]; then
echo -e "\033[32m[INFO]: ${root_dir} not exist, creating it ... \033[0m"
mkdir -p ${root_dir}
fi
if [ ! -f ${sv_params1} ]; then
echo -e "\033[32m[INFO]: ${sv_params1} not exist, downloading ... \033[0m"
wget -O ${sv_params1} ${root_server}/install/a-params/sv_algorithm_params.json
fi
if [ ! -f ${sv_params2} ]; then
echo -e "\033[32m[INFO]: ${sv_params2} not exist, downloading ... \033[0m"
wget -O ${sv_params2} ${root_server}/install/a-params/sv_algorithm_params_coco_640.json
fi
if [ ! -f ${sv_params3} ]; then
echo -e "\033[32m[INFO]: ${sv_params3} not exist, downloading ... \033[0m"
wget -O ${sv_params3} ${root_server}/install/a-params/sv_algorithm_params_coco_1280.json
fi
if [ ! -f ${camera_params1} ]; then
echo -e "\033[32m[INFO]: ${camera_params1} not exist, downloading ... \033[0m"
wget -O ${camera_params1} ${root_server}/install/c-params/calib_webcam_640x480.yaml
fi
if [ ! -f ${camera_params2} ]; then
echo -e "\033[32m[INFO]: ${camera_params2} not exist, downloading ... \033[0m"
wget -O ${camera_params2} ${root_server}/install/c-params/calib_webcam_1280x720.yaml
fi

View File

@@ -1,24 +0,0 @@
#!/bin/sh
sudo apt install -y libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev
sudo apt install -y libgstreamer-plugins-bad1.0-dev gstreamer1.0-plugins-base
sudo apt install -y gstreamer1.0-plugins-good gstreamer1.0-plugins-bad
sudo apt install -y gstreamer1.0-plugins-ugly gstreamer1.0-libav gstreamer1.0-doc
sudo apt install -y gstreamer1.0-tools gstreamer1.0-x gstreamer1.0-alsa
sudo apt install -y gstreamer1.0-gl gstreamer1.0-gtk3 gstreamer1.0-qt5
sudo apt install -y gstreamer1.0-pulseaudio
sudo apt install -y gtk-doc-tools
sudo apt install -y libeigen3-dev libfmt-dev v4l-utils
sudo apt -y install autotools-dev automake m4 perl
sudo apt -y install libtool
autoreconf -ivf
git clone https://gitee.com/jario-jin/gst-rtsp-server-b18.git
cd gst-rtsp-server-b18
./autogen.sh
make
sudo make install
cd ..
sudo rm -r gst-rtsp-server-b18

View File

@@ -1,20 +0,0 @@
#!/bin/sh
sudo apt install -y libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev
sudo apt install -y libgstreamer-plugins-bad1.0-dev gstreamer1.0-plugins-base
sudo apt install -y gstreamer1.0-plugins-good gstreamer1.0-plugins-bad
sudo apt install -y gstreamer1.0-plugins-ugly gstreamer1.0-libav gstreamer1.0-doc
sudo apt install -y gstreamer1.0-tools gstreamer1.0-x gstreamer1.0-alsa
sudo apt install -y gstreamer1.0-gl gstreamer1.0-gtk3 gstreamer1.0-qt5
sudo apt install -y gstreamer1.0-pulseaudio
sudo apt install -y gtk-doc-tools
sudo apt install -y libeigen3-dev libfmt-dev v4l-utils
git clone https://gitee.com/jario-jin/gst-rtsp-server-b18.git
cd gst-rtsp-server-b18
./autogen.sh
make
sudo make install
cd ..
sudo rm -r gst-rtsp-server-b18

View File

@@ -1,70 +0,0 @@
#!/bin/sh
wget https://download.amovlab.com/model/deps/opencv-4.7.0.zip
wget https://download.amovlab.com/model/deps/opencv_contrib-4.7.0.zip
wget https://download.amovlab.com/model/deps/opencv_cache_x86-4.7.0.zip
current_dir=$(pwd)
package_dir="."
mkdir ~/opencv_build
if [ ! -d ""$package_dir"" ];then
echo "\033[31m[ERROR]: $package_dir not exist!: \033[0m"
exit 1
fi
# sudo add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main"
# sudo add-apt-repository "deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu-ports/ xenial main multiverse restricted universe"
sudo apt update
sudo apt install -y build-essential
sudo apt install -y cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev
sudo add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main"
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3B4FE6ACC0B21F32
sudo apt update
sudo apt install -y libjasper1 libjasper-dev
sudo apt install -y python3-dev python3-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev
sudo apt install -y libdc1394-22-dev
sudo apt install -y libcurl4 build-essential pkg-config cmake libopenblas-dev libeigen3-dev \
libtbb-dev libavcodec-dev libavformat-dev libgstreamer-plugins-base1.0-dev \
libgstreamer1.0-dev libswscale-dev libgtk-3-dev libpng-dev libjpeg-dev \
libcanberra-gtk-module libcanberra-gtk3-module
echo "\033[32m[INFO]:\033[0m unzip opencv-4.7.0.zip ..."
unzip -q -o $package_dir/opencv-4.7.0.zip -d ~/opencv_build
echo "\033[32m[INFO]:\033[0m unzip opencv_contrib-4.7.0.zip ..."
unzip -q -o $package_dir/opencv_contrib-4.7.0.zip -d ~/opencv_build
echo "\033[32m[INFO]:\033[0m unzip opencv_cache_x86-4.7.0.zip ..."
unzip -q -o $package_dir/opencv_cache_x86-4.7.0.zip -d ~/opencv_build
sudo rm opencv-4.7.0.zip
sudo rm opencv_contrib-4.7.0.zip
sudo rm opencv_cache_x86-4.7.0.zip
cd ~/opencv_build/opencv-4.7.0
mkdir .cache
cp -r ~/opencv_build/opencv_cache_x86-4.7.0/* ~/opencv_build/opencv-4.7.0/.cache/
mkdir build
cd build
cmake -D CMAKE_BUILD_TYPE=Release \
-D WITH_CUDA=OFF \
-D OPENCV_ENABLE_NONFREE=ON \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-4.7.0/modules ..
make -j2
sudo make install
cd
sudo rm -r ~/opencv_build
cd ${current_dir}

3
scripts/jetson/opencv470-jetpack511-cuda-install.sh Executable file → Normal file
View File

@@ -5,7 +5,7 @@ wget https://download.amovlab.com/model/deps/opencv-4.7.0.zip
wget https://download.amovlab.com/model/deps/opencv_contrib-4.7.0.zip
wget https://download.amovlab.com/model/deps/opencv_cache-4.7.0.zip
current_dir=$(pwd)
package_dir="."
mkdir ~/opencv_build
@@ -52,4 +52,3 @@ sudo make install
cd
sudo rm -r ~/opencv_build
cd ${current_dir}

3
scripts/jetson/opencv470-jetpack511-install.sh Executable file → Normal file
View File

@@ -5,7 +5,7 @@ wget https://download.amovlab.com/model/deps/opencv-4.7.0.zip
wget https://download.amovlab.com/model/deps/opencv_contrib-4.7.0.zip
wget https://download.amovlab.com/model/deps/opencv_cache-4.7.0.zip
current_dir=$(pwd)
package_dir="."
mkdir ~/opencv_build
@@ -52,4 +52,3 @@ sudo make install
cd
sudo rm -r ~/opencv_build
cd ${current_dir}

View File

@@ -1,119 +0,0 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import os
import requests
import argparse
root_url = "https://download.amovlab.com/model/SpireCV-models/"
model_list_url = root_url + "model-list.txt"
root_path = os.path.expanduser("~") + "/SpireCV/models"
print("MODEL PATH:", root_path)
if not os.path.exists(root_path):
os.makedirs(root_path)
list_file = os.path.join(root_path, "model-list.txt")
def main():
parser = argparse.ArgumentParser(description="SpireCV Model SYNC")
parser.add_argument(
"-p", "--platform",
type=str,
required=True,
help="Supported Platforms: nv (Nvidia), int (Intel)",
)
args = parser.parse_args()
if args.platform in ['nv', 'nvidia', 'Nv', 'Nvidia']:
prefix = 'Nv'
elif args.platform in ['int', 'intel', 'Int', 'Intel']:
prefix = 'Int'
else:
raise Exception("Platform NOT Support!")
r = requests.get(model_list_url)
with open(list_file, "wb") as f:
f.write(r.content)
with open(list_file, 'r') as file:
lines = file.readlines()
need_switch = False
for line in lines:
line = line.strip()
if len(line) > 0:
model_file = os.path.join(root_path, line)
if not os.path.exists(model_file) and (line.startswith(prefix) or line.startswith('Ocv')):
print("[1] Downloading Model:", line, "...")
r = requests.get(root_url + line)
with open(model_file, "wb") as f:
f.write(r.content)
need_switch = True
if os.path.exists(model_file):
print("[1] Model:", line, "EXIST!")
if line.startswith('Nv'):
net = line.split('-')[2]
if net.startswith("yolov5"):
if len(net.split('_')) == 3:
name, seg, ncls = net.split('_')
engine_fn = os.path.splitext(model_file)[0].replace(net, name + "_" + seg + '_b1_' + ncls) + '.engine'
online_fn = os.path.splitext(model_file)[0].replace(net, name + "_" + seg + '_b1_' + ncls) + '-online.engine'
else:
name, ncls = net.split('_')
engine_fn = os.path.splitext(model_file)[0].replace(net, name + '_b1_' + ncls) + '.engine'
online_fn = os.path.splitext(model_file)[0].replace(net, name + '_b1_' + ncls) + '-online.engine'
else:
engine_fn = os.path.splitext(model_file)[0] + '.engine'
online_fn = os.path.splitext(model_file)[0] + '-online.engine'
if not os.path.exists(engine_fn) and not os.path.exists(online_fn):
if net.startswith("yolov5"):
if len(net.split('_')) == 3:
name, seg, ncls = net.split('_')
cmd = "SpireCVSeg -s {} {} {} {}".format(
model_file, engine_fn, ncls[1:], name[6:]
)
else:
name, ncls = net.split('_')
cmd = "SpireCVDet -s {} {} {} {}".format(
model_file, engine_fn, ncls[1:], name[6:]
)
elif line.endswith("onnx"):
cmd = ("/usr/src/tensorrt/bin/trtexec --explicitBatch --onnx={} "
"--saveEngine={} --fp16").format(
model_file, engine_fn
)
print(" [2] Converting Model:", line, "->", engine_fn, "...")
result = os.popen(cmd).read()
need_switch = True
else:
print(" [2] Model Converting FINISH!")
model_file = engine_fn
if not line.startswith('Ocv') and need_switch:
ext = os.path.splitext(model_file)[1]
fn_prefix = '-'.join(os.path.basename(model_file).split('-')[:3])
file_names = os.listdir(root_path)
selected = []
for file_name in file_names:
if file_name.startswith(fn_prefix) and file_name.endswith(ext):
selected.append(file_name)
if len(selected) > 0:
for i, sel in enumerate(selected):
if sel.endswith('-online' + ext):
os.rename(
os.path.join(root_path, sel),
os.path.join(root_path, '-'.join(sel.split('-')[:4])) + ext
)
selected[i] = '-'.join(sel.split('-')[:4]) + ext
selected.sort(reverse=True)
os.rename(
os.path.join(root_path, selected[0]),
os.path.join(root_path, os.path.splitext(selected[0])[0] + "-online" + ext)
)
online_model = os.path.splitext(selected[0])[0] + "-online" + ext
print(" [3] Model {} ONLINE *".format(online_model))
if __name__ == "__main__":
main()

View File

@@ -1,39 +0,0 @@
#!/bin/bash -e
root_dir=${HOME}"/SpireCV/models"
root_server="https://download.amovlab.com/model"
sv_params1=${HOME}"/SpireCV/sv_algorithm_params.json"
sv_params2=${HOME}"/SpireCV/sv_algorithm_params_coco_640.json"
sv_params3=${HOME}"/SpireCV/sv_algorithm_params_coco_1280.json"
camera_params1=${HOME}"/SpireCV/calib_webcam_640x480.yaml"
camera_params2=${HOME}"/SpireCV/calib_webcam_1280x720.yaml"
if [ ! -d ${root_dir} ]; then
echo -e "\033[32m[INFO]: ${root_dir} not exist, creating it ... \033[0m"
mkdir -p ${root_dir}
fi
if [ ! -f ${sv_params1} ]; then
echo -e "\033[32m[INFO]: ${sv_params1} not exist, downloading ... \033[0m"
wget -O ${sv_params1} ${root_server}/install/a-params/sv_algorithm_params.json
fi
if [ ! -f ${sv_params2} ]; then
echo -e "\033[32m[INFO]: ${sv_params2} not exist, downloading ... \033[0m"
wget -O ${sv_params2} ${root_server}/install/a-params/sv_algorithm_params_coco_640.json
fi
if [ ! -f ${sv_params3} ]; then
echo -e "\033[32m[INFO]: ${sv_params3} not exist, downloading ... \033[0m"
wget -O ${sv_params3} ${root_server}/install/a-params/sv_algorithm_params_coco_1280.json
fi
if [ ! -f ${camera_params1} ]; then
echo -e "\033[32m[INFO]: ${camera_params1} not exist, downloading ... \033[0m"
wget -O ${camera_params1} ${root_server}/install/c-params/calib_webcam_640x480.yaml
fi
if [ ! -f ${camera_params2} ]; then
echo -e "\033[32m[INFO]: ${camera_params2} not exist, downloading ... \033[0m"
wget -O ${camera_params2} ${root_server}/install/c-params/calib_webcam_1280x720.yaml
fi

View File

@@ -1,66 +0,0 @@
#!/bin/sh
sudo apt install -y \
build-essential yasm cmake libtool libc6 libc6-dev unzip wget libeigen3-dev libfmt-dev \
libnuma1 libnuma-dev libx264-dev libx265-dev libfaac-dev libssl-dev v4l-utils
current_dir=$(pwd)
root_dir=${HOME}"/SpireCV"
if [ ! -d ${root_dir} ]; then
echo -e "\033[32m[INFO]: ${root_dir} not exist, creating it ... \033[0m"
mkdir -p ${root_dir}
fi
cd ${root_dir}
git clone https://gitee.com/jario-jin/nv-codec-headers.git
cd nv-codec-headers
git checkout n11.1.5.0
sudo make install
cd ..
sudo rm -r nv-codec-headers
wget https://ffmpeg.org/releases/ffmpeg-4.2.5.tar.bz2
tar -xjf ffmpeg-4.2.5.tar.bz2
cd ffmpeg-4.2.5
export PATH=$PATH:/usr/local/cuda/bin
sed -i 's#_30#_75#' configure; sed -i 's#_30#_75#' configure
./configure \
--enable-nonfree \
--enable-gpl \
--enable-shared \
--enable-ffmpeg \
--enable-ffplay \
--enable-ffprobe \
--enable-libx264 \
--enable-libx265 \
--enable-cuda-nvcc \
--enable-nvenc \
--enable-cuda \
--enable-cuvid \
--enable-libnpp \
--extra-libs="-lpthread -lm" \
--extra-cflags=-I/usr/local/cuda/include \
--extra-ldflags=-L/usr/local/cuda/lib64
make -j8
sudo make install
cd ..
git clone https://gitee.com/jario-jin/ZLMediaKit.git
cd ZLMediaKit
git submodule update --init
mkdir build
cd build
cmake ..
make -j4
cd ..
cd ..
mkdir ZLM
cd ZLM
cp ../ZLMediaKit/release/linux/Debug/MediaServer .
cp ../ZLMediaKit/release/linux/Debug/config.ini .
cd ..
cd ${current_dir}

View File

@@ -1,95 +0,0 @@
#!/bin/sh
echo "\033[32m[INFO]:\033[0m Please enter the folder path of the installation package: "
# package_dir="/home/jario/Downloads/nv"
wget https://download.amovlab.com/model/install/x86-nvidia/cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb
wget https://download.amovlab.com/model/install/x86-nvidia/cudnn-11.3-linux-x64-v8.2.1.32.tgz
wget https://download.amovlab.com/model/install/x86-nvidia/nv-tensorrt-repo-ubuntu1804-cuda11.3-trt8.0.1.6-ga-20210626_1-1_amd64.deb
wget https://download.amovlab.com/model/install/x86-nvidia/cuda-ubuntu1804.pin
package_dir="."
cuda_fn=$package_dir"/cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb"
cudnn_fn=$package_dir"/cudnn-11.3-linux-x64-v8.2.1.32.tgz"
tensorrt_fn=$package_dir"/nv-tensorrt-repo-ubuntu1804-cuda11.3-trt8.0.1.6-ga-20210626_1-1_amd64.deb"
tmp_dir="/tmp"
echo "\033[32m[INFO]: CUDA_PKG: \033[0m"$cuda_fn
echo "\033[32m[INFO]: CUDNN_PKG: \033[0m"$cudnn_fn
echo "\033[32m[INFO]: TENSORRT_PKG: \033[0m"$tensorrt_fn
# 所有文件都存在时,才会继续执行脚本
if [ ! -f "$cuda_fn" ]; then
echo "\033[31m[ERROR]: CUDA_PKG not exist!: \033[0m"
exit 1
fi
if [ ! -f "$cudnn_fn" ]; then
echo "\033[31m[ERROR]: CUDNN_PKG not exist!: \033[0m"
exit 1
fi
if [ ! -f "$tensorrt_fn" ]; then
echo "\033[31m[ERROR]: TENSORRT_PKG not exist!: \033[0m"
exit 1
fi
# 删除显卡驱动
# sudo apt-get remove nvidia-*
# 安装显卡驱动
# echo "\033[32m[INFO]: Nvidia Driver installing ...\033[0m"
# ubuntu-drivers devices
# sudo ubuntu-drivers autoinstall
# 删除已安装CUDA
# --purge选项会将配置文件、数据库等删除
# sudo apt-get autoremove --purge cuda
# 查看安装了哪些cuda相关的库可以用以下指令
# sudo dpkg -l |grep cuda
# sudo dpkg -P cuda-repo-ubuntu1804-10-2-local-10.2.89-440.33.01
# sudo dpkg -P cuda-repo-ubuntu1804-11-1-local
# sudo dpkg -P nv-tensorrt-repo-ubuntu1804-cuda10.2-trt8.0.1.6-ga-20210626
# 这个key值是官网文档查到的当然通过apt-key list也能查看
# sudo apt-key list
# sudo apt-key del 7fa2af80
# 安装CUDA
echo "\033[32m[INFO]: CUDA installing ...\033[0m"
# wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-ubuntu1804.pin
sudo cp $package_dir/cuda-ubuntu1804.pin /etc/apt/preferences.d/cuda-repository-pin-600
sudo dpkg -i $cuda_fn
sudo apt-key add /var/cuda-repo-ubuntu1804-11-1-local/7fa2af80.pub
sudo apt-get update
sudo apt-get -y install cuda
# 安装CUDNN
echo "\033[32m[INFO]: CUDNN installing ...\033[0m"
tar zxvf $cudnn_fn -C $tmp_dir
sudo cp $tmp_dir/cuda/include/cudnn* /usr/local/cuda/include/
sudo cp $tmp_dir/cuda/lib64/libcudnn* /usr/local/cuda/lib64/
sudo chmod a+r /usr/local/cuda/include/cudnn* /usr/local/cuda/lib64/libcudnn*
sudo ln -sf /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_cnn_infer.so.8.2.1 /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_cnn_infer.so.8
sudo ln -sf /usr/local/cuda/targets/x86_64-linux/lib/libcudnn.so.8.2.1 /usr/local/cuda/targets/x86_64-linux/lib/libcudnn.so.8
sudo ln -sf /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_ops_train.so.8.2.1 /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_ops_train.so.8
sudo ln -sf /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_adv_train.so.8.2.1 /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_adv_train.so.8
sudo ln -sf /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_ops_infer.so.8.2.1 /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_ops_infer.so.8
sudo ln -sf /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_cnn_train.so.8.2.1 /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_cnn_train.so.8
sudo ln -sf /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_adv_infer.so.8.2.1 /usr/local/cuda/targets/x86_64-linux/lib/libcudnn_adv_infer.so.8
# 安装TensorRT
sudo dpkg -i $tensorrt_fn
sudo apt-key add /var/nv-tensorrt-repo-cuda11.3-trt8.0.1.6-ga-20210626/7fa2af80.pub
sudo apt-get update
sudo apt-get install tensorrt -y
sudo apt-get install python3-libnvinfer-dev -y
sudo rm $cuda_fn
sudo rm $cudnn_fn
sudo rm $tensorrt_fn
sudo rm cuda-ubuntu1804.pin

View File

@@ -5,7 +5,7 @@ wget https://download.amovlab.com/model/deps/opencv-4.7.0.zip
wget https://download.amovlab.com/model/deps/opencv_contrib-4.7.0.zip
wget https://download.amovlab.com/model/deps/opencv_cache_x86-4.7.0.zip
current_dir=$(pwd)
package_dir="."
mkdir ~/opencv_build
@@ -67,4 +67,3 @@ sudo make install
cd
sudo rm -r ~/opencv_build
cd ${current_dir}

View File

@@ -5,7 +5,7 @@ wget https://download.amovlab.com/model/deps/opencv-4.7.0.zip
wget https://download.amovlab.com/model/deps/opencv_contrib-4.7.0.zip
wget https://download.amovlab.com/model/deps/opencv_cache_x86-4.7.0.zip
current_dir=$(pwd)
package_dir="."
mkdir ~/opencv_build
@@ -67,4 +67,3 @@ sudo make install
cd
sudo rm -r ~/opencv_build
cd ${current_dir}

View File

@@ -1,39 +0,0 @@
#!/bin/bash -e
root_dir=${HOME}"/SpireCV/models"
root_server="https://download.amovlab.com/model"
sv_params1=${HOME}"/SpireCV/sv_algorithm_params.json"
sv_params2=${HOME}"/SpireCV/sv_algorithm_params_coco_640.json"
sv_params3=${HOME}"/SpireCV/sv_algorithm_params_coco_1280.json"
camera_params1=${HOME}"/SpireCV/calib_webcam_640x480.yaml"
camera_params2=${HOME}"/SpireCV/calib_webcam_1280x720.yaml"
if [ ! -d ${root_dir} ]; then
echo -e "\033[32m[INFO]: ${root_dir} not exist, creating it ... \033[0m"
mkdir -p ${root_dir}
fi
if [ ! -f ${sv_params1} ]; then
echo -e "\033[32m[INFO]: ${sv_params1} not exist, downloading ... \033[0m"
wget -O ${sv_params1} ${root_server}/install/a-params/sv_algorithm_params.json
fi
if [ ! -f ${sv_params2} ]; then
echo -e "\033[32m[INFO]: ${sv_params2} not exist, downloading ... \033[0m"
wget -O ${sv_params2} ${root_server}/install/a-params/sv_algorithm_params_coco_640.json
fi
if [ ! -f ${sv_params3} ]; then
echo -e "\033[32m[INFO]: ${sv_params3} not exist, downloading ... \033[0m"
wget -O ${sv_params3} ${root_server}/install/a-params/sv_algorithm_params_coco_1280.json
fi
if [ ! -f ${camera_params1} ]; then
echo -e "\033[32m[INFO]: ${camera_params1} not exist, downloading ... \033[0m"
wget -O ${camera_params1} ${root_server}/install/c-params/calib_webcam_640x480.yaml
fi
if [ ! -f ${camera_params2} ]; then
echo -e "\033[32m[INFO]: ${camera_params2} not exist, downloading ... \033[0m"
wget -O ${camera_params2} ${root_server}/install/c-params/calib_webcam_1280x720.yaml
fi

View File

@@ -1,26 +0,0 @@
#!/bin/sh
sudo apt install -y v4l-utils build-essential yasm cmake libtool libc6 libc6-dev unzip wget libeigen3-dev libfmt-dev libnuma1 libnuma-dev libx264-dev libx265-dev libfaac-dev libssl-dev v4l-utils
wget https://ffmpeg.org/releases/ffmpeg-4.2.5.tar.bz2
tar -xjf ffmpeg-4.2.5.tar.bz2
cd ffmpeg-4.2.5
./configure \
--arch=x86_64 \
--disable-x86asm \
--enable-vaapi \
--enable-libmfx \
--enable-nonfree \
--enable-shared \
--enable-ffmpeg \
--enable-ffplay \
--enable-ffprobe \
--enable-libx264 \
--enable-libx265 \
--enable-gpl
make -j8
sudo make install
cd ..
sudo rm -r ffmpeg-4.2.5
sudo rm ffmpeg-4.2.5.tar.bz2

View File

@@ -1,6 +0,0 @@
#!/bin/sh
sudo apt-get install -y libmfx1 libmfx-tools libva-dev libmfx-dev intel-media-va-driver-non-free vainfo
echo "export LIBVA_DRIVER_NAME=iHD" >> ~/.bashrc
export LIBVA_DRIVER_NAME=iHD

View File

@@ -1,7 +0,0 @@
#!/bin/sh
curl -L https://repositories.intel.com/graphics/intel-graphics.key | sudo apt-key add -
sudo apt-add-repository 'deb [arch=amd64] https://repositories.intel.com/graphics/ubuntu focal main'
sudo apt-get update
sudo apt-get install -y -q --no-install-recommends clinfo intel-opencl-icd intel-media-va-driver-non-free

View File

@@ -1,70 +0,0 @@
#!/bin/sh
wget https://download.amovlab.com/model/deps/opencv-4.7.0.zip
wget https://download.amovlab.com/model/deps/opencv_contrib-4.7.0.zip
wget https://download.amovlab.com/model/deps/opencv_cache_x86-4.7.0.zip
current_dir=$(pwd)
package_dir="."
mkdir ~/opencv_build
if [ ! -d ""$package_dir"" ];then
echo "\033[31m[ERROR]: $package_dir not exist!: \033[0m"
exit 1
fi
# sudo add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main"
# sudo add-apt-repository "deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu-ports/ xenial main multiverse restricted universe"
sudo apt update
sudo apt install -y build-essential
sudo apt install -y cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev
sudo add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main"
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3B4FE6ACC0B21F32
sudo apt update
sudo apt install -y libjasper1 libjasper-dev
sudo apt install -y python3-dev python3-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev
sudo apt install -y libdc1394-22-dev
sudo apt install -y libcurl4 build-essential pkg-config cmake libopenblas-dev libeigen3-dev \
libtbb-dev libavcodec-dev libavformat-dev libgstreamer-plugins-base1.0-dev \
libgstreamer1.0-dev libswscale-dev libgtk-3-dev libpng-dev libjpeg-dev \
libcanberra-gtk-module libcanberra-gtk3-module
echo "\033[32m[INFO]:\033[0m unzip opencv-4.7.0.zip ..."
unzip -q -o $package_dir/opencv-4.7.0.zip -d ~/opencv_build
echo "\033[32m[INFO]:\033[0m unzip opencv_contrib-4.7.0.zip ..."
unzip -q -o $package_dir/opencv_contrib-4.7.0.zip -d ~/opencv_build
echo "\033[32m[INFO]:\033[0m unzip opencv_cache_x86-4.7.0.zip ..."
unzip -q -o $package_dir/opencv_cache_x86-4.7.0.zip -d ~/opencv_build
sudo rm opencv-4.7.0.zip
sudo rm opencv_contrib-4.7.0.zip
sudo rm opencv_cache_x86-4.7.0.zip
cd ~/opencv_build/opencv-4.7.0
mkdir .cache
cp -r ~/opencv_build/opencv_cache_x86-4.7.0/* ~/opencv_build/opencv-4.7.0/.cache/
mkdir build
cd build
cmake -D CMAKE_BUILD_TYPE=Release \
-D WITH_CUDA=OFF \
-D OPENCV_ENABLE_NONFREE=ON \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-4.7.0/modules ..
make -j2
sudo make install
cd
sudo rm -r ~/opencv_build
cd ${current_dir}

View File

@@ -1,20 +0,0 @@
#!/bin/sh
sudo mkdir /opt/intel
current_dir=$(pwd)
cd ${HOME}/Downloads
curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2022.3.1/linux/l_openvino_toolkit_ubuntu20_2022.3.1.9227.cf2c7da5689_x86_64.tgz --output openvino_2022.3.1.tgz
tar -xf openvino_2022.3.1.tgz
sudo mv l_openvino_toolkit_ubuntu20_2022.3.1.9227.cf2c7da5689_x86_64 /opt/intel/openvino_2022.3.1
cd /opt/intel/openvino_2022.3.1
sudo -E ./install_dependencies/install_openvino_dependencies.sh
cd /opt/intel
sudo ln -s openvino_2022.3.1 openvino_2022
echo "source /opt/intel/openvino_2022/setupvars.sh" >> ~/.bashrc
sh /opt/intel/openvino_2022/setupvars.sh
cd ${current_dir}

View File

@@ -1,22 +0,0 @@
#!/bin/sh
current_dir=$(pwd)
cd ${HOME}/SpireCV
git clone https://gitee.com/jario-jin/ZLMediaKit.git
cd ZLMediaKit
git submodule update --init
mkdir build
cd build
cmake ..
make -j4
cd ..
cd ..
mkdir ZLM
cd ZLM
cp ../ZLMediaKit/release/linux/Debug/MediaServer .
cp ../ZLMediaKit/release/linux/Debug/config.ini .
cd ${current_dir}

View File

@@ -379,7 +379,7 @@ void _load_all_json(std::string json_fn, JsonValue& value, JsonAllocator& alloca
}
fin.close();
// std::cout << json_str << std::endl;
char source[1024 * 256]; // 256K
char source[1024 * 1024]; // 1M
char *endptr;
strcpy(source, json_str.c_str());

View File

@@ -6,10 +6,6 @@
#include <ctime>
#include <chrono>
#include <fstream>
#include <dirent.h>
#include <opencv2/opencv.hpp>
#include <unordered_map>
using namespace std;
@@ -29,15 +25,6 @@ bool _is_file_exist(std::string& fn)
return f.good();
}
bool _comp_str_greater(const std::string& a, const std::string& b)
{
return a > b;
}
bool _comp_str_lesser(const std::string& a, const std::string& b)
{
return a < b;
}
void _get_sys_time(TimeInfo& t_info)
{
@@ -149,56 +136,4 @@ int _comp_str_idx(const std::string& in_str, const std::string* str_list, int le
return -1;
}
void _list_dir(std::string dir, std::vector<std::string>& files, std::string suffixs, std::string prefix, bool r) {
// assert(_endswith(dir, "/") || _endswith(dir, "\\"));
DIR *pdir;
struct dirent *ent;
string childpath;
string absolutepath;
pdir = opendir(dir.c_str());
assert(pdir != NULL);
vector<string> suffixd(0);
if (!suffixs.empty() && suffixs != "") {
suffixd = _split(suffixs, "|");
}
while ((ent = readdir(pdir)) != NULL) {
if (ent->d_type & DT_DIR) {
if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0) {
continue;
}
if (r) { // If need to traverse subdirectories
childpath = dir + ent->d_name;
_list_dir(childpath, files);
}
}
else {
if (suffixd.size() > 0) {
bool can_push = false, cancan_push = true;
for (int i = 0; i < (int)suffixd.size(); i++) {
if (_endswith(ent->d_name, suffixd[i]))
can_push = true;
}
if (prefix.size() > 0) {
if (!_startswith(ent->d_name, prefix))
cancan_push = false;
}
if (can_push && cancan_push) {
absolutepath = dir + ent->d_name;
files.push_back(ent->d_name); // filepath
}
}
else {
absolutepath = dir + ent->d_name;
files.push_back(ent->d_name); // filepath
}
}
}
sort(files.begin(), files.end()); //sort names
}
}

View File

@@ -24,13 +24,10 @@ bool _startswith(const std::string& str, const std::string& start);
bool _endswith(const std::string& str, const std::string& end);
std::string _trim(const std::string& str);
int _comp_str_idx(const std::string& in_str, const std::string* str_list, int len);
bool _comp_str_greater(const std::string& a, const std::string& b);
bool _comp_str_lesser(const std::string& a, const std::string& b);
/************* file-related functions ***************/
std::string _get_home();
bool _is_file_exist(std::string& fn);
void _list_dir(std::string dir, std::vector<std::string>& files, std::string suffixs="", std::string prefix="", bool r=false);
}

View File

@@ -0,0 +1,106 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-12-19 18:30:17
* @LastEditors: L LC @amov
* @LastEditTime: 2023-12-21 17:56:47
* @FilePath: /SpireCV/video_io/driver/sv_camera_G1.cpp
*/
#include "sv_camera_privately.h"
#include <fstream>
// RTSP (H.264) driver for the Amov G1 gimbal camera.
// The stream is always opened through GStreamer; see open() in this file.
class sv_camera_G1 : public sv_p::CameraBase
{
private:
    std::string _ip; // camera IP address, set via setStream()

public:
    // Store the stream endpoint; only allowed while the capture is closed.
    // NOTE(review): `port` is accepted but not stored — open() hard-codes
    // RTSP port 554 for the G1; confirm this is intentional.
    bool setStream(const std::string &ip, uint16_t port);
    sv::CameraType getType(void) { return sv::CameraType::G1; }
    std::string getName(void) { return _ip; }

    // Build the GStreamer pipeline and start the background read thread.
    bool open(void);

    sv_camera_G1(int timeOut);
    ~sv_camera_G1(); // NOTE(review): definition not visible in this chunk — confirm it exists

    // Factory entry point (name "creat" kept as declared elsewhere).
    static CameraBase *creat(int timeOut)
    {
        return new sv_camera_G1(timeOut);
    }
};

// Forward the read-timeout to the base class; no other state to initialize.
sv_camera_G1::sv_camera_G1(int timeOut) : sv_p::CameraBase(timeOut)
{
}
// Record the camera's IP address for a later open().
// Refused (returns false) once the stream is already open. The G1's RTSP
// port is fixed at 554 inside open(), so `port` is kept only for interface
// compatibility with the other drivers.
bool sv_camera_G1::setStream(const std::string &ip, uint16_t port)
{
    if (this->cap.isOpened())
    {
        return false;
    }
    this->_ip = ip;
    return true;
}
// Native stream sizes offered by the G1, largest first
// (indices pair up: 2704x1520, 1920x1080, 1280x720).
const static uint32_t imageHList[3] = {1520, 1080, 720};
const static uint32_t imageWList[3] = {2704, 1920, 1280};

// Always opened through GStreamer, so GStreamer must be installed.
/**
 * @brief Open the G1's RTSP H.264 stream and start the background read thread.
 * @return true when the pipeline opened successfully.
 *
 * Picks the largest native stream size that fits inside the requested output
 * size (getW()/getH()); if none fits, requests the smallest stream and lets
 * the pipeline scale it.
 *
 * BUGFIX: the Jetson branch requested the garbled decoder property
 * "enable-max-performancegst=1"; the nvv4l2decoder property is named
 * "enable-max-performance".
 */
bool sv_camera_G1::open(void)
{
    bool ret = false;

    std::ostringstream pipeline;
    pipeline << "rtspsrc location = rtsp://" << this->_ip << ":554/H264?W=";

    // Find the first (largest) native size that fits the requested output.
    uint8_t i;
    for (i = 0; i < 3; i++)
    {
        if (imageHList[i] <= this->getH() && imageWList[i] <= this->getW())
        {
            break;
        }
    }
    if (i < 3)
    {
        pipeline << imageWList[i] << "&H=" << imageHList[i];
    }
    else
    {
        // Requested size smaller than every native stream: take the smallest.
        pipeline << imageWList[2] << "&H=" << imageHList[2];
    }

    pipeline << "&FPS=" << this->getExpectFps() << "&BR=4000000 latency=100 "
             << "! application/x-rtp,media=video ! rtph264depay ! parsebin ! ";
#ifdef PLATFORM_JETSON
    pipeline << "nvv4l2decoder enable-max-performance=1 "
             << "! nvvidconv ! video/x-raw,format=(string)BGRx"
             << ",width=(int)" << this->getW()
             << ",height=(int)" << this->getH();
#else
    pipeline << "avdec_h264 ! videoscale ! video/x-raw"
             << ",width=(int)" << this->getW()
             << ",height=(int)" << this->getH();
#endif
    pipeline << " ! videoconvert ! video/x-raw,format=(string)BGR ! appsink sync=false";

    if (this->cap.open(pipeline.str(), cv::CAP_GSTREAMER))
    {
        // Start the detached frame-grab thread; keep its native handle for control.
        std::thread readLoop(&CameraBase::readThread, this);
        this->readThreadHandle = readLoop.native_handle();
        readLoop.detach();
        ret = true;
    }
    return ret;
}

View File

@@ -0,0 +1,85 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-12-19 18:30:17
* @LastEditors: L LC @amov
* @LastEditTime: 2023-12-21 17:57:02
* @FilePath: /SpireCV/video_io/driver/sv_camera_GX40.cpp
*/
#include "sv_camera_privately.h"
#include <fstream>
// RTSP (H.265) driver for the Amov GX40 gimbal camera.
class sv_camera_GX40 : public sv_p::CameraBase
{
private:
    std::string _ip;      // camera IP address, set via setStream()
    uint16_t _port = 554; // RTSP port; defaults to the standard 554

public:
    // Store the stream endpoint; only allowed while the capture is closed.
    bool setStream(const std::string &ip, uint16_t port);
    sv::CameraType getType(void) { return sv::CameraType::GX40; }
    std::string getName(void) { return _ip; }

    // Build the GStreamer pipeline and start the background read thread.
    bool open(void);

    sv_camera_GX40(int timeOut);
    ~sv_camera_GX40(); // NOTE(review): definition not visible in this chunk — confirm it exists

    // Factory entry point (name "creat" kept as declared elsewhere).
    static CameraBase *creat(int timeOut)
    {
        return new sv_camera_GX40(timeOut);
    }
};

// Forward the read-timeout to the base class; no other state to initialize.
sv_camera_GX40::sv_camera_GX40(int timeOut) : sv_p::CameraBase(timeOut)
{
}
// Record the RTSP endpoint (IP and port) for a later open().
// Refused (returns false) once the stream is already open.
bool sv_camera_GX40::setStream(const std::string &ip, uint16_t port)
{
    if (this->cap.isOpened())
    {
        return false;
    }
    this->_ip = ip;
    this->_port = port;
    return true;
}
// Always opened through GStreamer, so GStreamer must be installed.
/* Open the GX40's RTSP H.265 stream and start the background read thread.
 * Returns true when the pipeline opened successfully.
 * On Jetson: nvv4l2decoder + nvvidconv (flip-method=4); elsewhere:
 * avdec_h265 + videoscale + videoflip (video-direction=4).
 * NOTE(review): the non-Jetson branch asks for BGRx directly after
 * videoscale — confirm avdec_h265 output can negotiate those caps.
 * NOTE(review): credentials "user:0000" are hard-coded — confirm they are
 * the camera's fixed defaults. */
bool sv_camera_GX40::open(void)
{
    bool ret = false;

    std::ostringstream pipeline;
    pipeline << "rtspsrc location = rtsp://user:0000@" << this->_ip << ":" << this->_port
             << "/cam/realmonitor?channel=1&subtype=0 latency=100 ! application/x-rtp,media=video ! "
             << "rtph265depay ! parsebin ! ";
#ifdef PLATFORM_JETSON
    // flip value 4 appears to mirror the image; matches videoflip below.
    pipeline << "nvv4l2decoder ! nvvidconv flip-method=4 ! ";
#else
    pipeline << "avdec_h265 ! videoscale ! ";
#endif
    pipeline << "video/x-raw,format=(string)BGRx"
             << ",width=(int)" << this->getW()
             << ",height=(int)" << this->getH()
             << " ";
#ifndef PLATFORM_JETSON
    pipeline << "! videoflip video-direction=4 ";
#endif
    pipeline << " ! videoconvert ! video/x-raw,format=(string)BGR ! appsink sync=false";

    if (this->cap.open(pipeline.str(), cv::CAP_GSTREAMER))
    {
        // Start the detached frame-grab thread; keep its native handle for control.
        std::thread readLoop(&CameraBase::readThread, this);
        this->readThreadHandle = readLoop.native_handle();
        readLoop.detach();
        ret = true;
    }
    return ret;
}

View File

@@ -0,0 +1,78 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-12-19 18:30:17
* @LastEditors: L LC @amov
* @LastEditTime: 2023-12-21 17:57:07
* @FilePath: /SpireCV/video_io/driver/sv_camera_MC1.cpp
*/
#include "sv_camera_privately.h"
#include <fstream>
// Driver for the MC1 CSI camera, opened via nvarguscamerasrc.
// Only available on the Jetson/Allspark build (see creat()).
class sv_camera_MC1 : public sv_p::CameraBase
{
private:
    int _index = -2; // CSI sensor id; -2 means "not set"

public:
    // Store the sensor index; only allowed while the capture is closed.
    bool setIndex(int index);
    sv::CameraType getType(void) { return sv::CameraType::MC1; }
    bool open(void);

    sv_camera_MC1(int timeOut);
    ~sv_camera_MC1(); // NOTE(review): definition not visible in this chunk — confirm it exists

    // Factory entry point; throws on non-Jetson builds where this camera
    // cannot exist.
    static CameraBase *creat(int timeOut)
    {
#ifdef PLATFORM_JETSON
        return new sv_camera_MC1(timeOut);
#else
        throw std::runtime_error("SpireCV (101) Camera not supported except allspark");
        return nullptr;
#endif
    }
};

// Forward the read-timeout to the base class; no other state to initialize.
sv_camera_MC1::sv_camera_MC1(int timeOut) : sv_p::CameraBase(timeOut)
{
}
// Record the CSI sensor index for a later open().
// Refused (returns false) once the capture is already open.
bool sv_camera_MC1::setIndex(int index)
{
    if (this->cap.isOpened())
    {
        return false;
    }
    this->_index = index;
    return true;
}
// CSI camera opened through nvarguscamerasrc (Argus), not plain V4L2.
/* Build the GStreamer pipeline for the CSI sensor and start the read thread.
 * Returns true when the pipeline opened successfully.
 *
 * BUGFIX: the caps string was malformed — it contained the duplicated media
 * type "video/x-raw,video/x-raw(memory:NVMM)" and lacked the commas between
 * the width/height/framerate fields, producing e.g.
 * "width=(int)1280height=(int)720framerate=30/1", which GStreamer rejects. */
bool sv_camera_MC1::open(void)
{
    bool ret = false;

    std::ostringstream pipeline;
    pipeline << "nvarguscamerasrc sensor-id=" << this->_index;
    pipeline << " ! video/x-raw(memory:NVMM),"
             << "width=(int)" << this->getW()
             << ",height=(int)" << this->getH()
             << ",framerate=" << this->getExpectFps() << "/1 !"
             << " nvvidconv flip-method=0 ! video/x-raw,format=(string)BGRx !"
             << " videoconvert ! video/x-raw, format=(string)BGR ! appsink";

    if (this->cap.open(pipeline.str(), cv::CAP_GSTREAMER))
    {
        // Start the detached frame-grab thread; keep its native handle for control.
        std::thread readLoop(&CameraBase::readThread, this);
        this->readThreadHandle = readLoop.native_handle();
        readLoop.detach();
        ret = true;
    }
    return ret;
}

View File

@@ -0,0 +1,149 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-12-21 10:45:50
* @LastEditors: L LC @amov
* @LastEditTime: 2023-12-21 17:57:30
* @FilePath: /SpireCV/video_io/driver/sv_camera_V4L2s.cpp
*/
#include "sv_camera_privately.h"
// Generic USB / V4L2 camera driver, opened by device index or device path.
// Also serves as the base for WEBCAM / Q10 / NONE below, which differ only
// in the camera type they report.
class sv_camera_V4L2 : public sv_p::CameraBase
{
private:
    int _index = -1;          // /dev/video index; -1 means "not set"
    std::string _name = "\0"; // device path; "\0" decays to "", i.e. "not set"

public:
    // Store the device path / index; only allowed while the capture is closed.
    bool setName(const std::string &name);
    bool setIndex(int index);
    virtual sv::CameraType getType(void) { return sv::CameraType::V4L2CAM; }
    std::string getName(void) { return this->_name; }
    bool open(void);

    sv_camera_V4L2(int timeOut);
    ~sv_camera_V4L2(); // NOTE(review): definition not visible in this chunk — confirm it exists

    // Factory entry point (name "creat" kept as declared elsewhere).
    static CameraBase *creat(int timeOut)
    {
        return new sv_camera_V4L2(timeOut);
    }
};

// Forward the read-timeout to the base class; no other state to initialize.
sv_camera_V4L2::sv_camera_V4L2(int timeOut) : sv_p::CameraBase(timeOut)
{
}
// Record the device path for a later open().
// Refused (returns false) once the capture is already open.
bool sv_camera_V4L2::setName(const std::string &name)
{
    if (this->cap.isOpened())
    {
        return false;
    }
    this->_name = name;
    return true;
}
// Record the /dev/video index for a later open().
// Refused (returns false) once the capture is already open.
bool sv_camera_V4L2::setIndex(int index)
{
    if (this->cap.isOpened())
    {
        return false;
    }
    this->_index = index;
    return true;
}
/* Open the camera via OpenCV's V4L2 backend.
 * Tries the numeric index first, then falls back to the device path.
 * On success the cached capture properties are pushed to the device and the
 * background read thread is started.
 *
 * Idiom fix: the original tested `_name != "\0"`, which only works because
 * the C-string literal "\0" decays to an empty std::string — the intent is
 * an explicit empty() check. Behavior is unchanged. */
bool sv_camera_V4L2::open(void)
{
    bool ret = false;
    if (this->_index >= 0)
    {
        ret = this->cap.open(this->_index, cv::CAP_V4L2);
    }
    if (!ret && !this->_name.empty())
    {
        ret = this->cap.open(this->_name, cv::CAP_V4L2);
    }
    if (ret)
    {
        // Push cached properties to the freshly-opened device.
        this->setFps(this->getExpectFps());
        this->setBrightness(this->getBrightness());
        this->setContrast(this->getContrast());
        this->setSaturation(this->getSaturation());
        this->setHue(this->getHue());
        this->setExposure(this->getExposure());
        // Start the detached frame-grab thread; keep its native handle for control.
        std::thread readLoop(&CameraBase::readThread, this);
        this->readThreadHandle = readLoop.native_handle();
        readLoop.detach();
    }
    return ret;
}
// USB webcam: identical to the generic V4L2 driver, only the reported
// camera type differs.
class sv_camera_WEBCAM : public sv_camera_V4L2
{
public:
    sv::CameraType getType(void) { return sv::CameraType::WEBCAM; }

    sv_camera_WEBCAM(int timeOut);
    ~sv_camera_WEBCAM(); // NOTE(review): definition not visible in this chunk — confirm it exists

    // Factory entry point (name "creat" kept as declared elsewhere).
    static CameraBase *creat(int timeOut)
    {
        return new sv_camera_WEBCAM(timeOut);
    }
};

// Forward the read-timeout to the V4L2 base; no other state to initialize.
sv_camera_WEBCAM::sv_camera_WEBCAM(int timeOut) : sv_camera_V4L2(timeOut)
{
}
// Q10 camera: identical to the generic V4L2 driver, only the reported
// camera type differs.
class sv_camera_Q10 : public sv_camera_V4L2
{
public:
    sv::CameraType getType(void) { return sv::CameraType::Q10; }

    sv_camera_Q10(int timeOut);
    ~sv_camera_Q10(); // NOTE(review): definition not visible in this chunk — confirm it exists

    // Factory entry point (name "creat" kept as declared elsewhere).
    static CameraBase *creat(int timeOut)
    {
        return new sv_camera_Q10(timeOut);
    }
};

// Forward the read-timeout to the V4L2 base; no other state to initialize.
sv_camera_Q10::sv_camera_Q10(int timeOut) : sv_camera_V4L2(timeOut)
{
}
// Fallback driver for an unspecified camera type: behaves exactly like the
// generic V4L2 driver but reports type NONE.
class sv_camera_NONE : public sv_camera_V4L2
{
public:
    sv::CameraType getType(void) { return sv::CameraType::NONE; }

    sv_camera_NONE(int timeOut);
    ~sv_camera_NONE(); // NOTE(review): definition not visible in this chunk — confirm it exists

    // Factory entry point (name "creat" kept as declared elsewhere).
    static CameraBase *creat(int timeOut)
    {
        return new sv_camera_NONE(timeOut);
    }
};

// Forward the read-timeout to the V4L2 base; no other state to initialize.
sv_camera_NONE::sv_camera_NONE(int timeOut) : sv_camera_V4L2(timeOut)
{
}

View File

@@ -0,0 +1,79 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-12-21 11:30:40
* @LastEditors: L LC @amov
* @LastEditTime: 2023-12-21 17:56:34
* @FilePath: /SpireCV/video_io/driver/sv_camera_file.cpp
*/
#include "sv_camera_privately.h"
// Video-file "camera": wraps a local file. Unlike the live drivers it has
// no background read thread — both read paths pull frames synchronously.
class sv_camera_FILE : public sv_p::CameraBase
{
private:
    std::string _name = "\0"; // file path; "\0" decays to "", i.e. "not set"

public:
    // Store the file path; only allowed while the capture is closed.
    bool setName(const std::string &name);
    virtual sv::CameraType getType(void) { return sv::CameraType::VIDEOFILE; }
    std::string getName(void) { return this->_name; }
    bool open(void);

    // Blocking read straight from the file (no grab thread for file sources).
    bool read(cv::Mat &image)
    {
        return this->cap.read(image);
    }
    // For a file source "non-blocking" is the same as a plain read.
    bool readNoBlock(cv::Mat &image)
    {
        return this->read(image);
    }

    sv_camera_FILE(int timeOut);
    ~sv_camera_FILE(); // NOTE(review): definition not visible in this chunk — confirm it exists

    // Factory entry point (name "creat" kept as declared elsewhere).
    static CameraBase *creat(int timeOut)
    {
        return new sv_camera_FILE(timeOut);
    }
};

// Forward the read-timeout to the base class; no other state to initialize.
sv_camera_FILE::sv_camera_FILE(int timeOut) : sv_p::CameraBase(timeOut)
{
}
// Record the video-file path for a later open().
// Refused (returns false) once the file is already open.
bool sv_camera_FILE::setName(const std::string &name)
{
    if (this->cap.isOpened())
    {
        return false;
    }
    this->_name = name;
    return true;
}
/* Open the video file whose path was set via setName().
 * Returns true on success; no read thread is started for file sources
 * (see the synchronous read()/readNoBlock() in the class).
 *
 * Idiom fix: `_name != "\0"` replaced with an explicit empty() check —
 * the literal "\0" decays to an empty string, so behavior is unchanged.
 * NOTE(review): pushing brightness/contrast/etc. to a file-backed capture
 * is likely a no-op — kept for parity with the live drivers; confirm intent. */
bool sv_camera_FILE::open(void)
{
    bool ret = false;
    if (!this->_name.empty())
    {
        ret = this->cap.open(this->_name);
    }
    if (ret)
    {
        // Push cached properties (mirrors the live-camera drivers).
        this->setFps(this->getExpectFps());
        this->setBrightness(this->getBrightness());
        this->setContrast(this->getContrast());
        this->setSaturation(this->getSaturation());
        this->setHue(this->getHue());
        this->setExposure(this->getExposure());
    }
    return ret;
}

View File

@@ -0,0 +1,95 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-12-19 18:30:17
* @LastEditors: L LC @amov
* @LastEditTime: 2023-12-21 17:57:12
* @FilePath: /SpireCV/video_io/driver/sv_camera_mipi.cpp
*/
#include "sv_camera_privately.h"
#include <fstream>
// Plain MIPI camera exposed through the V4L2 framework, opened with a
// GStreamer v4l2src pipeline (see open()).
class sv_camera_MIPI : public sv_p::CameraBase
{
private:
    int _index = -2;          // device index; -2 means "not set"
    std::string _name = "\0"; // device path; "\0" decays to "", i.e. "not set"

public:
    // Store the device path / index; only allowed while the capture is closed.
    bool setName(const std::string &name);
    bool setIndex(int index);
    sv::CameraType getType(void) { return sv::CameraType::MIPI; }
    std::string getName(void) { return _name; }
    bool open(void);

    sv_camera_MIPI(int timeOut);
    ~sv_camera_MIPI(); // NOTE(review): definition not visible in this chunk — confirm it exists

    // Factory entry point (name "creat" kept as declared elsewhere).
    static CameraBase *creat(int timeOut)
    {
        return new sv_camera_MIPI(timeOut);
    }
};

// Forward the read-timeout to the base class; no other state to initialize.
sv_camera_MIPI::sv_camera_MIPI(int timeOut) : sv_p::CameraBase(timeOut)
{
}
// Record the device path for a later open().
// Refused (returns false) once the capture is already open.
bool sv_camera_MIPI::setName(const std::string &name)
{
    if (this->cap.isOpened())
    {
        return false;
    }
    this->_name = name;
    return true;
}
// Record the device index for a later open().
// Refused (returns false) once the capture is already open.
bool sv_camera_MIPI::setIndex(int index)
{
    if (this->cap.isOpened())
    {
        return false;
    }
    this->_index = index;
    return true;
}
// Plain MIPI camera opened through the V4L2 framework (v4l2src).
/* Build the GStreamer pipeline for the MIPI sensor and start the read thread.
 * Returns true when the pipeline opened successfully.
 *
 * BUGFIX: the caps string lacked the commas between the width/height/
 * framerate fields ("width=(int)1280height=(int)720framerate=30/1"), which
 * GStreamer cannot parse.
 * BUGFIX: v4l2src's "device-fd" property expects an already-open file
 * descriptor, not a device index; the index is now mapped to the matching
 * /dev/video node via the "device" property instead. */
bool sv_camera_MIPI::open(void)
{
    bool ret = false;

    std::ostringstream pipeline;
    pipeline << "v4l2src ";
    if (!this->_name.empty())
    {
        pipeline << "device=" << this->_name << " !";
    }
    else if (this->_index != -2)
    {
        pipeline << "device=/dev/video" << this->_index << " !";
    }
    pipeline << " video/x-raw,format=(string)NV12,"
             << "width=(int)" << this->getW()
             << ",height=(int)" << this->getH()
             << ",framerate=" << this->getExpectFps() << "/1 !"
             << " videoconvert ! video/x-raw, format=(string)BGR ! appsink";

    if (this->cap.open(pipeline.str(), cv::CAP_GSTREAMER))
    {
        // Start the detached frame-grab thread; keep its native handle for control.
        std::thread readLoop(&CameraBase::readThread, this);
        this->readThreadHandle = readLoop.native_handle();
        readLoop.detach();
        ret = true;
    }
    return ret;
}

View File

@@ -0,0 +1,74 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-12-21 11:30:40
* @LastEditors: L LC @amov
* @LastEditTime: 2023-12-21 17:57:22
* @FilePath: /SpireCV/video_io/driver/sv_camera_streaming.cpp
*/
#include "sv_camera_privately.h"
// Network-stream "camera": opens an arbitrary stream URL (whatever
// cv::VideoCapture's default backend accepts) given via setName().
class sv_camera_Streaming : public sv_p::CameraBase
{
private:
    std::string _name = "\0"; // stream URL; "\0" decays to "", i.e. "not set"

public:
    // Store the stream URL; only allowed while the capture is closed.
    bool setName(const std::string &name);
    virtual sv::CameraType getType(void) { return sv::CameraType::STREAMING; }
    std::string getName(void) { return this->_name; }
    bool open(void);

    sv_camera_Streaming(int timeOut);
    ~sv_camera_Streaming(); // NOTE(review): definition not visible in this chunk — confirm it exists

    // Factory entry point (name "creat" kept as declared elsewhere).
    static CameraBase *creat(int timeOut)
    {
        return new sv_camera_Streaming(timeOut);
    }
};

// Forward the read-timeout to the base class; no other state to initialize.
sv_camera_Streaming::sv_camera_Streaming(int timeOut) : sv_p::CameraBase(timeOut)
{
}
// Record the stream URL for a later open().
// Refused (returns false) once the capture is already open.
bool sv_camera_Streaming::setName(const std::string &name)
{
    if (this->cap.isOpened())
    {
        return false;
    }
    this->_name = name;
    return true;
}
/* Open the network stream whose URL was set via setName().
 * On success the cached capture properties are pushed to the capture and the
 * background read thread is started.
 *
 * Idiom fix: `_name != "\0"` replaced with an explicit empty() check —
 * the literal "\0" decays to an empty string, so behavior is unchanged.
 * NOTE(review): pushing brightness/contrast/etc. to a network stream is
 * likely a no-op — kept for parity with the other drivers; confirm intent. */
bool sv_camera_Streaming::open(void)
{
    bool ret = false;
    if (!this->_name.empty())
    {
        ret = this->cap.open(this->_name);
    }
    if (ret)
    {
        // Push cached properties (mirrors the live-camera drivers).
        this->setFps(this->getExpectFps());
        this->setBrightness(this->getBrightness());
        this->setContrast(this->getContrast());
        this->setSaturation(this->getSaturation());
        this->setHue(this->getHue());
        this->setExposure(this->getExposure());
        // Start the detached frame-grab thread; keep its native handle for control.
        std::thread readLoop(&CameraBase::readThread, this);
        this->readThreadHandle = readLoop.native_handle();
        readLoop.detach();
    }
    return ret;
}

Some files were not shown because too many files have changed in this diff Show More