first commit

This commit is contained in:
jario-jin 2023-06-16 10:42:02 +08:00
parent 5793dd0af6
commit 0136d3c2da
122 changed files with 25173 additions and 25 deletions

42
.gitignore vendored
View File

@ -1,3 +1,21 @@
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
eggs/
.eggs/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
.idea/
# Prerequisites # Prerequisites
*.d *.d
@ -30,3 +48,27 @@
*.exe *.exe
*.out *.out
*.app *.app
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# VSCode Editor
.vscode/

378
CMakeLists.txt Normal file
View File

@ -0,0 +1,378 @@
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
# CMP0054 NEW: only dereference unquoted variable names in if(), so the
# quoted STREQUAL comparisons below behave predictably.
cmake_policy(SET CMP0054 NEW)
set(PROJECT_VERSION 0.2.0)
project(SpireCV VERSION ${PROJECT_VERSION} LANGUAGES CXX)
# Export symbols from the SpireCV shared libraries.
add_definitions(-DAPI_EXPORTS)
# Default to Release, but do not override a build type the user supplied
# on the command line (the original unconditionally forced Release).
if(NOT CMAKE_BUILD_TYPE)
  set(CMAKE_BUILD_TYPE "Release")
endif()
## Supported: JETSON, X86_CUDA, X86_CPU
message(STATUS "System:${CMAKE_HOST_SYSTEM_PROCESSOR}")
if(NOT DEFINED PLATFORM)
  message(FATAL_ERROR "PLATFORM NOT SPECIFIED!")
else()
  message(STATUS "PLATFORM: ${PLATFORM}")
  if(PLATFORM STREQUAL "JETSON")
    add_definitions(-DPLATFORM_JETSON)
    option(USE_CUDA "BUILD WITH CUDA." ON)
    option(USE_GSTREAMER "BUILD WITH GSTREAMER." ON)
  elseif(PLATFORM STREQUAL "X86_CUDA")
    add_definitions(-DPLATFORM_X86_CUDA)
    option(USE_CUDA "BUILD WITH CUDA." ON)
    option(USE_FFMPEG "BUILD WITH FFMPEG." ON)
  elseif(PLATFORM STREQUAL "X86_CPU")
    # BUGFIX: X86_CPU was rejected here, yet the library/install/package
    # sections below all have X86_CPU branches — they were unreachable.
    add_definitions(-DPLATFORM_X86_CPU)
    option(USE_FFMPEG "BUILD WITH FFMPEG." ON)
  else()
    message(FATAL_ERROR "UNSUPPORTED PLATFORM!")
  endif()
endif()
if(USE_CUDA)
  add_definitions(-DWITH_CUDA)
  # BUGFIX: option() signature is (var, help_text, default); the original
  # passed OFF as the help string, leaving the default implicit.
  option(CUDA_USE_STATIC_CUDA_RUNTIME "Link against the static CUDA runtime" OFF)
  find_package(CUDA REQUIRED)
  message(STATUS "CUDA: ON")
endif()
if(USE_GSTREAMER)
  add_definitions(-DWITH_GSTREAMER)
  message(STATUS "GSTREAMER: ON")
endif()
if(USE_FFMPEG)
  add_definitions(-DWITH_FFMPEG)
  find_package(fmt REQUIRED)
  # NOTE(review): bare .so names rely on the linker's default search path;
  # prefer find_package(FFMPEG) / pkg-config imported targets when possible.
  set(FFMPEG_LIBS libavutil.so libavcodec.so libavformat.so libavdevice.so libavfilter.so libswscale.so)
  message(STATUS "WITH_FFMPEG: ON")
endif()
# OpenCV >= 4.7 is mandatory (the tracking backend lives in algorithm/tracking/ocv470).
add_definitions(-DWITH_OCV470)
find_package(OpenCV 4.7 REQUIRED)
message(STATUS "OpenCV library status:")
message(STATUS " version: ${OpenCV_VERSION}")
message(STATUS " libraries: ${OpenCV_LIBS}")
message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}")
# Public headers of the SDK.
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
# Internal (non-installed) headers for the gimbal drivers, detectors,
# tracker backend, video I/O and utilities.
# NOTE(review): these are directory-scoped; target_include_directories on
# the individual libraries would avoid leaking paths to every target.
include_directories(
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/IOs/serial/include
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/FIFO
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/G1
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/G2
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/Q10f
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src
${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/cuda
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/landing_det/cuda
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/tracking/ocv470
${CMAKE_CURRENT_SOURCE_DIR}/video_io
${CMAKE_CURRENT_SOURCE_DIR}/algorithm/ellipse_det
${CMAKE_CURRENT_SOURCE_DIR}/utils
)
# GStreamer/GLib headers are hardcoded per architecture.
# NOTE(review): pkg-config (find_package(PkgConfig); pkg_check_modules)
# would discover these paths portably instead of hardcoding them.
if(USE_GSTREAMER)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/video_io/gstreamer)
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
include_directories(
"/usr/include/gstreamer-1.0"
"/usr/local/include/gstreamer-1.0"
"/usr/include/glib-2.0"
"/usr/lib/aarch64-linux-gnu/glib-2.0/include"
)
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64")
include_directories(
"/usr/include/gstreamer-1.0"
"/usr/local/include/gstreamer-1.0"
"/usr/include/glib-2.0"
"/usr/lib/x86_64-linux-gnu/glib-2.0/include"
)
endif()
endif()
if(USE_FFMPEG)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/video_io/ffmpeg)
endif()
# Public headers installed to <prefix>/include (see the install(FILES) rule below).
set(
public_HEADS
include/sv_core.h
include/sv_video_base.h
include/sv_gimbal.h
include/sv_algorithm_base.h
include/sv_common_det.h
include/sv_landing_det.h
include/sv_tracking.h
include/sv_video_input.h
include/sv_video_output.h
include/sv_world.h
)
# Gimbal Sources: serial transport (Linux-only impl files are listed explicitly).
set(serial_SRCS
gimbal_ctrl/IOs/serial/src/serial.cc
)
list(APPEND serial_SRCS gimbal_ctrl/IOs/serial/src/impl/unix.cc)
list(APPEND serial_SRCS gimbal_ctrl/IOs/serial/src/impl/list_ports/list_ports_linux.cc)
# Gimbal hardware drivers (G1 / G2 / Q10f protocols plus the shared FIFO).
# NOTE(review): file(GLOB) will not pick up newly added sources until
# re-configure; an explicit list would be safer.
set(driver_SRCS
gimbal_ctrl/driver/src/FIFO/Ring_Fifo.cc
)
file(GLOB DRV_LIB_FILES ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/G1/*.cpp)
list(APPEND driver_SRCS ${DRV_LIB_FILES})
file(GLOB DRV_LIB_FILES ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/G2/*.cpp)
list(APPEND driver_SRCS ${DRV_LIB_FILES})
file(GLOB DRV_LIB_FILES ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/Q10f/*.cpp)
list(APPEND driver_SRCS ${DRV_LIB_FILES})
file(GLOB DRV_LIB_FILES ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/*.cpp)
list(APPEND driver_SRCS ${DRV_LIB_FILES})
# Public gimbal API implementation.
set(gimbal_SRCS
gimbal_ctrl/sv_gimbal.cpp
gimbal_ctrl/sv_gimbal_io.hpp
)
# Gimbal Lib: standalone shared library; needs librt and pthreads on Linux.
add_library(sv_gimbal SHARED ${serial_SRCS} ${driver_SRCS} ${gimbal_SRCS})
target_link_libraries(sv_gimbal rt pthread)
# Core algorithm/video sources shared by every platform build of sv_world.
set(spirecv_SRCS
algorithm/sv_algorithm_base.cpp
algorithm/ellipse_det/ellipse_detector.cpp
algorithm/common_det/sv_common_det.cpp
algorithm/landing_det/sv_landing_det.cpp
algorithm/tracking/sv_tracking.cpp
)
# NOTE(review): file(GLOB) misses newly added files until re-configure.
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/tracking/ocv470/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/utils/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
# CUDA backends (TensorRT-based detectors) are only compiled when USE_CUDA.
if(USE_CUDA)
list(APPEND spirecv_SRCS algorithm/common_det/cuda/yolov7/preprocess.cu)
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/cuda/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/cuda/yolov7/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/landing_det/cuda/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
endif()
if(USE_FFMPEG)
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/ffmpeg/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
endif()
if(USE_GSTREAMER)
file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/gstreamer/*.cpp)
list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
endif()
# Build sv_world. CUDA builds use cuda_add_library (legacy FindCUDA) so the
# .cu sources are compiled by nvcc; CPU builds use a plain add_library.
if(USE_CUDA)
# CUDA
include_directories(/usr/local/cuda/include)
link_directories(/usr/local/cuda/lib64)
# TensorRT
# NOTE(review): hardcoded x86_64 paths; on Jetson (aarch64) TensorRT lives
# under aarch64-linux-gnu — confirm these are merely ignored there.
include_directories(/usr/include/x86_64-linux-gnu)
link_directories(/usr/lib/x86_64-linux-gnu)
# Add library: YOLO TensorRT plugin layer, then the main sv_world library.
cuda_add_library(sv_yoloplugins SHARED algorithm/common_det/cuda/yolov7/yololayer.cu)
target_link_libraries(sv_yoloplugins nvinfer cudart)
cuda_add_library(sv_world SHARED ${spirecv_SRCS})
if(USE_GSTREAMER)
target_link_libraries(
sv_world ${OpenCV_LIBS}
sv_yoloplugins sv_gimbal
nvinfer cudart
gstrtspserver-1.0
)
else()
target_link_libraries(
sv_world ${OpenCV_LIBS}
sv_yoloplugins sv_gimbal
nvinfer cudart
)
endif()
if(USE_FFMPEG)
target_link_libraries(sv_world ${FFMPEG_LIBS} fmt)
endif()
# Extra YOLO sources compiled directly into the SpireCVDet/SpireCVSeg tools.
set(
YOLO_SRCS
algorithm/common_det/cuda/yolov7/preprocess.cu
algorithm/common_det/cuda/yolov7/postprocess.cpp
algorithm/common_det/cuda/yolov7/model.cpp
algorithm/common_det/cuda/yolov7/calibrator.cpp
)
cuda_add_executable(SpireCVDet samples/SpireCVDet.cpp ${YOLO_SRCS})
target_link_libraries(SpireCVDet sv_world)
cuda_add_executable(SpireCVSeg samples/SpireCVSeg.cpp ${YOLO_SRCS})
target_link_libraries(SpireCVSeg sv_world)
# NOTE(review): confirm the PLATFORM validation at the top accepts
# "X86_CPU"; otherwise this branch is unreachable.
elseif(PLATFORM STREQUAL "X86_CPU")
add_library(sv_world SHARED ${spirecv_SRCS})
target_link_libraries(
sv_world ${OpenCV_LIBS}
sv_gimbal
)
if(USE_GSTREAMER)
target_link_libraries(sv_world gstrtspserver-1.0)
endif()
if(USE_FFMPEG)
target_link_libraries(sv_world ${FFMPEG_LIBS} fmt)
endif()
endif()
# Demo applications: one executable per sample source, each linked only
# against sv_world (entries are "TargetName=source path" pairs).
set(SV_DEMO_APPS
  "ArucoDetection=samples/demo/aruco_detection.cpp"
  "CameraReading=samples/demo/camera_reading.cpp"
  "CommonObjectDetection=samples/demo/common_object_detection.cpp"
  "DetectionWithClickedTracking=samples/demo/detection_with_clicked_tracking.cpp"
  "EllipseDetection=samples/demo/ellipse_detection.cpp"
  "LandingMarkerDetection=samples/demo/landing_marker_detection.cpp"
  "SingleObjectTracking=samples/demo/single_object_tracking.cpp"
  "UdpDetectionInfoReceiver=samples/demo/udp_detection_info_receiver.cpp"
  "UdpDetectionInfoSender=samples/demo/udp_detection_info_sender.cpp"
  "VideoSaving=samples/demo/video_saving.cpp"
  "VideoStreaming=samples/demo/video_streaming.cpp"
)
foreach(demo_entry IN LISTS SV_DEMO_APPS)
  string(REPLACE "=" ";" demo_entry "${demo_entry}")
  list(GET demo_entry 0 demo_target)
  list(GET demo_entry 1 demo_source)
  add_executable(${demo_target} ${demo_source})
  target_link_libraries(${demo_target} sv_world)
endforeach()
# Camera calibration tool: links OpenCV directly, not sv_world.
# NOTE(review): target name "CameraCalibrarion" is misspelled upstream;
# kept verbatim so existing build invocations keep working.
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/samples/calib)
add_executable(CameraCalibrarion samples/calib/calibrate_camera_charuco.cpp)
target_link_libraries(CameraCalibrarion ${OpenCV_LIBS})
# Install prefix used by the generated package-config files below;
# defaults to CMAKE_INSTALL_PREFIX unless overridden on the command line.
message(STATUS "CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}")
if(NOT DEFINED SV_INSTALL_PREFIX)
  set(SV_INSTALL_PREFIX ${CMAKE_INSTALL_PREFIX})
endif()
message(STATUS "SV_INSTALL_PREFIX: ${SV_INSTALL_PREFIX}")
# Install rules. CUDA builds install all three libraries plus the two
# command-line tools; CPU builds install only sv_world.
# NOTE(review): sv_gimbal is not installed on the X86_CPU path even though
# sv_world links against it — confirm this is intentional.
if(USE_CUDA)
install(TARGETS sv_gimbal sv_yoloplugins sv_world
LIBRARY DESTINATION lib
)
install(TARGETS SpireCVDet SpireCVSeg
RUNTIME DESTINATION bin
)
elseif(PLATFORM STREQUAL "X86_CPU")
install(TARGETS sv_world
LIBRARY DESTINATION lib
)
endif()
# Public API headers (declared in public_HEADS above).
install(FILES ${public_HEADS}
DESTINATION include
)
# Generate a per-platform <Project>Config.cmake.in template at configure
# time, then turn it into a relocatable package config below. The bracket
# literal [[...]] is written verbatim; @VAR@ placeholders are substituted
# later by configure_package_config_file().
if(PLATFORM STREQUAL "JETSON")
# NOTE(review): this Jetson template lists /usr/include/x86_64-linux-gnu
# but Jetson is aarch64 — confirm whether this path is intentional.
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/build/${PROJECT_NAME}Config.cmake.in [[
@PACKAGE_INIT@
find_package(OpenCV 4 REQUIRED)
link_directories(/usr/local/cuda/lib64)
set(SV_INCLUDE_DIRS
@SV_INSTALL_PREFIX@/include
/usr/include/x86_64-linux-gnu
/usr/local/cuda/include
${OpenCV_INCLUDE_DIRS}
/usr/include/gstreamer-1.0
/usr/local/include/gstreamer-1.0
/usr/include/glib-2.0
/usr/lib/aarch64-linux-gnu/glib-2.0/include
)
set(SV_LIBRARIES
@SV_INSTALL_PREFIX@/lib/libsv_yoloplugins.so
@SV_INSTALL_PREFIX@/lib/libsv_world.so
@SV_INSTALL_PREFIX@/lib/libsv_gimbal.so
${OpenCV_LIBS}
nvinfer cudart rt pthread
gstrtspserver-1.0
)
]])
elseif(PLATFORM STREQUAL "X86_CUDA")
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/build/${PROJECT_NAME}Config.cmake.in [[
@PACKAGE_INIT@
find_package(OpenCV 4 REQUIRED)
find_package(fmt REQUIRED)
link_directories(/usr/local/cuda/lib64)
set(SV_INCLUDE_DIRS
@SV_INSTALL_PREFIX@/include
/usr/include/x86_64-linux-gnu
/usr/local/cuda/include
${OpenCV_INCLUDE_DIRS}
)
set(SV_LIBRARIES
@SV_INSTALL_PREFIX@/lib/libsv_yoloplugins.so
@SV_INSTALL_PREFIX@/lib/libsv_world.so
@SV_INSTALL_PREFIX@/lib/libsv_gimbal.so
${OpenCV_LIBS}
nvinfer cudart rt pthread
@FFMPEG_LIBS@ fmt
)
]])
elseif(PLATFORM STREQUAL "X86_CPU")
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/build/${PROJECT_NAME}Config.cmake.in [[
@PACKAGE_INIT@
find_package(OpenCV 4 REQUIRED)
find_package(fmt REQUIRED)
set(SV_INCLUDE_DIRS
@SV_INSTALL_PREFIX@/include
/usr/include/x86_64-linux-gnu
${OpenCV_INCLUDE_DIRS}
)
set(SV_LIBRARIES
@SV_INSTALL_PREFIX@/lib/libsv_world.so
@SV_INSTALL_PREFIX@/lib/libsv_gimbal.so
${OpenCV_LIBS}
rt pthread
@FFMPEG_LIBS@ fmt
)
]])
endif()
# Produce SpireCVConfig.cmake + version file and install them where
# find_package(SpireCV) expects them.
include(CMakePackageConfigHelpers)
write_basic_package_version_file(
${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config-version.cmake
VERSION ${PROJECT_VERSION}
COMPATIBILITY AnyNewerVersion
)
configure_package_config_file(${CMAKE_CURRENT_BINARY_DIR}/build/${PROJECT_NAME}Config.cmake.in
${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake
INSTALL_DESTINATION lib/cmake/${PROJECT_NAME}
)
install(FILES
${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake
${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config-version.cmake
DESTINATION lib/cmake/${PROJECT_NAME}
)

202
LICENSE Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,37 +1,67 @@
# SpireCV # SpireCV 智能感知算法库
#### 介绍 ## 项目概况
SpireCV是一个专为智能无人系统打造的边缘实时感知SDK主要功能包括相机/吊舱控制、视频保存与推流、目标探测识别与跟踪、边缘数据管理迭代等。旨在为移动机器人开发者提供高性能、高可靠、接口简洁、功能丰富的视觉感知能力。
#### 软件架构 SpireCV是一个专为**智能无人系统**打造的**边缘实时感知SDK**,主要功能包括**相机/吊舱控制**、**视频保存与推流**、**目标探测识别与跟踪**、**边缘数据管理迭代**等。旨在为移动机器人开发者提供高性能、高可靠、接口简洁、功能丰富的视觉感知能力。
软件架构说明
- Githubhttps://github.com/amov-lab/SpireCV
- Giteehttps://gitee.com/amovlab/SpireCV
- **开源项目维护不易还烦请点一个star收藏谢谢支持**
#### 安装教程 ## 快速入门
1. xxxx - 安装及使用:[SpireCV使用手册](https://wiki.amovlab.com/public/spirecv-wiki/)
2. xxxx - 需掌握C++语言基础、CMake编译工具基础。
3. xxxx - 需要掌握OpenCV视觉库基础了解CUDA、OpenVINO、RKNN和CANN等计算库。
- 需要了解ROS基本概念及基本操作。
#### 使用说明 - 答疑及交流:
- 答疑论坛(官方定期答疑,推荐):[阿木社区-SpireCV问答专区](https://bbs.amovlab.com/)
- 添加微信jiayue199506备注消息SpireCV进入SpireCV智能感知算法库交流群。
- B站搜索并关注“阿木社区”开发团队定期直播答疑。
1. xxxx ## 项目框架
2. xxxx
3. xxxx
#### 参与贡献 #### 主要框架如图所示:
1. Fork 本仓库 <img width="640" src="https://pic.imgdb.cn/item/64802a0d1ddac507cca5223c.jpg"/>
2. 新建 Feat_xxx 分支
3. 提交代码
4. 新建 Pull Request
#### 目前支持情况:
- **功能层**
- [x] 视频算法模块(提供接口统一、性能高效、功能多样的感知算法)
- [x] 视频输入、保存与推流模块(提供稳定、跨平台的视频读写能力)
- [x] 相机、吊舱控制模块(针对典型硬件生态打通接口,易使用)
- [x] 感知信息交互模块提供UDP通信协议
- [x] [ROS接口](https://gitee.com/amovlab1/spirecv-ros.git)
- **平台层**
- [x] X86+Nvidia GPU推荐10系、20系、30系显卡
- [x] JetsonAGX Orin/Xavier、Orin NX/Nano、Xavier NX
- [ ] Intel CPU推进中
- [ ] Rockchip推进中
- [ ] HUAWEI Ascend推进中
#### 特技 ## 功能展示
- **二维码检测**
<img width="400" src="https://pic.imgdb.cn/item/648bc90a1ddac507cc759c26.gif"/>
- **起降标志检测**
<img width="400" src="https://pic.imgdb.cn/item/648bc90a1ddac507cc759e6f.gif"/>
- **椭圆检测**
<img width="400" src="https://pic.imgdb.cn/item/648bc90c1ddac507cc75a0e2.gif"/>
- **目标框选跟踪**
<img width="400" src="https://pic.imgdb.cn/item/648bc90b1ddac507cc75a003.gif"/>
- **通用目标检测**
<img width="400" src="https://pic.imgdb.cn/item/648bc9151ddac507cc75b26e.gif"/>
- **低延迟推流**
<img width="400" src="https://pic.imgdb.cn/item/648bc9091ddac507cc759b1e.gif"/>
1. 使用 Readme\_XXX.md 来支持不同的语言,例如 Readme\_en.md, Readme\_zh.md ## 版权声明
2. Gitee 官方博客 [blog.gitee.com](https://blog.gitee.com)
3. 你可以 [https://gitee.com/explore](https://gitee.com/explore) 这个地址来了解 Gitee 上的优秀开源项目 - 本项目受 Apache License 2.0 协议保护。
4. [GVP](https://gitee.com/gvp) 全称是 Gitee 最有价值开源项目,是综合评定出的优秀开源项目 - 本项目仅限个人使用,请勿用于商业用途。
5. Gitee 官方提供的使用手册 [https://gitee.com/help](https://gitee.com/help) - 如利用本项目进行营利活动,阿木实验室将追究侵权行为。
6. Gitee 封面人物是一档用来展示 Gitee 会员风采的栏目 [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/)

View File

@ -0,0 +1,310 @@
#include "common_det_cuda_impl.h"

#include <cmath>
#include <fstream>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>
// Model and data directories, resolved relative to the user's home
// directory by get_home() at the call sites below.
#define SV_MODEL_DIR "/SpireCV/models/"
#define SV_ROOT_DIR "/SpireCV/"
#ifdef WITH_CUDA
#include "yolov7/cuda_utils.h"
#include "yolov7/logging.h"
#include "yolov7/utils.h"
#include "yolov7/preprocess.h"
#include "yolov7/postprocess.h"
#include "yolov7/model.h"
// Abort on a non-zero CUDA/TensorRT status code.
// NOTE(review): not referenced anywhere in this file's visible code;
// CUDA_CHECK (from yolov7/cuda_utils.h) is used instead.
#define TRTCHECK(status) \
do \
{ \
auto ret = (status); \
if (ret != 0) \
{ \
std::cerr << "Cuda failure: " << ret << std::endl; \
abort(); \
} \
} while (0)
#define DEVICE 0 // GPU id
#define BATCH_SIZE 1
// NOTE(review): expansion is unparenthesized (3000 * 3000); safe in the
// current usages but fragile if ever used inside a larger expression.
#define MAX_IMAGE_INPUT_SIZE_THRESH 3000 * 3000 // ensure it exceed the maximum size in the input images !
#endif
namespace sv {
using namespace cv;
#ifdef WITH_CUDA
using namespace nvinfer1;
// Shared TensorRT logger instance for engine deserialization.
static Logger g_nvlogger;
// Host/device output tensor sizes, in floats:
// kOutputSize / kOutputSize1: detection output (count slot + fixed-size
// Detection records); kOutputSize2: 32-channel 160x160 mask prototype
// tensor produced by the segmentation head (assumes 640x640 input).
const static int kOutputSize = kMaxNumOutputBbox * sizeof(Detection) / sizeof(float) + 1;
const static int kOutputSize1 = kMaxNumOutputBbox * sizeof(Detection) / sizeof(float) + 1;
const static int kOutputSize2 = 32 * (640 / 4) * (640 / 4);
#endif
// Construct with every device buffer, host buffer and TensorRT handle
// null; real allocation happens later in cudaSetup(). The destructor
// relies on these nulls to skip freeing resources that were never made.
CommonObjectDetectorCUDAImpl::CommonObjectDetectorCUDAImpl()
{
#ifdef WITH_CUDA
    for (int i = 0; i < 3; ++i)
        _gpu_buffers[i] = nullptr;
    _cpu_output_buffer = nullptr;
    _cpu_output_buffer1 = nullptr;
    _cpu_output_buffer2 = nullptr;
    _context = nullptr;
    _engine = nullptr;
    _runtime = nullptr;
#endif
}
// Tear down in reverse order of cudaSetup(): stream, device buffers,
// host buffers, preprocessing state, then the TensorRT objects
// (context before engine before runtime, as TensorRT requires).
CommonObjectDetectorCUDAImpl::~CommonObjectDetectorCUDAImpl()
{
#ifdef WITH_CUDA
// Release stream and buffers
// NOTE(review): _stream is destroyed unconditionally even if cudaSetup()
// was never called and the stream was never created — confirm harmless.
cudaStreamDestroy(_stream);
if (_gpu_buffers[0])
CUDA_CHECK(cudaFree(_gpu_buffers[0]));
if (_gpu_buffers[1])
CUDA_CHECK(cudaFree(_gpu_buffers[1]));
if (_gpu_buffers[2])
CUDA_CHECK(cudaFree(_gpu_buffers[2]));
if (_cpu_output_buffer)
delete[] _cpu_output_buffer;
if (_cpu_output_buffer1)
delete[] _cpu_output_buffer1;
if (_cpu_output_buffer2)
delete[] _cpu_output_buffer2;
cuda_preprocess_destroy();
// Destroy the engine
if (_context)
_context->destroy();
if (_engine)
_engine->destroy();
if (_runtime)
_runtime->destroy();
#endif
}
#ifdef WITH_CUDA
// Run one detection inference pass. The preprocessed input batch must
// already be resident in gpu_buffers[0]; the raw detection tensor in
// gpu_buffers[1] is copied back into `output` and the stream is drained
// before returning, so `output` is ready for host-side NMS.
void infer(IExecutionContext& context, cudaStream_t& stream, void** gpu_buffers, float* output, int batchsize) {
    context.enqueue(batchsize, gpu_buffers, stream, nullptr);
    const size_t det_bytes = static_cast<size_t>(batchsize) * kOutputSize * sizeof(float);
    CUDA_CHECK(cudaMemcpyAsync(output, gpu_buffers[1], det_bytes, cudaMemcpyDeviceToHost, stream));
    cudaStreamSynchronize(stream);
}
// Run one segmentation inference pass. Copies back both outputs — the
// detection tensor (buffers[1]) and the mask prototype tensor
// (buffers[2]) — then blocks until the stream has drained.
void infer_seg(IExecutionContext& context, cudaStream_t& stream, void **buffers, float* output1, float* output2, int batchSize) {
    context.enqueue(batchSize, buffers, stream, nullptr);
    const size_t det_bytes   = static_cast<size_t>(batchSize) * kOutputSize1 * sizeof(float);
    const size_t proto_bytes = static_cast<size_t>(batchSize) * kOutputSize2 * sizeof(float);
    CUDA_CHECK(cudaMemcpyAsync(output1, buffers[1], det_bytes, cudaMemcpyDeviceToHost, stream));
    CUDA_CHECK(cudaMemcpyAsync(output2, buffers[2], proto_bytes, cudaMemcpyDeviceToHost, stream));
    cudaStreamSynchronize(stream);
}
// Allocate the GPU input/output buffers and host output buffer for the
// plain detection engine (2 bindings: input image, detection tensor).
// input_h/input_w: network input dimensions in pixels.
void CommonObjectDetectorCUDAImpl::_prepare_buffers(int input_h, int input_w) {
assert(this->_engine->getNbBindings() == 2);
// In order to bind the buffers, we need to know the names of the input and output tensors.
// Note that indices are guaranteed to be less than IEngine::getNbBindings()
const int inputIndex = this->_engine->getBindingIndex(kInputTensorName);
const int outputIndex = this->_engine->getBindingIndex(kOutputTensorName);
assert(inputIndex == 0);
assert(outputIndex == 1);
// Create GPU buffers on device
CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[0]), kBatchSize * 3 * input_h * input_w * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[1]), kBatchSize * kOutputSize * sizeof(float)));
this->_cpu_output_buffer = new float[kBatchSize * kOutputSize];
}
// Allocate buffers for the segmentation engine (3 bindings: input image,
// detection tensor, and the "proto" mask prototype tensor).
// input_h/input_w: network input dimensions in pixels (kOutputSize2
// assumes 640x640 — see the constant's definition above).
void CommonObjectDetectorCUDAImpl::_prepare_buffers_seg(int input_h, int input_w) {
assert(this->_engine->getNbBindings() == 3);
// In order to bind the buffers, we need to know the names of the input and output tensors.
// Note that indices are guaranteed to be less than IEngine::getNbBindings()
const int inputIndex = this->_engine->getBindingIndex(kInputTensorName);
const int outputIndex1 = this->_engine->getBindingIndex(kOutputTensorName);
const int outputIndex2 = this->_engine->getBindingIndex("proto");
assert(inputIndex == 0);
assert(outputIndex1 == 1);
assert(outputIndex2 == 2);
// Create GPU buffers on device
CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[0]), kBatchSize * 3 * input_h * input_w * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[1]), kBatchSize * kOutputSize1 * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[2]), kBatchSize * kOutputSize2 * sizeof(float)));
// Alloc CPU buffers
this->_cpu_output_buffer1 = new float[kBatchSize * kOutputSize1];
this->_cpu_output_buffer2 = new float[kBatchSize * kOutputSize2];
}
// Load a serialized TensorRT engine from disk and construct the runtime,
// engine and execution context. On return all three out-pointers are
// non-null; ownership passes to the caller (released via ->destroy() in
// the impl destructor).
//
// BUGFIX: the original reported I/O errors with assert(), which is
// compiled out under NDEBUG — and this project defaults to a Release
// build — so a missing or truncated file silently continued with garbage.
// I/O failures now throw std::runtime_error. The raw new[] buffer (leaked
// on early exit) is replaced with std::vector.
void deserialize_engine(std::string& engine_name, IRuntime** runtime, ICudaEngine** engine, IExecutionContext** context) {
    std::ifstream file(engine_name, std::ios::binary);
    if (!file.good()) {
        std::cerr << "read " << engine_name << " error!" << std::endl;
        throw std::runtime_error("SpireCV: cannot open TensorRT engine file: " + engine_name);
    }
    file.seekg(0, file.end);
    const std::streamsize size = file.tellg();
    file.seekg(0, file.beg);
    if (size <= 0) {
        throw std::runtime_error("SpireCV: empty or unreadable TensorRT engine file: " + engine_name);
    }
    std::vector<char> serialized_engine(static_cast<size_t>(size));
    if (!file.read(serialized_engine.data(), size)) {
        throw std::runtime_error("SpireCV: failed to read TensorRT engine file: " + engine_name);
    }
    file.close();
    *runtime = createInferRuntime(g_nvlogger);
    assert(*runtime);
    *engine = (*runtime)->deserializeCudaEngine(serialized_engine.data(), serialized_engine.size());
    assert(*engine);
    *context = (*engine)->createExecutionContext();
    assert(*context);
}
#endif
// Run single-image detection (and optionally segmentation) and append the
// results to the caller-supplied output vectors: one entry per kept box
// (x, y, w, h in pixels of img_, class label, confidence, and — when the
// model has a segmentation head — a per-box mask Mat).
// Boxes are clipped to the image and discarded if <= 5px in either
// dimension. No-op unless compiled WITH_CUDA.
void CommonObjectDetectorCUDAImpl::cudaDetect(
CommonObjectDetectorBase* base_,
cv::Mat img_,
std::vector<float>& boxes_x_,
std::vector<float>& boxes_y_,
std::vector<float>& boxes_w_,
std::vector<float>& boxes_h_,
std::vector<int>& boxes_label_,
std::vector<float>& boxes_score_,
std::vector<cv::Mat>& boxes_seg_
)
{
#ifdef WITH_CUDA
int input_h = base_->getInputH();
int input_w = base_->getInputW();
bool with_segmentation = base_->withSegmentation();
double thrs_conf = base_->getThrsConf();
double thrs_nms = base_->getThrsNms();
// A batch of one: the single input frame.
std::vector<cv::Mat> img_batch;
img_batch.push_back(img_);
// Preprocess: letterbox/normalize directly into the engine's input buffer.
cuda_batch_preprocess(img_batch, this->_gpu_buffers[0], input_w, input_h, this->_stream);
// Run inference
if (with_segmentation)
{
infer_seg(*this->_context, this->_stream, (void**)this->_gpu_buffers, this->_cpu_output_buffer1, this->_cpu_output_buffer2, kBatchSize);
}
else
{
infer(*this->_context, this->_stream, (void**)this->_gpu_buffers, this->_cpu_output_buffer, kBatchSize);
}
// NMS on the host, using the detector's configured thresholds.
std::vector<std::vector<Detection>> res_batch;
if (with_segmentation)
{
batch_nms(res_batch, this->_cpu_output_buffer1, img_batch.size(), kOutputSize1, thrs_conf, thrs_nms);
}
else
{
batch_nms(res_batch, this->_cpu_output_buffer, img_batch.size(), kOutputSize, thrs_conf, thrs_nms);
}
std::vector<Detection> res = res_batch[0];
std::vector<cv::Mat> masks;
if (with_segmentation)
{
// Combine each detection's mask coefficients with the prototype tensor.
masks = process_mask(&(this->_cpu_output_buffer2[0]), kOutputSize2, res, input_h, input_w);
}
// Map detections back to original image coordinates, clip, and filter.
for (size_t j = 0; j < res.size(); j++) {
cv::Rect r = get_rect(img_, res[j].bbox, input_h, input_w);
if (r.x < 0) r.x = 0;
if (r.y < 0) r.y = 0;
if (r.x + r.width >= img_.cols) r.width = img_.cols - r.x - 1;
if (r.y + r.height >= img_.rows) r.height = img_.rows - r.y - 1;
if (r.width > 5 && r.height > 5)
{
// cv::rectangle(img_show, r, cv::Scalar(0, 0, 255), 2);
// cv::putText(img_show, vehiclenames[(int)res[j].class_id], cv::Point(r.x, r.y - 1), cv::FONT_HERSHEY_PLAIN, 2.2, cv::Scalar(0, 0, 255), 2);
boxes_x_.push_back(r.x);
boxes_y_.push_back(r.y);
boxes_w_.push_back(r.width);
boxes_h_.push_back(r.height);
boxes_label_.push_back((int)res[j].class_id);
boxes_score_.push_back(res[j].conf);
if (with_segmentation)
{
// Masks are index-aligned with res; clone so the caller owns the data.
cv::Mat mask_j = masks[j].clone();
boxes_seg_.push_back(mask_j);
}
}
}
#endif
}
// Loads the TensorRT engine matching the detector's dataset / resolution /
// task, creates the CUDA stream and allocates host+device I/O buffers.
// Returns true on success; returns false (without doing anything) when the
// library was built without CUDA support. Throws std::runtime_error when the
// engine file is missing.
bool CommonObjectDetectorCUDAImpl::cudaSetup(CommonObjectDetectorBase* base_)
{
#ifdef WITH_CUDA
    std::string dataset = base_->getDataset();
    int input_h = base_->getInputH();
    int input_w = base_->getInputW();
    bool with_segmentation = base_->withSegmentation();

    // Default engine; 1280-wide inputs use the "_HD" variant.
    std::string engine_fn = get_home() + SV_MODEL_DIR + dataset + ".engine";
    if (input_w == 1280)
    {
        engine_fn = get_home() + SV_MODEL_DIR + dataset + "_HD.engine";
    }
    if (with_segmentation)
    {
        // Segmentation engines are always built for 640x640 input.
        base_->setInputH(640);
        base_->setInputW(640);
        // BUG FIX: refresh the locals too — they were read before the
        // setInputH/W(640) calls above, so the buffer allocations below were
        // sized for the stale resolution (e.g. 1280) instead of 640.
        input_h = 640;
        input_w = 640;
        engine_fn = get_home() + SV_MODEL_DIR + dataset + "_SEG.engine";
    }
    std::cout << "Load: " << engine_fn << std::endl;
    if (!is_file_exist(engine_fn))
    {
        throw std::runtime_error("SpireCV (104) Error loading the CommonObject TensorRT model (File Not Exist)");
    }

    // Deserialize runtime/engine/context and create the inference stream.
    deserialize_engine(engine_fn, &this->_runtime, &this->_engine, &this->_context);
    CUDA_CHECK(cudaStreamCreate(&this->_stream));

    // Init CUDA preprocessing (pinned staging buffer for input images).
    cuda_preprocess_init(kMaxInputImageSize);

    if (with_segmentation)
    {
        // Prepare cpu and gpu buffers (detection + mask-prototype outputs).
        this->_prepare_buffers_seg(input_h, input_w);
    }
    else
    {
        // Prepare cpu and gpu buffers (detection output only).
        this->_prepare_buffers(input_h, input_w);
    }
    return true;
#endif
    return false;
}
}

View File

@ -0,0 +1,58 @@
#ifndef __SV_COMMON_DET_CUDA__
#define __SV_COMMON_DET_CUDA__
#include "sv_core.h"
#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/tracking.hpp>
#include <string>
#include <chrono>
#ifdef WITH_CUDA
#include <NvInfer.h>
#include <cuda_runtime_api.h>
#endif
namespace sv {

// CUDA/TensorRT backend for CommonObjectDetectorBase.
// Owns the TensorRT runtime, engine, execution context, the CUDA stream and
// the host/device I/O buffers used during inference. All members are only
// meaningful after a successful cudaSetup().
class CommonObjectDetectorCUDAImpl
{
public:
CommonObjectDetectorCUDAImpl();
~CommonObjectDetectorCUDAImpl();

// Loads the engine matching base_'s dataset/resolution/task and allocates
// all buffers. Returns false when built without CUDA.
bool cudaSetup(CommonObjectDetectorBase* base_);
// Runs one inference pass on img_ and appends detections to the vectors.
void cudaDetect(
CommonObjectDetectorBase* base_,
cv::Mat img_,
std::vector<float>& boxes_x_,
std::vector<float>& boxes_y_,
std::vector<float>& boxes_w_,
std::vector<float>& boxes_h_,
std::vector<int>& boxes_label_,
std::vector<float>& boxes_score_,
std::vector<cv::Mat>& boxes_seg_
);
#ifdef WITH_CUDA
// Buffer allocation for segmentation engines (two output tensors).
void _prepare_buffers_seg(int input_h, int input_w);
// Buffer allocation for detection-only engines (one output tensor).
void _prepare_buffers(int input_h, int input_w);
nvinfer1::IExecutionContext* _context;
nvinfer1::IRuntime* _runtime;
nvinfer1::ICudaEngine* _engine;
cudaStream_t _stream;
// [0] = device input; [1], [2] = device outputs (usage depends on task).
float* _gpu_buffers[3];
// Host-side output staging: _cpu_output_buffer for detection-only engines,
// _cpu_output_buffer1/2 for segmentation engines.
float* _cpu_output_buffer;
float* _cpu_output_buffer1;
float* _cpu_output_buffer2;
#endif
};
}
#endif

View File

@ -0,0 +1,97 @@
#include "calibrator.h"
#include "cuda_utils.h"
#include "utils.h"
#include <iostream>
#include <iterator>
#include <fstream>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn/dnn.hpp>
// Letterbox preprocessing: resize `img` to fit inside (input_w, input_h)
// while preserving aspect ratio, then center it on a gray (128,128,128)
// canvas of exactly (input_w, input_h).
static cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h) {
    int resized_w, resized_h, pad_x, pad_y;
    float ratio_w = input_w / (img.cols * 1.0);
    float ratio_h = input_h / (img.rows * 1.0);
    if (ratio_h > ratio_w) {
        // Width is the limiting dimension: fill it, pad top/bottom.
        resized_w = input_w;
        resized_h = ratio_w * img.rows;
        pad_x = 0;
        pad_y = (input_h - resized_h) / 2;
    } else {
        // Height is the limiting dimension: fill it, pad left/right.
        resized_w = ratio_h * img.cols;
        resized_h = input_h;
        pad_x = (input_w - resized_w) / 2;
        pad_y = 0;
    }
    cv::Mat resized(resized_h, resized_w, CV_8UC3);
    cv::resize(img, resized, resized.size(), 0, 0, cv::INTER_LINEAR);
    cv::Mat canvas(input_h, input_w, CV_8UC3, cv::Scalar(128, 128, 128));
    resized.copyTo(canvas(cv::Rect(pad_x, pad_y, resized.cols, resized.rows)));
    return canvas;
}
// Constructs an INT8 entropy calibrator that feeds letterboxed image batches
// from img_dir to TensorRT during calibration. Allocates the device-side
// input buffer and enumerates the calibration image filenames up front.
Int8EntropyCalibrator2::Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache)
: batchsize_(batchsize),
input_w_(input_w),
input_h_(input_h),
img_idx_(0),
img_dir_(img_dir),
calib_table_name_(calib_table_name),
input_blob_name_(input_blob_name),
read_cache_(read_cache) {
// CHW float elements per batch (3 channels).
input_count_ = 3 * input_w * input_h * batchsize;
CUDA_CHECK(cudaMalloc(&device_input_, input_count_ * sizeof(float)));
// Collect calibration image names from the directory.
read_files_in_dir(img_dir, img_files_);
}
// Releases the device-side input buffer allocated in the constructor.
Int8EntropyCalibrator2::~Int8EntropyCalibrator2() {
CUDA_CHECK(cudaFree(device_input_));
}
// Reports the calibration batch size configured at construction time.
int Int8EntropyCalibrator2::getBatchSize() const TRT_NOEXCEPT {
return batchsize_;
}
// Supplies the next calibration batch to TensorRT: loads batchsize_ images,
// letterboxes them, converts to a normalized NCHW blob (BGR->RGB), copies it
// to the device and exposes it via bindings[0].
// Returns false when the image list is exhausted or an image fails to load,
// which ends calibration.
bool Int8EntropyCalibrator2::getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT {
if (img_idx_ + batchsize_ > (int)img_files_.size()) {
return false;
}
std::vector<cv::Mat> input_imgs_;
for (int i = img_idx_; i < img_idx_ + batchsize_; i++) {
std::cout << img_files_[i] << " " << i << std::endl;
// NOTE(review): img_dir_ and the filename are concatenated with no path
// separator — assumes img_dir was passed with a trailing '/'; confirm
// against the callers of this constructor.
cv::Mat temp = cv::imread(img_dir_ + img_files_[i]);
if (temp.empty()) {
std::cerr << "Fatal error: image cannot open!" << std::endl;
return false;
}
cv::Mat pr_img = preprocess_img(temp, input_w_, input_h_);
input_imgs_.push_back(pr_img);
}
img_idx_ += batchsize_;
// scale 1/255, swapRB=true (BGR->RGB), no crop.
cv::Mat blob = cv::dnn::blobFromImages(input_imgs_, 1.0 / 255.0, cv::Size(input_w_, input_h_), cv::Scalar(0, 0, 0), true, false);
CUDA_CHECK(cudaMemcpy(device_input_, blob.ptr<float>(0), input_count_ * sizeof(float), cudaMemcpyHostToDevice));
// TensorRT hands us binding names; we only serve the configured input blob.
assert(!strcmp(names[0], input_blob_name_));
bindings[0] = device_input_;
return true;
}
// Returns a previously written calibration table (if caching is enabled and
// the file exists) so TensorRT can skip recalibration. Returns nullptr to
// force a fresh calibration run.
const void* Int8EntropyCalibrator2::readCalibrationCache(size_t& length) TRT_NOEXCEPT {
std::cout << "reading calib cache: " << calib_table_name_ << std::endl;
calib_cache_.clear();
std::ifstream input(calib_table_name_, std::ios::binary);
// noskipws so binary bytes are copied verbatim by istream_iterator.
input >> std::noskipws;
if (read_cache_ && input.good()) {
std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(), std::back_inserter(calib_cache_));
}
length = calib_cache_.size();
return length ? calib_cache_.data() : nullptr;
}
// Persists the calibration table produced by TensorRT so later builds can
// reuse it via readCalibrationCache().
void Int8EntropyCalibrator2::writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT {
std::cout << "writing calib cache: " << calib_table_name_ << " size: " << length << std::endl;
std::ofstream output(calib_table_name_, std::ios::binary);
output.write(reinterpret_cast<const char*>(cache), length);
}

View File

@ -0,0 +1,36 @@
#pragma once
#include "macros.h"
#include <string>
#include <vector>
//! \class Int8EntropyCalibrator2
//!
//! \brief Implements Entropy calibrator 2.
//! CalibrationAlgoType is kENTROPY_CALIBRATION_2.
//!
class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2 {
public:
// img_dir: directory of calibration images; calib_table_name: path of the
// cache file; input_blob_name: name of the engine's input binding;
// read_cache: reuse an existing calibration table when present.
Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache = true);
virtual ~Int8EntropyCalibrator2();
int getBatchSize() const TRT_NOEXCEPT override;
bool getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT override;
const void* readCalibrationCache(size_t& length) TRT_NOEXCEPT override;
void writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT override;
private:
int batchsize_;
int input_w_;
int input_h_;
int img_idx_;           // index of the next unread calibration image
std::string img_dir_;
std::vector<std::string> img_files_;
size_t input_count_;    // floats per batch: 3 * w * h * batchsize
std::string calib_table_name_;
const char* input_blob_name_;
bool read_cache_;
void* device_input_;    // device-side (cudaMalloc) input buffer
std::vector<char> calib_cache_;  // in-memory copy of the calibration table
};

View File

@ -0,0 +1,55 @@
#pragma once
/* --------------------------------------------------------
 * These configs are related to tensorrt model, if these are changed,
 * please re-compile and re-serialize the tensorrt model.
 * --------------------------------------------------------*/
// For INT8, you need prepare the calibration dataset, please refer to
// https://github.com/wang-xinyu/tensorrtx/tree/master/yolov5#int8-quantization
#define USE_FP16 // set USE_INT8 or USE_FP16 or USE_FP32
// These are used to define input/output tensor names,
// you can set them to whatever you want.
const static char* kInputTensorName = "data";
const static char* kOutputTensorName = "prob";
// Detection and segmentation models' number of classes
// constexpr static int kNumClass = 80;
// Classification model's number of classes
constexpr static int kClsNumClass = 1000;
// Engines in this project are built and run with batch size 1.
constexpr static int kBatchSize = 1;
// Yolo's input width and height must be divisible by 32
// constexpr static int kInputH = 640;
// constexpr static int kInputW = 640;
// Classification model's input shape
constexpr static int kClsInputH = 224;
constexpr static int kClsInputW = 224;
// Maximum number of output bounding boxes from yololayer plugin.
// That is maximum number of output bounding boxes before NMS.
constexpr static int kMaxNumOutputBbox = 1000;
constexpr static int kNumAnchor = 3;
// The bboxes whose confidence is lower than kIgnoreThresh will be ignored in yololayer plugin.
constexpr static float kIgnoreThresh = 0.1f;
/* --------------------------------------------------------
 * These configs are NOT related to tensorrt model, if these are changed,
 * please re-compile, but no need to re-serialize the tensorrt model.
 * --------------------------------------------------------*/
// NMS overlapping thresh and final detection confidence thresh
const static float kNmsThresh = 0.45f;
const static float kConfThresh = 0.5f;
// CUDA device ordinal to run inference on.
const static int kGpuId = 0;
// If your image size is larger than 4096 * 3112, please increase this value
const static int kMaxInputImageSize = 4096 * 3112;

View File

@ -0,0 +1,18 @@
#ifndef TRTX_CUDA_UTILS_H_
#define TRTX_CUDA_UTILS_H_

#include <cuda_runtime_api.h>
// BUG FIX: CUDA_CHECK uses std::cerr and assert(), but this header did not
// include their headers — any TU including it without <iostream>/<cassert>
// failed to compile. Make the header self-contained.
#include <cassert>
#include <iostream>

// Evaluates a CUDA runtime call; on failure prints the error code with the
// source location and aborts via assert(0).
#ifndef CUDA_CHECK
#define CUDA_CHECK(callstr)\
    {\
        cudaError_t error_code = callstr;\
        if (error_code != cudaSuccess) {\
            std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__;\
            assert(0);\
        }\
    }
#endif  // CUDA_CHECK

#endif  // TRTX_CUDA_UTILS_H_

View File

@ -0,0 +1,504 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TENSORRT_LOGGING_H
#define TENSORRT_LOGGING_H
#include "NvInferRuntimeCommon.h"
#include <cassert>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <sstream>
#include <string>
#include "macros.h"
using Severity = nvinfer1::ILogger::Severity;
// stringbuf that, when flushed, prefixes the buffered text with a timestamp
// (written to std::cout) and a severity prefix, then forwards it to the
// wrapped output stream.
class LogStreamConsumerBuffer : public std::stringbuf
{
public:
    LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog)
        : mOutput(stream)
        , mPrefix(prefix)
        , mShouldLog(shouldLog)
    {
    }

    // BUG FIX: the original move constructor initialized only mOutput,
    // leaving mPrefix empty and mShouldLog uninitialized — reading the
    // indeterminate bool in putOutput() is undefined behavior. Carry all
    // members over from the source object.
    LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other)
        : mOutput(other.mOutput)
        , mPrefix(other.mPrefix)
        , mShouldLog(other.mShouldLog)
    {
    }

    ~LogStreamConsumerBuffer()
    {
        // std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence
        // std::streambuf::pptr() gives a pointer to the current position of the output sequence
        // if the pointer to the beginning is not equal to the pointer to the current position,
        // call putOutput() to log the output to the stream
        if (pbase() != pptr())
        {
            putOutput();
        }
    }

    // synchronizes the stream buffer and returns 0 on success
    // synchronizing the stream buffer consists of inserting the buffer contents into the stream,
    // resetting the buffer and flushing the stream
    virtual int sync()
    {
        putOutput();
        return 0;
    }

    void putOutput()
    {
        if (mShouldLog)
        {
            // prepend timestamp
            // NOTE(review): the timestamp is written to std::cout even when
            // mOutput is a different stream (e.g. std::cerr) — upstream NVIDIA
            // sample behavior, deliberately preserved here.
            std::time_t timestamp = std::time(nullptr);
            tm* tm_local = std::localtime(&timestamp);
            std::cout << "[";
            std::cout << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/";
            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/";
            std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-";
            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":";
            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":";
            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] ";
            // std::stringbuf::str() gets the string contents of the buffer
            // insert the buffer contents pre-appended by the appropriate prefix into the stream
            mOutput << mPrefix << str();
            // set the buffer to empty
            str("");
            // flush the stream
            mOutput.flush();
        }
    }

    void setShouldLog(bool shouldLog)
    {
        mShouldLog = shouldLog;
    }

private:
    std::ostream& mOutput;
    std::string mPrefix;
    bool mShouldLog;
};
//!
//! \class LogStreamConsumerBase
//! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer
//!
class LogStreamConsumerBase
{
public:
// Wraps construction of the buffer so a derived class can initialize it
// before its std::ostream base (see LogStreamConsumer below).
LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog)
: mBuffer(stream, prefix, shouldLog)
{
}
protected:
LogStreamConsumerBuffer mBuffer;
};
//!
//! \class LogStreamConsumer
//! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages.
//! Order of base classes is LogStreamConsumerBase and then std::ostream.
//! This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field
//! in LogStreamConsumer and then the address of the buffer is passed to std::ostream.
//! This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream.
//! Please do not change the order of the parent classes.
//!
class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream
{
public:
//! \brief Creates a LogStreamConsumer which logs messages with level severity.
//! Reportable severity determines if the messages are severe enough to be logged.
LogStreamConsumer(Severity reportableSeverity, Severity severity)
: LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity)
, std::ostream(&mBuffer) // links the stream buffer with the stream
, mShouldLog(severity <= reportableSeverity)
, mSeverity(severity)
{
}
// Move constructor: rebuilds the buffer from the source's severity/flags
// (base-class order guarantees mBuffer exists before std::ostream sees it).
LogStreamConsumer(LogStreamConsumer&& other)
: LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog)
, std::ostream(&mBuffer) // links the stream buffer with the stream
, mShouldLog(other.mShouldLog)
, mSeverity(other.mSeverity)
{
}
// Re-evaluates whether this consumer's severity passes the new threshold
// and propagates the decision into the buffer.
void setReportableSeverity(Severity reportableSeverity)
{
mShouldLog = mSeverity <= reportableSeverity;
mBuffer.setShouldLog(mShouldLog);
}
private:
// kINFO and below go to stdout; warnings/errors go to stderr.
static std::ostream& severityOstream(Severity severity)
{
return severity >= Severity::kINFO ? std::cout : std::cerr;
}
// Short tag prepended to every message of the given severity.
static std::string severityPrefix(Severity severity)
{
switch (severity)
{
case Severity::kINTERNAL_ERROR: return "[F] ";
case Severity::kERROR: return "[E] ";
case Severity::kWARNING: return "[W] ";
case Severity::kINFO: return "[I] ";
case Severity::kVERBOSE: return "[V] ";
default: assert(0); return "";
}
}
bool mShouldLog;
Severity mSeverity;
};
//! \class Logger
//!
//! \brief Class which manages logging of TensorRT tools and samples
//!
//! \details This class provides a common interface for TensorRT tools and samples to log information to the console,
//! and supports logging two types of messages:
//!
//! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal)
//! - Test pass/fail messages
//!
//! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is
//! that the logic for controlling the verbosity and formatting of sample output is centralized in one location.
//!
//! In the future, this class could be extended to support dumping test results to a file in some standard format
//! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run).
//!
//! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger
//! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT
//! library and messages coming from the sample.
//!
//! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the
//! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger
//! object.
class Logger : public nvinfer1::ILogger
{
public:
// Messages below `severity` are suppressed; default shows warnings and up.
Logger(Severity severity = Severity::kWARNING)
: mReportableSeverity(severity)
{
}
//!
//! \enum TestResult
//! \brief Represents the state of a given test
//!
enum class TestResult
{
kRUNNING, //!< The test is running
kPASSED, //!< The test passed
kFAILED, //!< The test failed
kWAIVED //!< The test was waived
};
//!
//! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger
//! \return The nvinfer1::ILogger associated with this Logger
//!
//! TODO Once all samples are updated to use this method to register the logger with TensorRT,
//! we can eliminate the inheritance of Logger from ILogger
//!
nvinfer1::ILogger& getTRTLogger()
{
return *this;
}
//!
//! \brief Implementation of the nvinfer1::ILogger::log() virtual method
//!
//! Note samples should not be calling this function directly; it will eventually go away once we eliminate the
//! inheritance from nvinfer1::ILogger
//!
void log(Severity severity, const char* msg) TRT_NOEXCEPT override
{
LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl;
}
//!
//! \brief Method for controlling the verbosity of logging output
//!
//! \param severity The logger will only emit messages that have severity of this level or higher.
//!
void setReportableSeverity(Severity severity)
{
mReportableSeverity = severity;
}
//!
//! \brief Opaque handle that holds logging information for a particular test
//!
//! This object is an opaque handle to information used by the Logger to print test results.
//! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used
//! with Logger::reportTest{Start,End}().
//!
class TestAtom
{
public:
TestAtom(TestAtom&&) = default;
private:
friend class Logger;
TestAtom(bool started, const std::string& name, const std::string& cmdline)
: mStarted(started)
, mName(name)
, mCmdline(cmdline)
{
}
bool mStarted;
std::string mName;
std::string mCmdline;
};
//!
//! \brief Define a test for logging
//!
//! \param[in] name The name of the test. This should be a string starting with
//! "TensorRT" and containing dot-separated strings containing
//! the characters [A-Za-z0-9_].
//! For example, "TensorRT.sample_googlenet"
//! \param[in] cmdline The command line used to reproduce the test
//!
//! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
//!
static TestAtom defineTest(const std::string& name, const std::string& cmdline)
{
return TestAtom(false, name, cmdline);
}
//!
//! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments
//! as input
//!
//! \param[in] name The name of the test
//! \param[in] argc The number of command-line arguments
//! \param[in] argv The array of command-line arguments (given as C strings)
//!
//! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
static TestAtom defineTest(const std::string& name, int argc, char const* const* argv)
{
auto cmdline = genCmdlineString(argc, argv);
return defineTest(name, cmdline);
}
//!
//! \brief Report that a test has started.
//!
//! \pre reportTestStart() has not been called yet for the given testAtom
//!
//! \param[in] testAtom The handle to the test that has started
//!
static void reportTestStart(TestAtom& testAtom)
{
// NOTE(review): the RUNNING line is emitted before the !mStarted assert;
// a double start therefore logs first and aborts second — upstream order, kept.
reportTestResult(testAtom, TestResult::kRUNNING);
assert(!testAtom.mStarted);
testAtom.mStarted = true;
}
//!
//! \brief Report that a test has ended.
//!
//! \pre reportTestStart() has been called for the given testAtom
//!
//! \param[in] testAtom The handle to the test that has ended
//! \param[in] result The result of the test. Should be one of TestResult::kPASSED,
//! TestResult::kFAILED, TestResult::kWAIVED
//!
static void reportTestEnd(const TestAtom& testAtom, TestResult result)
{
assert(result != TestResult::kRUNNING);
assert(testAtom.mStarted);
reportTestResult(testAtom, result);
}
//! \brief Report a PASSED end state; returns EXIT_SUCCESS for use from main().
static int reportPass(const TestAtom& testAtom)
{
reportTestEnd(testAtom, TestResult::kPASSED);
return EXIT_SUCCESS;
}
//! \brief Report a FAILED end state; returns EXIT_FAILURE for use from main().
static int reportFail(const TestAtom& testAtom)
{
reportTestEnd(testAtom, TestResult::kFAILED);
return EXIT_FAILURE;
}
//! \brief Report a WAIVED end state; returns EXIT_SUCCESS for use from main().
static int reportWaive(const TestAtom& testAtom)
{
reportTestEnd(testAtom, TestResult::kWAIVED);
return EXIT_SUCCESS;
}
//! \brief Report pass/fail from a boolean and return the matching exit code.
static int reportTest(const TestAtom& testAtom, bool pass)
{
return pass ? reportPass(testAtom) : reportFail(testAtom);
}
Severity getReportableSeverity() const
{
return mReportableSeverity;
}
private:
//!
//! \brief returns an appropriate string for prefixing a log message with the given severity
//!
static const char* severityPrefix(Severity severity)
{
switch (severity)
{
case Severity::kINTERNAL_ERROR: return "[F] ";
case Severity::kERROR: return "[E] ";
case Severity::kWARNING: return "[W] ";
case Severity::kINFO: return "[I] ";
case Severity::kVERBOSE: return "[V] ";
default: assert(0); return "";
}
}
//!
//! \brief returns an appropriate string for prefixing a test result message with the given result
//!
static const char* testResultString(TestResult result)
{
switch (result)
{
case TestResult::kRUNNING: return "RUNNING";
case TestResult::kPASSED: return "PASSED";
case TestResult::kFAILED: return "FAILED";
case TestResult::kWAIVED: return "WAIVED";
default: assert(0); return "";
}
}
//!
//! \brief returns an appropriate output stream (cout or cerr) to use with the given severity
//!
static std::ostream& severityOstream(Severity severity)
{
return severity >= Severity::kINFO ? std::cout : std::cerr;
}
//!
//! \brief method that implements logging test results
//!
static void reportTestResult(const TestAtom& testAtom, TestResult result)
{
severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # "
<< testAtom.mCmdline << std::endl;
}
//!
//! \brief generate a command line string from the given (argc, argv) values
//!
static std::string genCmdlineString(int argc, char const* const* argv)
{
std::stringstream ss;
for (int i = 0; i < argc; i++)
{
if (i > 0)
ss << " ";
ss << argv[i];
}
return ss.str();
}
Severity mReportableSeverity;
};
// Free-function helpers that build a LogStreamConsumer at a fixed severity,
// enabling `LOG_INFO(logger) << ...` style call sites. Kept in an anonymous
// namespace so each including TU gets its own copies.
namespace
{
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE
//!
//! Example usage:
//!
//! LOG_VERBOSE(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_VERBOSE(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO
//!
//! Example usage:
//!
//! LOG_INFO(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_INFO(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING
//!
//! Example usage:
//!
//! LOG_WARN(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_WARN(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR
//!
//! Example usage:
//!
//! LOG_ERROR(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_ERROR(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR
//! ("fatal" severity)
//!
//! Example usage:
//!
//! LOG_FATAL(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_FATAL(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR);
}
} // anonymous namespace
#endif // TENSORRT_LOGGING_H

View File

@ -0,0 +1,29 @@
// NOTE(review): identifiers with a leading double underscore are reserved to
// the implementation; the guard name is kept for compatibility with existing
// includes, but a rename (e.g. TRTX_MACROS_H) would be safer.
#ifndef __MACROS_H
#define __MACROS_H
#include <NvInfer.h>
// API: marks symbols for export when building this library (API_EXPORTS
// defined, as done in CMakeLists.txt) and for import when consuming it.
#ifdef API_EXPORTS
#if defined(_MSC_VER)
#define API __declspec(dllexport)
#else
#define API __attribute__((visibility("default")))
#endif
#else
#if defined(_MSC_VER)
#define API __declspec(dllimport)
#else
#define API
#endif
#endif // API_EXPORTS
// TensorRT 8 made ILogger/calibrator overrides noexcept and some enqueue
// parameters const; these macros paper over the difference for older versions.
#if NV_TENSORRT_MAJOR >= 8
#define TRT_NOEXCEPT noexcept
#define TRT_CONST_ENQUEUE const
#else
#define TRT_NOEXCEPT
#define TRT_CONST_ENQUEUE
#endif
#endif // __MACROS_H

View File

@ -0,0 +1,628 @@
#include "model.h"
#include "calibrator.h"
#include "config.h"
#include "yololayer.h"
#include <iostream>
#include <fstream>
#include <map>
#include <cassert>
#include <cmath>
#include <cstring>
using namespace nvinfer1;
// TensorRT weight files have a simple space delimited format:
// [type] [size] <data x size in hex>
// Parses a .wts weight file into a name -> Weights map. The malloc'd value
// arrays are intentionally not freed here: TensorRT keeps raw pointers to
// them until the engine is built/serialized.
static std::map<std::string, Weights> loadWeights(const std::string file) {
    std::cout << "Loading weights: " << file << std::endl;
    std::map<std::string, Weights> weightMap;

    // Open weights file
    std::ifstream input(file);
    assert(input.is_open() && "Unable to load weight file. please check if the .wts file path is right!!!!!!");

    // Read number of weight blobs
    int32_t count;
    input >> count;
    assert(count > 0 && "Invalid weight map file.");

    while (count--) {
        Weights wt{ DataType::kFLOAT, nullptr, 0 };
        uint32_t size;
        // Read name and element count (decimal) of this blob
        std::string name;
        input >> name >> std::dec >> size;
        wt.type = DataType::kFLOAT;
        // BUG FIX: allocate sizeof(uint32_t) per element. The original used
        // sizeof(val), i.e. the size of the *pointer*, which only happens to
        // work on platforms where sizeof(void*) >= sizeof(uint32_t).
        uint32_t* val = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t) * size));
        // Each value is stored as the hex bit pattern of a 32-bit float.
        for (uint32_t x = 0, y = size; x < y; ++x) {
            input >> std::hex >> val[x];
        }
        wt.values = val;
        wt.count = size;
        weightMap[name] = wt;
    }
    return weightMap;
}
// Scales channel count `x` by the width multiplier `gw`, rounding the result
// up to the nearest multiple of `divisor` (default 8), as YOLOv5 does when
// deriving per-variant channel widths.
static int get_width(int x, float gw, int divisor = 8) {
    const float scaled = x * gw;
    return static_cast<int>(ceil(scaled / divisor)) * divisor;
}
// Scales layer-repeat count `x` by the depth multiplier `gd`, never returning
// less than 1. Matches YOLOv5's rounding: exact .5 values tie toward even
// instead of away from zero.
static int get_depth(int x, float gd) {
    if (x == 1) return 1;
    const float scaled = x * gd;
    int repeats = round(scaled);
    // round() sends halves away from zero; pull an exact x.5 back down when
    // its integer part is even, reproducing round-half-to-even.
    const bool exact_half = (scaled - int(scaled) == 0.5);
    if (exact_half && (int(scaled) % 2) == 0) {
        --repeats;
    }
    return std::max<int>(repeats, 1);
}
// Folds a trained BatchNorm2d into a TensorRT IScaleLayer:
// y = (x * scale + shift) ^ power, with scale = gamma/sqrt(var+eps),
// shift = beta - mean*gamma/sqrt(var+eps), power = 1.
static IScaleLayer* addBatchNorm2d(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, std::string lname, float eps) {
float* gamma = (float*)weightMap[lname + ".weight"].values;
float* beta = (float*)weightMap[lname + ".bias"].values;
float* mean = (float*)weightMap[lname + ".running_mean"].values;
float* var = (float*)weightMap[lname + ".running_var"].values;
int len = weightMap[lname + ".running_var"].count;
float* scval = reinterpret_cast<float*>(malloc(sizeof(float) * len));
for (int i = 0; i < len; i++) {
scval[i] = gamma[i] / sqrt(var[i] + eps);
}
Weights scale{ DataType::kFLOAT, scval, len };
float* shval = reinterpret_cast<float*>(malloc(sizeof(float) * len));
for (int i = 0; i < len; i++) {
shval[i] = beta[i] - mean[i] * gamma[i] / sqrt(var[i] + eps);
}
Weights shift{ DataType::kFLOAT, shval, len };
float* pval = reinterpret_cast<float*>(malloc(sizeof(float) * len));
for (int i = 0; i < len; i++) {
pval[i] = 1.0;
}
Weights power{ DataType::kFLOAT, pval, len };
// Stash the derived arrays in weightMap so they stay alive for the build
// (TensorRT keeps raw pointers until the engine is serialized).
weightMap[lname + ".scale"] = scale;
weightMap[lname + ".shift"] = shift;
weightMap[lname + ".power"] = power;
IScaleLayer* scale_1 = network->addScale(input, ScaleMode::kCHANNEL, shift, scale, power);
assert(scale_1);
return scale_1;
}
// YOLOv5 Conv block: Conv2d (no bias) -> folded BatchNorm -> SiLU.
// outch = output channels, ksize = kernel, s = stride, g = groups.
static ILayer* convBlock(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, int outch, int ksize, int s, int g, std::string lname) {
Weights emptywts{ DataType::kFLOAT, nullptr, 0 };
// ksize/3 yields pad 0 for k=1, 1 for k=3, 2 for k=6 — the only kernel
// sizes used by this builder. NOTE(review): not a general autopad (k/2);
// confirm before reusing with other kernel sizes.
int p = ksize / 3;
IConvolutionLayer* conv1 = network->addConvolutionNd(input, outch, DimsHW{ ksize, ksize }, weightMap[lname + ".conv.weight"], emptywts);
assert(conv1);
conv1->setStrideNd(DimsHW{ s, s });
conv1->setPaddingNd(DimsHW{ p, p });
conv1->setNbGroups(g);
conv1->setName((lname + ".conv").c_str());
IScaleLayer* bn1 = addBatchNorm2d(network, weightMap, *conv1->getOutput(0), lname + ".bn", 1e-3);
// silu = x * sigmoid(x), composed from sigmoid + elementwise product.
auto sig = network->addActivation(*bn1->getOutput(0), ActivationType::kSIGMOID);
assert(sig);
auto ew = network->addElementWise(*bn1->getOutput(0), *sig->getOutput(0), ElementWiseOperation::kPROD);
assert(ew);
return ew;
}
// YOLOv5 Focus layer: space-to-depth via four strided slices (even/odd rows
// x even/odd cols), concatenated on channels (4*inch at half resolution),
// then a convBlock to outch channels.
static ILayer* focus(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, int inch, int outch, int ksize, std::string lname, int input_h, int input_w) {
ISliceLayer* s1 = network->addSlice(input, Dims3{ 0, 0, 0 }, Dims3{ inch, input_h / 2, input_w / 2 }, Dims3{ 1, 2, 2 });
ISliceLayer* s2 = network->addSlice(input, Dims3{ 0, 1, 0 }, Dims3{ inch, input_h / 2, input_w / 2 }, Dims3{ 1, 2, 2 });
ISliceLayer* s3 = network->addSlice(input, Dims3{ 0, 0, 1 }, Dims3{ inch, input_h / 2, input_w / 2 }, Dims3{ 1, 2, 2 });
ISliceLayer* s4 = network->addSlice(input, Dims3{ 0, 1, 1 }, Dims3{ inch, input_h / 2, input_w / 2 }, Dims3{ 1, 2, 2 });
ITensor* inputTensors[] = { s1->getOutput(0), s2->getOutput(0), s3->getOutput(0), s4->getOutput(0) };
auto cat = network->addConcatenation(inputTensors, 4);
auto conv = convBlock(network, weightMap, *cat->getOutput(0), outch, ksize, 1, 1, lname + ".conv");
return conv;
}
// Standard YOLOv5 bottleneck: 1x1 conv (expansion e) -> 3x3 conv, with an
// optional residual add when shortcut is requested and channels match.
static ILayer* bottleneck(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, int c1, int c2, bool shortcut, int g, float e, std::string lname) {
auto cv1 = convBlock(network, weightMap, input, (int)((float)c2 * e), 1, 1, 1, lname + ".cv1");
auto cv2 = convBlock(network, weightMap, *cv1->getOutput(0), c2, 3, 1, g, lname + ".cv2");
if (shortcut && c1 == c2) {
auto ew = network->addElementWise(input, *cv2->getOutput(0), ElementWiseOperation::kSUM);
return ew;
}
return cv2;
}
// YOLOv5 (pre-v4) CSP bottleneck: a main path of n stacked bottlenecks and a
// bias-free 1x1 bypass, concatenated, then BN + LeakyReLU(0.1) and a final
// 1x1 convBlock to c2 channels.
static ILayer* bottleneckCSP(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, int c1, int c2, int n, bool shortcut, int g, float e, std::string lname) {
Weights emptywts{ DataType::kFLOAT, nullptr, 0 };
int c_ = (int)((float)c2 * e);  // hidden channel count
auto cv1 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv1");
auto cv2 = network->addConvolutionNd(input, c_, DimsHW{ 1, 1 }, weightMap[lname + ".cv2.weight"], emptywts);
ITensor* y1 = cv1->getOutput(0);
for (int i = 0; i < n; i++) {
auto b = bottleneck(network, weightMap, *y1, c_, c_, shortcut, g, 1.0, lname + ".m." + std::to_string(i));
y1 = b->getOutput(0);
}
auto cv3 = network->addConvolutionNd(*y1, c_, DimsHW{ 1, 1 }, weightMap[lname + ".cv3.weight"], emptywts);
ITensor* inputTensors[] = { cv3->getOutput(0), cv2->getOutput(0) };
auto cat = network->addConcatenation(inputTensors, 2);
IScaleLayer* bn = addBatchNorm2d(network, weightMap, *cat->getOutput(0), lname + ".bn", 1e-4);
auto lr = network->addActivation(*bn->getOutput(0), ActivationType::kLEAKY_RELU);
lr->setAlpha(0.1);
auto cv4 = convBlock(network, weightMap, *lr->getOutput(0), c2, 1, 1, 1, lname + ".cv4");
return cv4;
}
// YOLOv5 C3 module: two parallel 1x1 convBlocks; the first feeds n stacked
// bottlenecks; results are concatenated and fused by a final 1x1 convBlock.
static ILayer* C3(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, int c1, int c2, int n, bool shortcut, int g, float e, std::string lname) {
int c_ = (int)((float)c2 * e);  // hidden channel count
auto cv1 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv1");
auto cv2 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv2");
ITensor *y1 = cv1->getOutput(0);
for (int i = 0; i < n; i++) {
auto b = bottleneck(network, weightMap, *y1, c_, c_, shortcut, g, 1.0, lname + ".m." + std::to_string(i));
y1 = b->getOutput(0);
}
ITensor* inputTensors[] = { y1, cv2->getOutput(0) };
auto cat = network->addConcatenation(inputTensors, 2);
auto cv3 = convBlock(network, weightMap, *cat->getOutput(0), c2, 1, 1, 1, lname + ".cv3");
return cv3;
}
// Spatial Pyramid Pooling: 1x1 reduce, three parallel stride-1 max-pools with
// kernels k1/k2/k3 ("same" padding), concat of all four branches, 1x1 fuse.
static ILayer* SPP(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, int c1, int c2, int k1, int k2, int k3, std::string lname) {
int c_ = c1 / 2;
auto cv1 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv1");
auto pool1 = network->addPoolingNd(*cv1->getOutput(0), PoolingType::kMAX, DimsHW{ k1, k1 });
pool1->setPaddingNd(DimsHW{ k1 / 2, k1 / 2 });
pool1->setStrideNd(DimsHW{ 1, 1 });
auto pool2 = network->addPoolingNd(*cv1->getOutput(0), PoolingType::kMAX, DimsHW{ k2, k2 });
pool2->setPaddingNd(DimsHW{ k2 / 2, k2 / 2 });
pool2->setStrideNd(DimsHW{ 1, 1 });
auto pool3 = network->addPoolingNd(*cv1->getOutput(0), PoolingType::kMAX, DimsHW{ k3, k3 });
pool3->setPaddingNd(DimsHW{ k3 / 2, k3 / 2 });
pool3->setStrideNd(DimsHW{ 1, 1 });
ITensor* inputTensors[] = { cv1->getOutput(0), pool1->getOutput(0), pool2->getOutput(0), pool3->getOutput(0) };
auto cat = network->addConcatenation(inputTensors, 4);
auto cv2 = convBlock(network, weightMap, *cat->getOutput(0), c2, 1, 1, 1, lname + ".cv2");
return cv2;
}
// SPPF: fast SPP variant. Unlike SPP, the three max-pools are chained
// (each pools the previous pool's output) with a single kernel size k,
// which is mathematically equivalent to SPP with k, 2k-1, 3k-2.
static ILayer* SPPF(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, int c1, int c2, int k, std::string lname) {
    const int hidden = c1 / 2;
    auto squeeze = convBlock(network, weightMap, input, hidden, 1, 1, 1, lname + ".cv1");
    ITensor* branches[4];
    branches[0] = squeeze->getOutput(0);
    // Chain three identical stride-1 max-pools, keeping each intermediate.
    ITensor* prev = branches[0];
    for (int i = 1; i < 4; ++i) {
        auto pool = network->addPoolingNd(*prev, PoolingType::kMAX, DimsHW{ k, k });
        pool->setPaddingNd(DimsHW{ k / 2, k / 2 });
        pool->setStrideNd(DimsHW{ 1, 1 });
        prev = pool->getOutput(0);
        branches[i] = prev;
    }
    auto merged = network->addConcatenation(branches, 4);
    return convBlock(network, weightMap, *merged->getOutput(0), c2, 1, 1, 1, lname + ".cv2");
}
// Proto head for the segmentation model: 3x3 conv, 2x nearest-neighbor
// upsample (channel dim kept, H and W doubled), 3x3 conv, then a 1x1 conv
// projecting to c2 prototype channels.
static ILayer* Proto(INetworkDefinition* network, std::map<std::string, Weights>& weightMap, ITensor& input, int c_, int c2, std::string lname) {
    auto stem = convBlock(network, weightMap, input, c_, 3, 1, 1, lname + ".cv1");
    auto up = network->addResize(*stem->getOutput(0));
    assert(up);
    up->setResizeMode(ResizeMode::kNEAREST);
    // Scales are per-dimension for CHW: channels x1, height x2, width x2.
    const float chw_scales[] = {1, 2, 2};
    up->setScales(chw_scales, 3);
    auto mid = convBlock(network, weightMap, *up->getOutput(0), c_, 3, 1, 1, lname + ".cv2");
    auto out = convBlock(network, weightMap, *mid->getOutput(0), c2, 1, 1, 1, lname + ".cv3");
    assert(out);
    return out;
}
// Reads the per-detection-level anchor boxes from the weight map entry
// "<lname>.anchor_grid". Returns one vector of kNumAnchor*2 floats
// (w,h pairs) per level.
static std::vector<std::vector<float>> getAnchors(std::map<std::string, Weights>& weightMap, std::string lname) {
    std::vector<std::vector<float>> result;
    Weights wts = weightMap[lname + ".anchor_grid"];
    const int level_len = kNumAnchor * 2;
    const float* base = (const float*)wts.values;
    for (int level = 0; level < wts.count / level_len; ++level) {
        const float* begin = base + level * level_len;
        result.emplace_back(begin, begin + level_len);
    }
    return result;
}
// Attaches the custom "YoloLayer_TRT" decode plugin to the raw detection
// head convolutions. Per-level strides come from the weight-map entry
// "<lname>.strides" and anchors from "<lname>.anchor_grid" (via getAnchors).
// Returns the plugin layer whose output 0 is the decoded detection tensor.
static IPluginV2Layer* addYoLoLayer(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, std::string lname, std::vector<IConvolutionLayer*> dets, int input_h, int input_w, int n_classes, bool is_segmentation = false) {
// NOTE(review): creator is NULL if the plugin was never registered with this
// TensorRT build; createPlugin below would then dereference NULL — consider
// asserting creator != nullptr here.
auto creator = getPluginRegistry()->getPluginCreator("YoloLayer_TRT", "1");
auto anchors = getAnchors(weightMap, lname);
PluginField plugin_fields[2];
// Network meta info packed as 5 ints:
// {num_classes, input_w, input_h, max_output_boxes, is_segmentation_flag}.
int netinfo[5] = {n_classes, input_w, input_h, kMaxNumOutputBbox, (int)is_segmentation};
plugin_fields[0].data = netinfo;
plugin_fields[0].length = 5;
plugin_fields[0].name = "netinfo";
// NOTE(review): declared type kFLOAT32 does not match the int payload;
// presumably the plugin reinterprets the raw bytes and ignores the type
// tag — verify against the YoloLayer plugin implementation.
plugin_fields[0].type = PluginFieldType::kFLOAT32;
//load strides from Detect layer
assert(weightMap.find(lname + ".strides") != weightMap.end() && "Not found `strides`, please check gen_wts.py!!!");
Weights strides = weightMap[lname + ".strides"];
auto *p = (const float*)(strides.values);
std::vector<int> scales(p, p + strides.count);
// Build one YoloKernel per detection level: feature-map size is the network
// input size divided by that level's stride; anchors are copied verbatim.
std::vector<YoloKernel> kernels;
for (size_t i = 0; i < anchors.size(); i++) {
YoloKernel kernel;
kernel.width = input_w / scales[i];
kernel.height = input_h / scales[i];
memcpy(kernel.anchors, &anchors[i][0], anchors[i].size() * sizeof(float));
kernels.push_back(kernel);
}
// Second field: the raw YoloKernel array; length is the kernel COUNT, not a
// float count (same type-tag caveat as "netinfo" above).
plugin_fields[1].data = &kernels[0];
plugin_fields[1].length = kernels.size();
plugin_fields[1].name = "kernels";
plugin_fields[1].type = PluginFieldType::kFLOAT32;
PluginFieldCollection plugin_data;
plugin_data.nbFields = 2;
plugin_data.fields = plugin_fields;
IPluginV2 *plugin_obj = creator->createPlugin("yololayer", &plugin_data);
// Wire every detection-head conv output into the plugin.
std::vector<ITensor*> input_tensors;
for (auto det: dets) {
input_tensors.push_back(det->getOutput(0));
}
auto yolo = network->addPluginV2(&input_tensors[0], input_tensors.size(), *plugin_obj);
return yolo;
}
// Builds a YOLOv5 P5 detection engine (three detect heads).
// gd/gw are the depth/width multiples selecting the model variant; wts_name
// is the .wts weight file produced by gen_wts.py. Caller owns the returned
// engine. NOTE(review): uses implicit-batch-era APIs (createNetworkV2(0U),
// setMaxBatchSize, buildEngineWithConfig) which are deprecated in newer
// TensorRT releases.
ICudaEngine* build_det_engine(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name, int input_h, int input_w, int n_classes) {
INetworkDefinition* network = builder->createNetworkV2(0U);
// Create input tensor of shape {3, input_h, input_w}
ITensor* data = network->addInput(kInputTensorName, dt, Dims3{ 3, input_h, input_w });
assert(data);
std::map<std::string, Weights> weightMap = loadWeights(wts_name);
// Backbone: alternating stride-2 convs and C3 blocks, ending in SPPF.
auto conv0 = convBlock(network, weightMap, *data, get_width(64, gw), 6, 2, 1, "model.0");
assert(conv0);
auto conv1 = convBlock(network, weightMap, *conv0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
auto bottleneck_CSP2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2")
auto conv3 = convBlock(network, weightMap, *bottleneck_CSP2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
auto bottleneck_csp4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(6, gd), true, 1, 0.5, "model.4");
auto conv5 = convBlock(network, weightMap, *bottleneck_csp4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
auto bottleneck_csp6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
auto conv7 = convBlock(network, weightMap, *bottleneck_csp6->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.7");
auto bottleneck_csp8 = C3(network, weightMap, *conv7->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), true, 1, 0.5, "model.8");
auto spp9 = SPPF(network, weightMap, *bottleneck_csp8->getOutput(0), get_width(1024, gw), get_width(1024, gw), 5, "model.9");
// Head: FPN/PAN — two nearest-neighbor upsample+concat stages down the
// pyramid, then two stride-2 conv+concat stages back up.
auto conv10 = convBlock(network, weightMap, *spp9->getOutput(0), get_width(512, gw), 1, 1, 1, "model.10");
auto upsample11 = network->addResize(*conv10->getOutput(0));
assert(upsample11);
upsample11->setResizeMode(ResizeMode::kNEAREST);
upsample11->setOutputDimensions(bottleneck_csp6->getOutput(0)->getDimensions());
ITensor* inputTensors12[] = { upsample11->getOutput(0), bottleneck_csp6->getOutput(0) };
auto cat12 = network->addConcatenation(inputTensors12, 2);
auto bottleneck_csp13 = C3(network, weightMap, *cat12->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.13");
auto conv14 = convBlock(network, weightMap, *bottleneck_csp13->getOutput(0), get_width(256, gw), 1, 1, 1, "model.14");
auto upsample15 = network->addResize(*conv14->getOutput(0));
assert(upsample15);
upsample15->setResizeMode(ResizeMode::kNEAREST);
upsample15->setOutputDimensions(bottleneck_csp4->getOutput(0)->getDimensions());
ITensor* inputTensors16[] = { upsample15->getOutput(0), bottleneck_csp4->getOutput(0) };
auto cat16 = network->addConcatenation(inputTensors16, 2);
auto bottleneck_csp17 = C3(network, weightMap, *cat16->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.17");
// Detect: one 1x1 conv per level producing 3 anchors x (n_classes + 5)
// channels, decoded by the YoloLayer plugin.
IConvolutionLayer* det0 = network->addConvolutionNd(*bottleneck_csp17->getOutput(0), 3 * (n_classes + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.0.weight"], weightMap["model.24.m.0.bias"]);
auto conv18 = convBlock(network, weightMap, *bottleneck_csp17->getOutput(0), get_width(256, gw), 3, 2, 1, "model.18");
ITensor* inputTensors19[] = { conv18->getOutput(0), conv14->getOutput(0) };
auto cat19 = network->addConcatenation(inputTensors19, 2);
auto bottleneck_csp20 = C3(network, weightMap, *cat19->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.20");
IConvolutionLayer* det1 = network->addConvolutionNd(*bottleneck_csp20->getOutput(0), 3 * (n_classes + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.1.weight"], weightMap["model.24.m.1.bias"]);
auto conv21 = convBlock(network, weightMap, *bottleneck_csp20->getOutput(0), get_width(512, gw), 3, 2, 1, "model.21");
ITensor* inputTensors22[] = { conv21->getOutput(0), conv10->getOutput(0) };
auto cat22 = network->addConcatenation(inputTensors22, 2);
auto bottleneck_csp23 = C3(network, weightMap, *cat22->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.23");
IConvolutionLayer* det2 = network->addConvolutionNd(*bottleneck_csp23->getOutput(0), 3 * (n_classes + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.2.weight"], weightMap["model.24.m.2.bias"]);
auto yolo = addYoLoLayer(network, weightMap, "model.24", std::vector<IConvolutionLayer*>{det0, det1, det2}, input_h, input_w, n_classes);
yolo->getOutput(0)->setName(kOutputTensorName);
network->markOutput(*yolo->getOutput(0));
// Engine config
builder->setMaxBatchSize(maxBatchSize);
config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB
#if defined(USE_FP16)
config->setFlag(BuilderFlag::kFP16);
#elif defined(USE_INT8)
std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
assert(builder->platformHasFastInt8());
config->setFlag(BuilderFlag::kINT8);
// NOTE(review): calibrator is heap-allocated and never freed — a one-shot
// leak at build time.
Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, input_w, input_h, "./coco_calib/", "int8calib.table", kInputTensorName);
config->setInt8Calibrator(calibrator);
#endif
std::cout << "Building engine, please wait for a while..." << std::endl;
ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
std::cout << "Build engine successfully!" << std::endl;
// Don't need the network any more
network->destroy();
// Release host memory
for (auto& mem : weightMap) {
free((void*)(mem.second.values));
}
return engine;
}
// Builds a YOLOv5 P6 detection engine: one extra backbone stage and a
// FOURTH detect head compared to build_det_engine (heads read from
// "model.33.m.0..3"). Interface and lifecycle mirror build_det_engine.
ICudaEngine* build_det_p6_engine(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name, int input_h, int input_w, int n_classes) {
INetworkDefinition* network = builder->createNetworkV2(0U);
// Create input tensor of shape {3, input_h, input_w}
ITensor* data = network->addInput(kInputTensorName, dt, Dims3{ 3, input_h, input_w });
assert(data);
std::map<std::string, Weights> weightMap = loadWeights(wts_name);
// Backbone: P6 adds the 768-channel stage (model.7/8) before the 1024 stage.
auto conv0 = convBlock(network, weightMap, *data, get_width(64, gw), 6, 2, 1, "model.0");
auto conv1 = convBlock(network, weightMap, *conv0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
auto c3_2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
auto conv3 = convBlock(network, weightMap, *c3_2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
auto c3_4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(6, gd), true, 1, 0.5, "model.4");
auto conv5 = convBlock(network, weightMap, *c3_4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
auto c3_6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
auto conv7 = convBlock(network, weightMap, *c3_6->getOutput(0), get_width(768, gw), 3, 2, 1, "model.7");
auto c3_8 = C3(network, weightMap, *conv7->getOutput(0), get_width(768, gw), get_width(768, gw), get_depth(3, gd), true, 1, 0.5, "model.8");
auto conv9 = convBlock(network, weightMap, *c3_8->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.9");
auto c3_10 = C3(network, weightMap, *conv9->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), true, 1, 0.5, "model.10");
auto sppf11 = SPPF(network, weightMap, *c3_10->getOutput(0), get_width(1024, gw), get_width(1024, gw), 5, "model.11");
// Head: three upsample+concat stages down, three stride-2 conv+concat up.
auto conv12 = convBlock(network, weightMap, *sppf11->getOutput(0), get_width(768, gw), 1, 1, 1, "model.12");
auto upsample13 = network->addResize(*conv12->getOutput(0));
assert(upsample13);
upsample13->setResizeMode(ResizeMode::kNEAREST);
upsample13->setOutputDimensions(c3_8->getOutput(0)->getDimensions());
ITensor* inputTensors14[] = { upsample13->getOutput(0), c3_8->getOutput(0) };
auto cat14 = network->addConcatenation(inputTensors14, 2);
auto c3_15 = C3(network, weightMap, *cat14->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.15");
auto conv16 = convBlock(network, weightMap, *c3_15->getOutput(0), get_width(512, gw), 1, 1, 1, "model.16");
auto upsample17 = network->addResize(*conv16->getOutput(0));
assert(upsample17);
upsample17->setResizeMode(ResizeMode::kNEAREST);
upsample17->setOutputDimensions(c3_6->getOutput(0)->getDimensions());
ITensor* inputTensors18[] = { upsample17->getOutput(0), c3_6->getOutput(0) };
auto cat18 = network->addConcatenation(inputTensors18, 2);
auto c3_19 = C3(network, weightMap, *cat18->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.19");
auto conv20 = convBlock(network, weightMap, *c3_19->getOutput(0), get_width(256, gw), 1, 1, 1, "model.20");
auto upsample21 = network->addResize(*conv20->getOutput(0));
assert(upsample21);
upsample21->setResizeMode(ResizeMode::kNEAREST);
upsample21->setOutputDimensions(c3_4->getOutput(0)->getDimensions());
ITensor* inputTensors21[] = { upsample21->getOutput(0), c3_4->getOutput(0) };
auto cat22 = network->addConcatenation(inputTensors21, 2);
auto c3_23 = C3(network, weightMap, *cat22->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.23");
auto conv24 = convBlock(network, weightMap, *c3_23->getOutput(0), get_width(256, gw), 3, 2, 1, "model.24");
ITensor* inputTensors25[] = { conv24->getOutput(0), conv20->getOutput(0) };
auto cat25 = network->addConcatenation(inputTensors25, 2);
auto c3_26 = C3(network, weightMap, *cat25->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.26");
auto conv27 = convBlock(network, weightMap, *c3_26->getOutput(0), get_width(512, gw), 3, 2, 1, "model.27");
ITensor* inputTensors28[] = { conv27->getOutput(0), conv16->getOutput(0) };
auto cat28 = network->addConcatenation(inputTensors28, 2);
auto c3_29 = C3(network, weightMap, *cat28->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.29");
auto conv30 = convBlock(network, weightMap, *c3_29->getOutput(0), get_width(768, gw), 3, 2, 1, "model.30");
ITensor* inputTensors31[] = { conv30->getOutput(0), conv12->getOutput(0) };
auto cat31 = network->addConcatenation(inputTensors31, 2);
auto c3_32 = C3(network, weightMap, *cat31->getOutput(0), get_width(2048, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.32");
// Detect: four heads, one per pyramid level.
IConvolutionLayer* det0 = network->addConvolutionNd(*c3_23->getOutput(0), 3 * (n_classes + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.0.weight"], weightMap["model.33.m.0.bias"]);
IConvolutionLayer* det1 = network->addConvolutionNd(*c3_26->getOutput(0), 3 * (n_classes + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.1.weight"], weightMap["model.33.m.1.bias"]);
IConvolutionLayer* det2 = network->addConvolutionNd(*c3_29->getOutput(0), 3 * (n_classes + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.2.weight"], weightMap["model.33.m.2.bias"]);
IConvolutionLayer* det3 = network->addConvolutionNd(*c3_32->getOutput(0), 3 * (n_classes + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.3.weight"], weightMap["model.33.m.3.bias"]);
auto yolo = addYoLoLayer(network, weightMap, "model.33", std::vector<IConvolutionLayer*>{det0, det1, det2, det3}, input_h, input_w, n_classes);
yolo->getOutput(0)->setName(kOutputTensorName);
network->markOutput(*yolo->getOutput(0));
// Engine config
builder->setMaxBatchSize(maxBatchSize);
config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB
#if defined(USE_FP16)
config->setFlag(BuilderFlag::kFP16);
#elif defined(USE_INT8)
std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
assert(builder->platformHasFastInt8());
config->setFlag(BuilderFlag::kINT8);
// NOTE(review): calibrator intentionally leaked at build time (see
// build_det_engine for the same pattern).
Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, input_w, input_h, "./coco_calib/", "int8calib.table", kInputTensorName);
config->setInt8Calibrator(calibrator);
#endif
std::cout << "Building engine, please wait for a while..." << std::endl;
ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
std::cout << "Build engine successfully!" << std::endl;
// Don't need the network any more
network->destroy();
// Release host memory
for (auto& mem : weightMap) {
free((void*)(mem.second.values));
}
return engine;
}
// Builds a YOLOv5 classification engine: detection backbone (model.0..8)
// followed by a 1x1 conv to 1280 channels, 7x7 average pool, and a fully
// connected layer to kClsNumClass logits. Interface matches the detection
// builders; caller owns the returned engine.
// FIX: the INT8 calibrator was constructed with (kClsInputW, kClsInputW),
// passing the width for both dimensions; every other builder passes
// (input_w, input_h). Corrected to (kClsInputW, kClsInputH).
ICudaEngine* build_cls_engine(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name) {
    INetworkDefinition* network = builder->createNetworkV2(0U);
    // Create input tensor of shape {3, kClsInputH, kClsInputW}
    ITensor* data = network->addInput(kInputTensorName, dt, Dims3{ 3, kClsInputH, kClsInputW });
    assert(data);
    std::map<std::string, Weights> weightMap = loadWeights(wts_name);
    // Backbone (same CSP stem as the detection model, no SPPF)
    auto conv0 = convBlock(network, weightMap, *data, get_width(64, gw), 6, 2, 1, "model.0");
    assert(conv0);
    auto conv1 = convBlock(network, weightMap, *conv0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
    auto bottleneck_CSP2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
    auto conv3 = convBlock(network, weightMap, *bottleneck_CSP2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
    auto bottleneck_csp4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(6, gd), true, 1, 0.5, "model.4");
    auto conv5 = convBlock(network, weightMap, *bottleneck_csp4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
    auto bottleneck_csp6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
    auto conv7 = convBlock(network, weightMap, *bottleneck_csp6->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.7");
    auto bottleneck_csp8 = C3(network, weightMap, *conv7->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), true, 1, 0.5, "model.8");
    // Classification head: 1x1 conv -> 7x7 average pool -> FC logits
    auto conv_class = convBlock(network, weightMap, *bottleneck_csp8->getOutput(0), 1280, 1, 1, 1, "model.9.conv");
    IPoolingLayer* pool2 = network->addPoolingNd(*conv_class->getOutput(0), PoolingType::kAVERAGE, DimsHW{7, 7});
    assert(pool2);
    IFullyConnectedLayer* yolo = network->addFullyConnected(*pool2->getOutput(0), kClsNumClass, weightMap["model.9.linear.weight"], weightMap["model.9.linear.bias"]);
    assert(yolo);
    yolo->getOutput(0)->setName(kOutputTensorName);
    network->markOutput(*yolo->getOutput(0));
    // Engine config
    builder->setMaxBatchSize(maxBatchSize);
    config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB
#if defined(USE_FP16)
    config->setFlag(BuilderFlag::kFP16);
#elif defined(USE_INT8)
    std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
    assert(builder->platformHasFastInt8());
    config->setFlag(BuilderFlag::kINT8);
    // Calibrator receives (width, height) like the detection builders.
    // NOTE(review): heap-allocated and never freed — one-shot build-time leak.
    Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, kClsInputW, kClsInputH, "./coco_calib/", "int8calib.table", kInputTensorName);
    config->setInt8Calibrator(calibrator);
#endif
    std::cout << "Building engine, please wait for a while..." << std::endl;
    ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
    std::cout << "Build engine successfully!" << std::endl;
    // Don't need the network any more
    network->destroy();
    // Release host memory
    for (auto& mem : weightMap) {
        free((void*)(mem.second.values));
    }
    return engine;
}
// Builds a YOLOv5 instance-segmentation engine. Same backbone/head graph as
// build_det_engine but each detect conv emits 3 * (32 + n_classes + 5)
// channels (32 mask coefficients per anchor), the YoloLayer plugin is built
// with is_segmentation = true, and a second output "proto" carries the mask
// prototypes from the Proto head. Caller owns the returned engine.
ICudaEngine* build_seg_engine(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name, int input_h, int input_w, int n_classes) {
INetworkDefinition* network = builder->createNetworkV2(0U);
ITensor* data = network->addInput(kInputTensorName, dt, Dims3{ 3, input_h, input_w });
assert(data);
std::map<std::string, Weights> weightMap = loadWeights(wts_name);
// Backbone
auto conv0 = convBlock(network, weightMap, *data, get_width(64, gw), 6, 2, 1, "model.0");
assert(conv0)
auto conv1 = convBlock(network, weightMap, *conv0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
auto bottleneck_CSP2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
auto conv3 = convBlock(network, weightMap, *bottleneck_CSP2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
auto bottleneck_csp4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(6, gd), true, 1, 0.5, "model.4");
auto conv5 = convBlock(network, weightMap, *bottleneck_csp4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
auto bottleneck_csp6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
auto conv7 = convBlock(network, weightMap, *bottleneck_csp6->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.7");
auto bottleneck_csp8 = C3(network, weightMap, *conv7->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), true, 1, 0.5, "model.8");
auto spp9 = SPPF(network, weightMap, *bottleneck_csp8->getOutput(0), get_width(1024, gw), get_width(1024, gw), 5, "model.9");
// Head (FPN/PAN, identical wiring to build_det_engine)
auto conv10 = convBlock(network, weightMap, *spp9->getOutput(0), get_width(512, gw), 1, 1, 1, "model.10");
auto upsample11 = network->addResize(*conv10->getOutput(0));
assert(upsample11);
upsample11->setResizeMode(ResizeMode::kNEAREST);
upsample11->setOutputDimensions(bottleneck_csp6->getOutput(0)->getDimensions());
ITensor* inputTensors12[] = { upsample11->getOutput(0), bottleneck_csp6->getOutput(0) };
auto cat12 = network->addConcatenation(inputTensors12, 2);
auto bottleneck_csp13 = C3(network, weightMap, *cat12->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.13");
auto conv14 = convBlock(network, weightMap, *bottleneck_csp13->getOutput(0), get_width(256, gw), 1, 1, 1, "model.14");
auto upsample15 = network->addResize(*conv14->getOutput(0));
assert(upsample15);
upsample15->setResizeMode(ResizeMode::kNEAREST);
upsample15->setOutputDimensions(bottleneck_csp4->getOutput(0)->getDimensions());
ITensor* inputTensors16[] = { upsample15->getOutput(0), bottleneck_csp4->getOutput(0) };
auto cat16 = network->addConcatenation(inputTensors16, 2);
auto bottleneck_csp17 = C3(network, weightMap, *cat16->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.17");
// Segmentation: detect convs carry 32 extra mask-coefficient channels.
IConvolutionLayer* det0 = network->addConvolutionNd(*bottleneck_csp17->getOutput(0), 3 * (32 + n_classes + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.0.weight"], weightMap["model.24.m.0.bias"]);
auto conv18 = convBlock(network, weightMap, *bottleneck_csp17->getOutput(0), get_width(256, gw), 3, 2, 1, "model.18");
ITensor* inputTensors19[] = { conv18->getOutput(0), conv14->getOutput(0) };
auto cat19 = network->addConcatenation(inputTensors19, 2);
auto bottleneck_csp20 = C3(network, weightMap, *cat19->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.20");
IConvolutionLayer* det1 = network->addConvolutionNd(*bottleneck_csp20->getOutput(0), 3 * (32 + n_classes + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.1.weight"], weightMap["model.24.m.1.bias"]);
auto conv21 = convBlock(network, weightMap, *bottleneck_csp20->getOutput(0), get_width(512, gw), 3, 2, 1, "model.21");
ITensor* inputTensors22[] = { conv21->getOutput(0), conv10->getOutput(0) };
auto cat22 = network->addConcatenation(inputTensors22, 2);
auto bottleneck_csp23 = C3(network, weightMap, *cat22->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.23");
IConvolutionLayer* det2 = network->addConvolutionNd(*bottleneck_csp23->getOutput(0), 3 * (32 + n_classes + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.2.weight"], weightMap["model.24.m.2.bias"]);
auto yolo = addYoLoLayer(network, weightMap, "model.24", std::vector<IConvolutionLayer*>{det0, det1, det2}, input_h, input_w, n_classes, true);
yolo->getOutput(0)->setName(kOutputTensorName);
network->markOutput(*yolo->getOutput(0));
// Second engine output: 32-channel mask prototypes at 1/4 input resolution.
auto proto = Proto(network, weightMap, *bottleneck_csp17->getOutput(0), get_width(256, gw), 32, "model.24.proto");
proto->getOutput(0)->setName("proto");
network->markOutput(*proto->getOutput(0));
// Engine config
builder->setMaxBatchSize(maxBatchSize);
config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB
#if defined(USE_FP16)
config->setFlag(BuilderFlag::kFP16);
#elif defined(USE_INT8)
std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
assert(builder->platformHasFastInt8());
config->setFlag(BuilderFlag::kINT8);
// NOTE(review): calibrator heap-allocated and never freed (build-time leak).
Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, input_w, input_h, "./coco_calib/", "int8calib.table", kInputTensorName);
config->setInt8Calibrator(calibrator);
#endif
std::cout << "Building engine, please wait for a while..." << std::endl;
ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
std::cout << "Build engine successfully!" << std::endl;
// Don't need the network any more
network->destroy();
// Release host memory
for (auto& mem : weightMap) {
free((void*)(mem.second.values));
}
return engine;
}

View File

@ -0,0 +1,16 @@
#pragma once
#include <NvInfer.h>
#include <string>
// Engine builders for the YOLOv5 model family. Each takes the builder/config
// pair, the depth/width multiples (gd/gw) and the .wts weight-file path, and
// returns a built ICudaEngine* (caller owns it).
// Detection engine with three detect heads.
nvinfer1::ICudaEngine* build_det_engine(unsigned int maxBatchSize, nvinfer1::IBuilder* builder,
nvinfer1::IBuilderConfig* config, nvinfer1::DataType dt,
float& gd, float& gw, std::string& wts_name, int input_h, int input_w, int n_classes);
// P6 detection engine with four detect heads.
nvinfer1::ICudaEngine* build_det_p6_engine(unsigned int maxBatchSize, nvinfer1::IBuilder* builder,
nvinfer1::IBuilderConfig* config, nvinfer1::DataType dt,
float& gd, float& gw, std::string& wts_name, int input_h, int input_w, int n_classes);
// Classification engine (fixed kClsInputH x kClsInputW input).
nvinfer1::ICudaEngine* build_cls_engine(unsigned int maxBatchSize, nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config, nvinfer1::DataType dt, float& gd, float& gw, std::string& wts_name);
// Instance-segmentation engine (adds a "proto" mask-prototype output).
nvinfer1::ICudaEngine* build_seg_engine(unsigned int maxBatchSize, nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config, nvinfer1::DataType dt, float& gd, float& gw, std::string& wts_name, int input_h, int input_w, int n_classes);

View File

@ -0,0 +1,198 @@
#include "postprocess.h"
#include "utils.h"
// Maps a network-space (cx, cy, w, h) box back to original-image pixel
// coordinates, undoing the letterbox resize: the image was scaled by
// min(input_w/cols, input_h/rows) and centered with padding on one axis.
cv::Rect get_rect(cv::Mat& img, float bbox[4], int input_h, int input_w)
{
    float ratio_w = input_w / (img.cols * 1.0);
    float ratio_h = input_h / (img.rows * 1.0);
    float left, right, top, bottom;
    if (ratio_h > ratio_w) {
        // Width-limited fit: padding was added above and below.
        float pad = (input_h - ratio_w * img.rows) / 2;
        left   = bbox[0] - bbox[2] / 2.f;
        right  = bbox[0] + bbox[2] / 2.f;
        top    = bbox[1] - bbox[3] / 2.f - pad;
        bottom = bbox[1] + bbox[3] / 2.f - pad;
        left   = left / ratio_w;
        right  = right / ratio_w;
        top    = top / ratio_w;
        bottom = bottom / ratio_w;
    } else {
        // Height-limited fit: padding was added left and right.
        float pad = (input_w - ratio_h * img.cols) / 2;
        left   = bbox[0] - bbox[2] / 2.f - pad;
        right  = bbox[0] + bbox[2] / 2.f - pad;
        top    = bbox[1] - bbox[3] / 2.f;
        bottom = bbox[1] + bbox[3] / 2.f;
        left   = left / ratio_h;
        right  = right / ratio_h;
        top    = top / ratio_h;
        bottom = bottom / ratio_h;
    }
    return cv::Rect(round(left), round(top), round(right - left), round(bottom - top));
}
// Intersection-over-union of two boxes given as (cx, cy, w, h).
// Returns 0 when the boxes do not overlap.
static float iou(float lbox[4], float rbox[4]) {
    float inter_l = (std::max)(lbox[0] - lbox[2] / 2.f, rbox[0] - rbox[2] / 2.f);
    float inter_r = (std::min)(lbox[0] + lbox[2] / 2.f, rbox[0] + rbox[2] / 2.f);
    float inter_t = (std::max)(lbox[1] - lbox[3] / 2.f, rbox[1] - rbox[3] / 2.f);
    float inter_b = (std::min)(lbox[1] + lbox[3] / 2.f, rbox[1] + rbox[3] / 2.f);
    if (inter_t > inter_b || inter_l > inter_r)
        return 0.0f;
    float inter_area = (inter_r - inter_l) * (inter_b - inter_t);
    // union = areaA + areaB - intersection
    return inter_area / (lbox[2] * lbox[3] + rbox[2] * rbox[3] - inter_area);
}
// Strict-weak ordering for std::sort: higher-confidence detections first.
static bool cmp(const Detection& a, const Detection& b) {
    return b.conf < a.conf;
}
void nms(std::vector<Detection>& res, float* output, float conf_thresh, float nms_thresh) {
int det_size = sizeof(Detection) / sizeof(float);
std::map<float, std::vector<Detection>> m;
for (int i = 0; i < output[0] && i < kMaxNumOutputBbox; i++) {
if (output[1 + det_size * i + 4] <= conf_thresh) continue;
Detection det;
memcpy(&det, &output[1 + det_size * i], det_size * sizeof(float));
if (m.count(det.class_id) == 0) m.emplace(det.class_id, std::vector<Detection>());
m[det.class_id].push_back(det);
}
for (auto it = m.begin(); it != m.end(); it++) {
auto& dets = it->second;
std::sort(dets.begin(), dets.end(), cmp);
for (size_t m = 0; m < dets.size(); ++m) {
auto& item = dets[m];
res.push_back(item);
for (size_t n = m + 1; n < dets.size(); ++n) {
if (iou(item.bbox, dets[n].bbox) > nms_thresh) {
dets.erase(dets.begin() + n);
--n;
}
}
}
}
}
// Runs per-image NMS over a batched, flattened output buffer; result vector
// is resized to one detection list per image.
void batch_nms(std::vector<std::vector<Detection>>& res_batch, float *output, int batch_size, int output_size, float conf_thresh, float nms_thresh) {
    res_batch.resize(batch_size);
    for (int b = 0; b < batch_size; ++b) {
        float* img_output = output + b * output_size;
        nms(res_batch[b], img_output, conf_thresh, nms_thresh);
    }
}
// Draws detection rectangles and integer class-id labels onto each image of
// the batch (in place; cv::Mat headers share the underlying pixel data).
void draw_bbox(std::vector<cv::Mat>& img_batch, std::vector<std::vector<Detection>>& res_batch, int input_h, int input_w) {
    for (size_t idx = 0; idx < img_batch.size(); ++idx) {
        cv::Mat frame = img_batch[idx];
        for (auto& det : res_batch[idx]) {
            cv::Rect box = get_rect(frame, det.bbox, input_h, input_w);
            cv::rectangle(frame, box, cv::Scalar(0x27, 0xC1, 0x36), 2);
            cv::putText(frame, std::to_string((int)det.class_id), cv::Point(box.x, box.y - 1), cv::FONT_HERSHEY_PLAIN, 1.2, cv::Scalar(0xFF, 0xFF, 0xFF), 2);
        }
    }
}
// Converts a (cx, cy, w, h) box to a corner-form rect shrunk by `scale`
// (mask prototypes live at a reduced resolution of the network input).
static cv::Rect get_downscale_rect(float bbox[4], float scale) {
    float x0 = (bbox[0] - bbox[2] / 2) / scale;
    float y0 = (bbox[1] - bbox[3] / 2) / scale;
    float x1 = (bbox[0] + bbox[2] / 2) / scale;
    float y1 = (bbox[1] + bbox[3] / 2) / scale;
    return cv::Rect(round(x0), round(y0), round(x1 - x0), round(y1 - y0));
}
std::vector<cv::Mat> process_mask(const float* proto, int proto_size, std::vector<Detection>& dets, int input_h, int input_w) {
std::vector<cv::Mat> masks;
for (size_t i = 0; i < dets.size(); i++) {
cv::Mat mask_mat = cv::Mat::zeros(input_h / 4, input_w / 4, CV_32FC1);
auto r = get_downscale_rect(dets[i].bbox, 4);
if (r.x < 0) r.x = 0;
if (r.y < 0) r.y = 0;
if (r.x + r.width > mask_mat.cols) r.width = mask_mat.cols - r.x;
if (r.y + r.height > mask_mat.rows) r.height = mask_mat.rows - r.y;
// printf(" %d, %d, %d, %d, (%d, %d)\n", r.x, r.y, r.width, r.height, mask_mat.cols, mask_mat.rows);
for (int x = r.x; x < r.x + r.width; x++) {
for (int y = r.y; y < r.y + r.height; y++) {
float e = 0.0f;
for (int j = 0; j < 32; j++) {
e += dets[i].mask[j] * proto[j * proto_size / 32 + y * mask_mat.cols + x];
}
e = 1.0f / (1.0f + expf(-e));
mask_mat.at<float>(y, x) = e;
}
}
cv::resize(mask_mat, mask_mat, cv::Size(input_w, input_h));
masks.push_back(mask_mat);
}
return masks;
}
// Crops the letterboxed content region out of a network-input-sized mask and
// resizes it back to the original image size.
cv::Mat scale_mask(cv::Mat mask, cv::Mat img, int input_h, int input_w) {
    float ratio_w = input_w / (img.cols * 1.0);
    float ratio_h = input_h / (img.rows * 1.0);
    cv::Rect content;
    if (ratio_h > ratio_w) {
        // Width-limited fit: content spans full width, centered vertically.
        content.width = input_w;
        content.height = ratio_w * img.rows;
        content.x = 0;
        content.y = (input_h - content.height) / 2;
    } else {
        // Height-limited fit: content spans full height, centered horizontally.
        content.width = ratio_h * img.cols;
        content.height = input_h;
        content.x = (input_w - content.width) / 2;
        content.y = 0;
    }
    cv::Mat out;
    cv::resize(mask(content), out, img.size());
    return out;
}
// Renders segmentation results onto `img` in place: for each detection,
// alpha-blends (50/50) the mask pixels above 0.5 with a per-class color,
// then draws the box, a filled label background, and "label conf" text.
// `masks` must be parallel to `dets` (one full-input-size mask each).
void draw_mask_bbox(cv::Mat& img, std::vector<Detection>& dets, std::vector<cv::Mat>& masks, std::unordered_map<int, std::string>& labels_map, int input_h, int input_w) {
// Fixed palette of 20 RGB colors, indexed by class id modulo palette size.
static std::vector<uint32_t> colors = {0xFF3838, 0xFF9D97, 0xFF701F, 0xFFB21D, 0xCFD231, 0x48F90A,
0x92CC17, 0x3DDB86, 0x1A9334, 0x00D4BB, 0x2C99A8, 0x00C2FF,
0x344593, 0x6473FF, 0x0018EC, 0x8438FF, 0x520085, 0xCB38FF,
0xFF95C8, 0xFF37C7};
for (size_t i = 0; i < dets.size(); i++) {
// Map the mask from network-input space to original-image space.
cv::Mat img_mask = scale_mask(masks[i], img, input_h, input_w);
auto color = colors[(int)dets[i].class_id % colors.size()];
// Unpack 0xRRGGBB into an OpenCV BGR scalar.
auto bgr = cv::Scalar(color & 0xFF, color >> 8 & 0xFF, color >> 16 & 0xFF);
cv::Rect r = get_rect(img, dets[i].bbox, input_h, input_w);
// Blend mask pixels above the 0.5 threshold 50/50 with the class color.
// NOTE(review): loop bounds are not clamped to the image — presumably r
// always lies inside img here; verify, else .at() goes out of range.
for (int x = r.x; x < r.x + r.width; x++) {
for (int y = r.y; y < r.y + r.height; y++) {
float val = img_mask.at<float>(y, x);
if (val <= 0.5) continue;
img.at<cv::Vec3b>(y, x)[0] = img.at<cv::Vec3b>(y, x)[0] / 2 + bgr[0] / 2;
img.at<cv::Vec3b>(y, x)[1] = img.at<cv::Vec3b>(y, x)[1] / 2 + bgr[1] / 2;
img.at<cv::Vec3b>(y, x)[2] = img.at<cv::Vec3b>(y, x)[2] / 2 + bgr[2] / 2;
}
}
cv::rectangle(img, r, bgr, 2);
// Get the size of the text
cv::Size textSize = cv::getTextSize(labels_map[(int)dets[i].class_id] + " " + to_string_with_precision(dets[i].conf), cv::FONT_HERSHEY_PLAIN, 1.2, 2, NULL);
// Set the top left corner of the rectangle
cv::Point topLeft(r.x, r.y - textSize.height);
// Set the bottom right corner of the rectangle
cv::Point bottomRight(r.x + textSize.width, r.y + textSize.height);
// Set the thickness of the rectangle lines
int lineThickness = 2;
// Draw the rectangle on the image
cv::rectangle(img, topLeft, bottomRight, bgr, -1);
cv::putText(img, labels_map[(int)dets[i].class_id] + " " + to_string_with_precision(dets[i].conf), cv::Point(r.x, r.y + 4), cv::FONT_HERSHEY_PLAIN, 1.2, cv::Scalar::all(0xFF), 2);
}
}

View File

@ -0,0 +1,16 @@
#pragma once
#include "types.h"
#include <opencv2/opencv.hpp>
// Post-processing helpers shared by the YOLO detector pipeline.
// Map a center-format bbox from network-input coordinates back to a rect on
// the original image (letterbox padding undone).
cv::Rect get_rect(cv::Mat& img, float bbox[4], int input_h, int input_w);
// Decode raw detections from one image's output buffer and apply NMS.
void nms(std::vector<Detection>& res, float *output, float conf_thresh, float nms_thresh = 0.5);
// Per-image NMS over a batched output buffer.
void batch_nms(std::vector<std::vector<Detection>>& batch_res, float *output, int batch_size, int output_size, float conf_thresh, float nms_thresh = 0.5);
// Draw labeled boxes for every image in the batch.
void draw_bbox(std::vector<cv::Mat>& img_batch, std::vector<std::vector<Detection>>& res_batch, int input_h, int input_w);
// Build per-detection sigmoid masks from the 32-channel prototype tensor.
std::vector<cv::Mat> process_mask(const float* proto, int proto_size, std::vector<Detection>& dets, int input_h, int input_w);
// Overlay masks and labeled boxes on a single image.
void draw_mask_bbox(cv::Mat& img, std::vector<Detection>& dets, std::vector<cv::Mat>& masks, std::unordered_map<int, std::string>& labels_map, int input_h, int input_w);

View File

@ -0,0 +1,153 @@
#include "preprocess.h"
#include "cuda_utils.h"
// Pinned host staging buffer and matching device buffer for image upload.
// Allocated once by cuda_preprocess_init(), freed by cuda_preprocess_destroy().
static uint8_t* img_buffer_host = nullptr;
static uint8_t* img_buffer_device = nullptr;
// 2x3 affine transform packed row-major: [m00 m01 m02 m10 m11 m12].
struct AffineMatrix {
float value[6];
};
// One thread per destination pixel: apply the dst->src affine transform
// (`d2s`), sample the BGR source with bilinear interpolation (border pixels
// fall back to `const_value_st`), convert BGR->RGB, normalize to [0,1], and
// write planar (CHW) float output.
__global__ void warpaffine_kernel(
uint8_t* src, int src_line_size, int src_width,
int src_height, float* dst, int dst_width,
int dst_height, uint8_t const_value_st,
AffineMatrix d2s, int edge) {
int position = blockDim.x * blockIdx.x + threadIdx.x;
if (position >= edge) return;
// Unpack the 2x3 dst->src matrix.
float m_x1 = d2s.value[0];
float m_y1 = d2s.value[1];
float m_z1 = d2s.value[2];
float m_x2 = d2s.value[3];
float m_y2 = d2s.value[4];
float m_z2 = d2s.value[5];
// Destination pixel coordinates from the linear thread index.
int dx = position % dst_width;
int dy = position / dst_width;
float src_x = m_x1 * dx + m_y1 * dy + m_z1 + 0.5f;
float src_y = m_x2 * dx + m_y2 * dy + m_z2 + 0.5f;
float c0, c1, c2;
if (src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height) {
// out of range
c0 = const_value_st;
c1 = const_value_st;
c2 = const_value_st;
} else {
// Bilinear interpolation over the 4 neighboring source pixels; any
// neighbor outside the image contributes the constant border color.
int y_low = floorf(src_y);
int x_low = floorf(src_x);
int y_high = y_low + 1;
int x_high = x_low + 1;
uint8_t const_value[] = {const_value_st, const_value_st, const_value_st};
float ly = src_y - y_low;
float lx = src_x - x_low;
float hy = 1 - ly;
float hx = 1 - lx;
// Weights for (low,low), (low,high), (high,low), (high,high).
float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
uint8_t* v1 = const_value;
uint8_t* v2 = const_value;
uint8_t* v3 = const_value;
uint8_t* v4 = const_value;
if (y_low >= 0) {
if (x_low >= 0)
v1 = src + y_low * src_line_size + x_low * 3;
if (x_high < src_width)
v2 = src + y_low * src_line_size + x_high * 3;
}
if (y_high < src_height) {
if (x_low >= 0)
v3 = src + y_high * src_line_size + x_low * 3;
if (x_high < src_width)
v4 = src + y_high * src_line_size + x_high * 3;
}
c0 = w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0];
c1 = w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1];
c2 = w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2];
}
// bgr to rgb
float t = c2;
c2 = c0;
c0 = t;
// normalization
c0 = c0 / 255.0f;
c1 = c1 / 255.0f;
c2 = c2 / 255.0f;
// rgbrgbrgb to rrrgggbbb
int area = dst_width * dst_height;
float* pdst_c0 = dst + dy * dst_width + dx;
float* pdst_c1 = pdst_c0 + area;
float* pdst_c2 = pdst_c1 + area;
*pdst_c0 = c0;
*pdst_c1 = c1;
*pdst_c2 = c2;
}
// Letterbox-preprocess one BGR HWC uint8 image into a planar RGB float
// tensor on the device: stage through pinned memory, async-copy to the
// device, then launch warpaffine_kernel on `stream`. The caller must have
// run cuda_preprocess_init() with a size >= src_width*src_height.
// NOTE: img_buffer_host/img_buffer_device are shared globals, so concurrent
// calls on different streams would race on the staging buffer.
void cuda_preprocess(
uint8_t* src, int src_width, int src_height,
float* dst, int dst_width, int dst_height,
cudaStream_t stream) {
int img_size = src_width * src_height * 3;
// copy data to pinned memory
memcpy(img_buffer_host, src, img_size);
// copy data to device memory
CUDA_CHECK(cudaMemcpyAsync(img_buffer_device, img_buffer_host, img_size, cudaMemcpyHostToDevice, stream));
AffineMatrix s2d, d2s;
// Scale that fits the source inside dst while preserving aspect ratio;
// s2d maps source pixels to the centered letterboxed destination.
float scale = std::min(dst_height / (float)src_height, dst_width / (float)src_width);
s2d.value[0] = scale;
s2d.value[1] = 0;
s2d.value[2] = -scale * src_width * 0.5 + dst_width * 0.5;
s2d.value[3] = 0;
s2d.value[4] = scale;
s2d.value[5] = -scale * src_height * 0.5 + dst_height * 0.5;
// Invert s2d to get the dst->src mapping the kernel needs. m2x3_d2s wraps
// d2s.value, but invertAffineTransform may allocate its own output buffer,
// hence the explicit memcpy back into d2s.value afterwards.
cv::Mat m2x3_s2d(2, 3, CV_32F, s2d.value);
cv::Mat m2x3_d2s(2, 3, CV_32F, d2s.value);
cv::invertAffineTransform(m2x3_s2d, m2x3_d2s);
memcpy(d2s.value, m2x3_d2s.ptr<float>(0), sizeof(d2s.value));
// One thread per destination pixel.
int jobs = dst_height * dst_width;
int threads = 256;
int blocks = ceil(jobs / (float)threads);
warpaffine_kernel<<<blocks, threads, 0, stream>>>(
img_buffer_device, src_width * 3, src_width,
src_height, dst, dst_width,
dst_height, 128, d2s, jobs);
}
void cuda_batch_preprocess(std::vector<cv::Mat>& img_batch,
                           float* dst, int dst_width, int dst_height,
                           cudaStream_t stream) {
  // Preprocess each image into its slice of the batched CHW tensor.
  // The shared pinned staging buffer forces a stream sync after every image.
  const int per_image = dst_width * dst_height * 3;
  size_t idx = 0;
  for (cv::Mat& frame : img_batch) {
    cuda_preprocess(frame.ptr(), frame.cols, frame.rows, dst + per_image * idx, dst_width, dst_height, stream);
    CUDA_CHECK(cudaStreamSynchronize(stream));
    ++idx;
  }
}
// Allocate the shared staging buffers used by cuda_preprocess().
// `max_image_size` is the maximum pixel count (w*h); x3 for BGR channels.
void cuda_preprocess_init(int max_image_size) {
// prepare input data in pinned memory
CUDA_CHECK(cudaMallocHost((void**)&img_buffer_host, max_image_size * 3));
// prepare input data in device memory
CUDA_CHECK(cudaMalloc((void**)&img_buffer_device, max_image_size * 3));
}
// Release the buffers allocated by cuda_preprocess_init().
void cuda_preprocess_destroy() {
CUDA_CHECK(cudaFree(img_buffer_device));
CUDA_CHECK(cudaFreeHost(img_buffer_host));
}

View File

@ -0,0 +1,15 @@
#pragma once
#include <cuda_runtime.h>
#include <cstdint>
#include <opencv2/opencv.hpp>
// Allocate shared pinned-host/device staging buffers (max_image_size = w*h).
void cuda_preprocess_init(int max_image_size);
// Free the buffers allocated by cuda_preprocess_init().
void cuda_preprocess_destroy();
// Letterbox one BGR uint8 image into a planar RGB float tensor on device.
void cuda_preprocess(uint8_t* src, int src_width, int src_height,
float* dst, int dst_width, int dst_height,
cudaStream_t stream);
// Batched variant: one slice of `dst` per input image.
void cuda_batch_preprocess(std::vector<cv::Mat>& img_batch,
float* dst, int dst_width, int dst_height,
cudaStream_t stream);

View File

@ -0,0 +1,17 @@
#pragma once
#include "config.h"
// One YOLO output scale: grid size plus its anchor set.
struct YoloKernel {
int width;
int height;
float anchors[kNumAnchor * 2];
};
// One decoded detection. Laid out as plain floats because the device code
// writes Detection records directly into a float output buffer.
struct alignas(float) Detection {
float bbox[4];  // center_x center_y w h
float conf;  // bbox_conf * cls_conf
float class_id;
float mask[32];  // segmentation mask coefficients (YOLO-seg models)
};

View File

@ -0,0 +1,70 @@
#pragma once
#include <dirent.h>
#include <fstream>
#include <unordered_map>
#include <string>
#include <sstream>
#include <vector>
#include <cstring>
static inline int read_files_in_dir(const char* p_dir_name, std::vector<std::string>& file_names) {
  // Append the name (not the path) of every entry in `p_dir_name`, except
  // "." and "..", to `file_names`.
  // Returns 0 on success, -1 if the directory cannot be opened.
  DIR* dir_handle = opendir(p_dir_name);
  if (dir_handle == nullptr) {
    return -1;
  }
  for (struct dirent* entry = readdir(dir_handle); entry != nullptr; entry = readdir(dir_handle)) {
    const char* name = entry->d_name;
    if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0) {
      continue;
    }
    file_names.emplace_back(name);
  }
  closedir(dir_handle);
  return 0;
}
// Function to trim leading and trailing whitespace from a string
static inline std::string trim_leading_whitespace(const std::string& str) {
size_t first = str.find_first_not_of(' ');
if (std::string::npos == first) {
return str;
}
size_t last = str.find_last_not_of(' ');
return str.substr(first, (last - first + 1));
}
// Src: https://stackoverflow.com/questions/16605967
static inline std::string to_string_with_precision(const float a_value, const int n = 2) {
std::ostringstream out;
out.precision(n);
out << std::fixed << a_value;
return out.str();
}
// Load class labels, one per line, into labels_map keyed by line index
// (line 0 -> class 0, ...). Returns 0 on success and -1 when the file
// cannot be opened (previously an unopenable file silently produced an
// empty map and still reported success).
static inline int read_labels(const std::string labels_filename, std::unordered_map<int, std::string>& labels_map) {
  std::ifstream file(labels_filename);
  if (!file.is_open()) {
    return -1;
  }
  std::string line;
  int index = 0;
  while (std::getline(file, line)) {
    // Strip surrounding whitespace so stray spaces don't end up in names.
    labels_map[index] = trim_leading_whitespace(line);
    index++;
  }
  // ifstream closes itself on destruction (RAII).
  return 0;
}

View File

@ -0,0 +1,280 @@
#include "yololayer.h"
#include "cuda_utils.h"
#include <cassert>
#include <cstring>
#include <iostream>
#include <vector>
namespace Tn {
// Serialize a trivially-copyable value into a byte buffer and advance the
// cursor. std::memcpy avoids the misaligned-store / strict-aliasing UB of
// the previous reinterpret_cast assignment (the buffer has no guaranteed
// alignment for T once mixed-size fields have been written).
template<typename T>
void write(char*& buffer, const T& val) {
  std::memcpy(buffer, &val, sizeof(T));
  buffer += sizeof(T);
}
// Deserialize a value from a byte buffer and advance the cursor.
template<typename T>
void read(const char*& buffer, T& val) {
  std::memcpy(&val, buffer, sizeof(T));
  buffer += sizeof(T);
}
}
namespace nvinfer1 {
// Build-time constructor: record network geometry and copy each scale's
// anchors to the device. mAnchor is a pinned-host array of device pointers,
// one per YOLO output scale.
YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, bool is_segmentation, const std::vector<YoloKernel>& vYoloKernel) {
mClassCount = classCount;
mYoloV5NetWidth = netWidth;
mYoloV5NetHeight = netHeight;
mMaxOutObject = maxOut;
is_segmentation_ = is_segmentation;
mYoloKernel = vYoloKernel;
mKernelCount = vYoloKernel.size();
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* kNumAnchor * 2;
for (int ii = 0; ii < mKernelCount; ii++) {
CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
}
// Free the per-scale device anchor buffers and the pinned pointer array
// allocated in the constructors.
YoloLayerPlugin::~YoloLayerPlugin() {
for (int ii = 0; ii < mKernelCount; ii++) {
CUDA_CHECK(cudaFree(mAnchor[ii]));
}
CUDA_CHECK(cudaFreeHost(mAnchor));
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) {
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
read(d, mYoloV5NetWidth);
read(d, mYoloV5NetHeight);
read(d, mMaxOutObject);
read(d, is_segmentation_);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(mYoloKernel.data(), d, kernelSize);
d += kernelSize;
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* kNumAnchor * 2;
for (int ii = 0; ii < mKernelCount; ii++) {
CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
assert(d == a + length);
}
// Write the plugin's configuration into `buffer`. Field order must match
// the deserializing constructor above.
void YoloLayerPlugin::serialize(void* buffer) const TRT_NOEXCEPT {
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
write(d, mYoloV5NetWidth);
write(d, mYoloV5NetHeight);
write(d, mMaxOutObject);
write(d, is_segmentation_);
// Kernel array appended as a raw blob after the scalar fields.
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(d, mYoloKernel.data(), kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
// Byte count of the serialized form; must stay in sync with serialize().
size_t YoloLayerPlugin::getSerializationSize() const TRT_NOEXCEPT {
size_t s = sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount);
s += sizeof(YoloKernel) * mYoloKernel.size();
s += sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight);
s += sizeof(mMaxOutObject) + sizeof(is_segmentation_);
return s;
}
// No deferred initialization needed; all state is set up in the constructor.
int YoloLayerPlugin::initialize() TRT_NOEXCEPT {
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT {
//output the result to channel
// One float count slot followed by mMaxOutObject Detection records,
// flattened into a (N, 1, 1) tensor.
int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT {
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const TRT_NOEXCEPT {
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT {
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT {
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT {
return false;
}
// No per-shape configuration or context resources are needed by this plugin.
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT {}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT {}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() TRT_NOEXCEPT {}
// Type/version strings must match YoloPluginCreator's.
const char* YoloLayerPlugin::getPluginType() const TRT_NOEXCEPT {
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const TRT_NOEXCEPT {
return "1";
}
void YoloLayerPlugin::destroy() TRT_NOEXCEPT {
delete this;
}
// Clone the plugin
// Deep-copies via the build-time constructor (re-uploads anchors).
IPluginV2IOExt* YoloLayerPlugin::clone() const TRT_NOEXCEPT {
YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, is_segmentation_, mYoloKernel);
p->setPluginNamespace(mPluginNamespace);
return p;
}
// Sigmoid activation used to decode YOLO raw outputs on the device.
__device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); };
// Decode one YOLO output scale. One thread per grid cell per batch image;
// each thread scans all kNumAnchor anchors of its cell. Detections are
// appended to the per-image output slot via an atomic counter stored in the
// first float of that slot.
__global__ void CalDetection(const float *input, float *output, int noElements,
const int netwidth, const int netheight, int maxoutobject, int yoloWidth,
int yoloHeight, const float anchors[kNumAnchor * 2], int classes, int outputElem, bool is_segmentation) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
// Split the linear index into (batch image, grid cell).
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid * bnIdx;
// Per-anchor record length: x,y,w,h,obj + class scores (+32 mask coeffs).
int info_len_i = 5 + classes;
if (is_segmentation) info_len_i += 32;
const float* curInput = input + bnIdx * (info_len_i * total_grid * kNumAnchor);
for (int k = 0; k < kNumAnchor; ++k) {
// Objectness gate; channels are stored planar (channel-major) per anchor.
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (box_prob < kIgnoreThresh) continue;
// Arg-max over class scores.
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < 5 + classes; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
// Reserve an output slot; the first float of the slot is the count.
// NOTE(review): when the slot is full this `return` abandons the
// remaining anchors of this thread as well — presumably acceptable
// since the buffer is already saturated.
float *res_count = output + bnIdx * outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= maxoutobject) return;
char *data = (char*)res_count + sizeof(float) + count * sizeof(Detection);
Detection *det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
// YOLOv5-style decode: xy offset in [-0.5, 1.5] scaled to net input,
// wh as (2*sigmoid)^2 times the anchor size.
det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth;
det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight;
det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k];
det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1];
det->conf = box_prob * max_cls_prob;
det->class_id = class_id;
// Raw (un-activated) mask coefficients, only for segmentation models.
for (int i = 0; is_segmentation && i < 32; i++) {
det->mask[i] = curInput[idx + k * info_len_i * total_grid + (i + 5 + classes) * total_grid];
}
}
}
// Launch CalDetection for every output scale. The first float of each
// image's output slot is zeroed first since it serves as the atomic
// detection counter.
void YoloLayerPlugin::forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize) {
int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float);
for (int idx = 0; idx < batchSize; ++idx) {
CUDA_CHECK(cudaMemsetAsync(output + idx * outputElem, 0, sizeof(float), stream));
}
int numElem = 0;
for (unsigned int i = 0; i < mYoloKernel.size(); ++i) {
const auto& yolo = mYoloKernel[i];
numElem = yolo.width * yolo.height * batchSize;
// Shrink the block size for tiny grids so the launch stays valid.
if (numElem < mThreadCount) mThreadCount = numElem;
CalDetection << < (numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream >> >
(inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float*)mAnchor[i], mClassCount, outputElem, is_segmentation_);
}
}
// TensorRT entry point: forwards to the CUDA implementation above.
int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void* TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT {
forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
// Creator statics: this plugin takes no registered plugin fields.
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator() {
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
// Name/version must match YoloLayerPlugin::getPluginType()/Version().
const char* YoloPluginCreator::getPluginName() const TRT_NOEXCEPT {
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const TRT_NOEXCEPT {
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames() TRT_NOEXCEPT {
return &mFC;
}
// Build a plugin from two fields: "netinfo" = {classes, input_w, input_h,
// max_outputs, is_segmentation}, "kernels" = raw YoloKernel array.
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT {
assert(fc->nbFields == 2);
assert(strcmp(fc->fields[0].name, "netinfo") == 0);
assert(strcmp(fc->fields[1].name, "kernels") == 0);
int *p_netinfo = (int*)(fc->fields[0].data);
int class_count = p_netinfo[0];
int input_w = p_netinfo[1];
int input_h = p_netinfo[2];
int max_output_object_count = p_netinfo[3];
bool is_segmentation = (bool)p_netinfo[4];
std::vector<YoloKernel> kernels(fc->fields[1].length);
memcpy(&kernels[0], fc->fields[1].data, kernels.size() * sizeof(YoloKernel));
YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, is_segmentation, kernels);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT {
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}

View File

@ -0,0 +1,106 @@
#pragma once
#include "types.h"
#include "macros.h"
#include <vector>
#include <string>
namespace nvinfer1 {
// TensorRT custom layer that decodes raw YOLO feature maps into Detection
// records (see CalDetection in yololayer.cu). Serializable so engines can
// be saved and reloaded.
class API YoloLayerPlugin : public IPluginV2IOExt {
public:
// Build-time constructor: network geometry + per-scale anchors.
YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, bool is_segmentation, const std::vector<YoloKernel>& vYoloKernel);
// Deserializing constructor: rebuild from a byte stream written by serialize().
YoloLayerPlugin(const void* data, size_t length);
~YoloLayerPlugin();
int getNbOutputs() const TRT_NOEXCEPT override { return 1; }
Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT override;
int initialize() TRT_NOEXCEPT override;
virtual void terminate() TRT_NOEXCEPT override {};
virtual size_t getWorkspaceSize(int maxBatchSize) const TRT_NOEXCEPT override { return 0; }
virtual int enqueue(int batchSize, const void* const* inputs, void*TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT override;
virtual size_t getSerializationSize() const TRT_NOEXCEPT override;
virtual void serialize(void* buffer) const TRT_NOEXCEPT override;
// Only linear-format FP32 tensors are supported.
bool supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const TRT_NOEXCEPT override {
return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kFLOAT;
}
const char* getPluginType() const TRT_NOEXCEPT override;
const char* getPluginVersion() const TRT_NOEXCEPT override;
void destroy() TRT_NOEXCEPT override;
IPluginV2IOExt* clone() const TRT_NOEXCEPT override;
void setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT override;
const char* getPluginNamespace() const TRT_NOEXCEPT override;
DataType getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT override;
bool isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT override;
bool canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT override;
void attachToContext(
cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT override;
void configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT override;
void detachFromContext() TRT_NOEXCEPT override;
private:
// Kernel launcher for enqueue().
void forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize = 1);
int mThreadCount = 256;  // CUDA block size (may shrink for tiny grids)
const char* mPluginNamespace;
int mKernelCount;  // number of YOLO output scales
int mClassCount;
int mYoloV5NetWidth;
int mYoloV5NetHeight;
int mMaxOutObject;  // capacity of the per-image detection buffer
bool is_segmentation_;  // whether outputs carry 32 mask coefficients
std::vector<YoloKernel> mYoloKernel;
void** mAnchor;  // pinned-host array of per-scale device anchor buffers
};
// Factory registered with TensorRT so serialized engines containing
// "YoloLayer_TRT" can be rebuilt (see REGISTER_TENSORRT_PLUGIN below).
class API YoloPluginCreator : public IPluginCreator {
public:
YoloPluginCreator();
~YoloPluginCreator() override = default;
const char* getPluginName() const TRT_NOEXCEPT override;
const char* getPluginVersion() const TRT_NOEXCEPT override;
const PluginFieldCollection* getFieldNames() TRT_NOEXCEPT override;
IPluginV2IOExt* createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT override;
IPluginV2IOExt* deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT override;
void setPluginNamespace(const char* libNamespace) TRT_NOEXCEPT override {
mNamespace = libNamespace;
}
const char* getPluginNamespace() const TRT_NOEXCEPT override {
return mNamespace.c_str();
}
private:
std::string mNamespace;
static PluginFieldCollection mFC;
static std::vector<PluginField> mPluginAttributes;
};
REGISTER_TENSORRT_PLUGIN(YoloPluginCreator);
};

View File

@ -0,0 +1,62 @@
#include "sv_common_det.h"
#include <cmath>
#include <fstream>
#ifdef WITH_CUDA
#include <NvInfer.h>
#include <cuda_runtime_api.h>
#include "common_det_cuda_impl.h"
#endif
namespace sv {
CommonObjectDetector::CommonObjectDetector()
{
    // Backend implementation is created unconditionally; the impl header
    // must be visible for this translation unit to compile.
    this->_cuda_impl = new CommonObjectDetectorCUDAImpl;
}
CommonObjectDetector::~CommonObjectDetector()
{
    // Release the impl allocated in the constructor (previously leaked).
    delete this->_cuda_impl;
}
// Initialize the CUDA backend; returns false when built without CUDA.
bool CommonObjectDetector::setupImpl()
{
#ifdef WITH_CUDA
    return this->_cuda_impl->cudaSetup(this);
#endif
    return false;
}
// Run detection on one image, filling the parallel output vectors
// (box geometry, class label, score, and per-box segmentation masks).
// No-op when built without CUDA: the outputs are left untouched.
void CommonObjectDetector::detectImpl(
cv::Mat img_,
std::vector<float>& boxes_x_,
std::vector<float>& boxes_y_,
std::vector<float>& boxes_w_,
std::vector<float>& boxes_h_,
std::vector<int>& boxes_label_,
std::vector<float>& boxes_score_,
std::vector<cv::Mat>& boxes_seg_
)
{
#ifdef WITH_CUDA
    this->_cuda_impl->cudaDetect(
        this,
        img_,
        boxes_x_,
        boxes_y_,
        boxes_w_,
        boxes_h_,
        boxes_label_,
        boxes_score_,
        boxes_seg_
    );
#endif
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,160 @@
#include "landing_det_cuda_impl.h"
#include <cmath>
#include <fstream>
#define SV_MODEL_DIR "/SpireCV/models/"
#define SV_ROOT_DIR "/SpireCV/"
#ifdef WITH_CUDA
#include "yolov7/logging.h"
#define TRTCHECK(status) \
do \
{ \
auto ret = (status); \
if (ret != 0) \
{ \
std::cerr << "Cuda failure: " << ret << std::endl; \
abort(); \
} \
} while (0)
#define DEVICE 0 // GPU id
#define BATCH_SIZE 1
#define MAX_IMAGE_INPUT_SIZE_THRESH 3000 * 3000 // ensure it exceed the maximum size in the input images !
#endif
namespace sv {
using namespace cv;
#ifdef WITH_CUDA
using namespace nvinfer1;
static Logger g_nvlogger;
#endif
LandingMarkerDetectorCUDAImpl::LandingMarkerDetectorCUDAImpl()
{
#ifdef WITH_CUDA
    // Start null so the destructor is safe even if cudaSetup() never ran.
    this->_p_data = nullptr;
    this->_p_prob = nullptr;
#endif
}
LandingMarkerDetectorCUDAImpl::~LandingMarkerDetectorCUDAImpl()
{
#ifdef WITH_CUDA
    // Release the host staging buffers new[]'d in cudaSetup() (previously
    // leaked). Device buffers, stream, and the TRT context keep the
    // original lifetime (freed at process exit) to avoid changing
    // teardown-order behavior.
    delete[] this->_p_data;
    delete[] this->_p_prob;
#endif
}
// Load the LandingMarker TensorRT engine, create the execution context,
// allocate device I/O buffers for a 1x3x32x32 input / 1x11 output, and do
// one warm-up inference. Returns true on success; false when built
// without CUDA.
bool LandingMarkerDetectorCUDAImpl::cudaSetup()
{
#ifdef WITH_CUDA
    std::string trt_model_fn = get_home() + SV_MODEL_DIR + "LandingMarker.engine";
    if (!is_file_exist(trt_model_fn))
    {
        throw std::runtime_error("SpireCV (104) Error loading the LandingMarker TensorRT model (File Not Exist)");
    }
    // Read the whole engine file into memory.
    // NOTE(review): ifstream does not throw by default, so this catch is
    // unlikely to fire; a failed open after the existence check would read
    // garbage silently. Also trt_model_stream is not freed on the throw
    // paths below — bounded one-shot leak, but worth confirming.
    char *trt_model_stream{nullptr};
    size_t trt_model_size{0};
    try
    {
        std::ifstream file(trt_model_fn, std::ios::binary);
        file.seekg(0, file.end);
        trt_model_size = file.tellg();
        file.seekg(0, file.beg);
        trt_model_stream = new char[trt_model_size];
        assert(trt_model_stream);
        file.read(trt_model_stream, trt_model_size);
        file.close();
    }
    catch (const std::runtime_error &e)
    {
        throw std::runtime_error("SpireCV (104) Error loading the TensorRT model!");
    }
    // TensorRT
    // NOTE(review): runtime and engine are never destroyed here; they must
    // outlive _trt_context anyway, so they are left to process exit.
    IRuntime *runtime = nvinfer1::createInferRuntime(g_nvlogger);
    assert(runtime != nullptr);
    ICudaEngine *p_cu_engine = runtime->deserializeCudaEngine(trt_model_stream, trt_model_size);
    assert(p_cu_engine != nullptr);
    this->_trt_context = p_cu_engine->createExecutionContext();
    assert(this->_trt_context != nullptr);
    delete[] trt_model_stream;
    // Resolve binding indices and allocate fixed-size device buffers:
    // input 1x3x32x32 floats, output 1x11 class scores.
    const ICudaEngine &cu_engine = this->_trt_context->getEngine();
    assert(cu_engine.getNbBindings() == 2);
    this->_input_index = cu_engine.getBindingIndex("input");
    this->_output_index = cu_engine.getBindingIndex("output");
    TRTCHECK(cudaMalloc(&_p_buffers[this->_input_index], 1 * 3 * 32 * 32 * sizeof(float)));
    TRTCHECK(cudaMalloc(&_p_buffers[this->_output_index], 1 * 11 * sizeof(float)));
    TRTCHECK(cudaStreamCreate(&_cu_stream));
    auto input_dims = nvinfer1::Dims4{1, 3, 32, 32};
    this->_trt_context->setBindingDimensions(this->_input_index, input_dims);
    // Host-side staging buffers reused by every cudaRoiCNN() call.
    this->_p_data = new float[1 * 3 * 32 * 32];
    this->_p_prob = new float[1 * 11];
    // Warm-up inference on the (uninitialized) staging buffer.
    // Input
    TRTCHECK(cudaMemcpyAsync(_p_buffers[this->_input_index], this->_p_data, 1 * 3 * 32 * 32 * sizeof(float), cudaMemcpyHostToDevice, this->_cu_stream));
    // this->_trt_context->enqueue(1, _p_buffers, this->_cu_stream, nullptr);
    this->_trt_context->enqueueV2(_p_buffers, this->_cu_stream, nullptr);
    // Output
    TRTCHECK(cudaMemcpyAsync(this->_p_prob, _p_buffers[this->_output_index], 1 * 11 * sizeof(float), cudaMemcpyDeviceToHost, this->_cu_stream));
    cudaStreamSynchronize(this->_cu_stream);
    return true;
#endif
    return false;
}
// Classify each 32x32 BGR ROI with the landing-marker CNN, appending the
// argmax class index (0..10) per ROI to output_labels_. ROIs are assumed
// to be 32x32 3-channel uint8 — TODO confirm the caller guarantees this.
void LandingMarkerDetectorCUDAImpl::cudaRoiCNN(
std::vector<cv::Mat>& input_rois_,
std::vector<int>& output_labels_
)
{
#ifdef WITH_CUDA
    output_labels_.clear();
    for (int i=0; i<input_rois_.size(); i++)
    {
        cv::Mat e_roi = input_rois_[i];
        // Repack HWC uint8 into planar CHW float with per-channel
        // standardization.
        for (int row = 0; row < 32; ++row)
        {
            uchar *uc_pixel = e_roi.data + row * e_roi.step;  // compute row id
            for (int col = 0; col < 32; ++col)
            {
                // mean=[136.20, 141.50, 145.41], std=[44.77, 44.20, 44.30]
                this->_p_data[col + row * 32] = ((float)uc_pixel[0] - 136.20f) / 44.77f;
                this->_p_data[col + row * 32 + 32 * 32] = ((float)uc_pixel[1] - 141.50f) / 44.20f;
                this->_p_data[col + row * 32 + 32 * 32 * 2] = ((float)uc_pixel[2] - 145.41f) / 44.30f;
                uc_pixel += 3;
            }
        }
        // Input
        TRTCHECK(cudaMemcpyAsync(_p_buffers[this->_input_index], this->_p_data, 1 * 3 * 32 * 32 * sizeof(float), cudaMemcpyHostToDevice, this->_cu_stream));
        // this->_trt_context->enqueue(1, _p_buffers, this->_cu_stream, nullptr);
        this->_trt_context->enqueueV2(_p_buffers, this->_cu_stream, nullptr);
        // Output
        TRTCHECK(cudaMemcpyAsync(this->_p_prob, _p_buffers[this->_output_index], 1 * 11 * sizeof(float), cudaMemcpyDeviceToHost, this->_cu_stream));
        cudaStreamSynchronize(this->_cu_stream);
        // Find max index
        // (this inner `i` intentionally shadows the ROI loop index)
        double max = 0;
        int label = 0;
        for (int i = 0; i < 11; ++i)
        {
            if (max < this->_p_prob[i])
            {
                max = this->_p_prob[i];
                label = i;
            }
        }
        output_labels_.push_back(label);
    }
#endif
}
}

View File

@ -0,0 +1,48 @@
#ifndef __SV_LANDING_DET_CUDA__
#define __SV_LANDING_DET_CUDA__
#include "sv_core.h"
#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/tracking.hpp>
#include <string>
#include <chrono>
#ifdef WITH_CUDA
#include <NvInfer.h>
#include <cuda_runtime_api.h>
#endif
namespace sv {
// TensorRT-backed classifier for landing-marker ROIs (32x32 input,
// 11 classes). Implementation detail of sv::LandingMarkerDetector.
class LandingMarkerDetectorCUDAImpl
{
public:
    LandingMarkerDetectorCUDAImpl();
    ~LandingMarkerDetectorCUDAImpl();
    // Load the engine and allocate buffers; see landing_det_cuda_impl.cpp.
    bool cudaSetup();
    // Classify each ROI; appends one label per ROI to output_labels_.
    void cudaRoiCNN(
        std::vector<cv::Mat>& input_rois_,
        std::vector<int>& output_labels_
    );
#ifdef WITH_CUDA
    float *_p_data;   // host staging buffer, 1x3x32x32 floats
    float *_p_prob;   // host output buffer, 1x11 class scores
    nvinfer1::IExecutionContext *_trt_context;
    int _input_index;   // engine binding index of "input"
    int _output_index;  // engine binding index of "output"
    void *_p_buffers[2];  // device buffers, indexed by binding index
    cudaStream_t _cu_stream;
#endif
};
}
#endif

View File

@ -0,0 +1,48 @@
#include "sv_landing_det.h"
#include <cmath>
#include <fstream>
#ifdef WITH_CUDA
#include <NvInfer.h>
#include <cuda_runtime_api.h>
#include "landing_det_cuda_impl.h"
#endif
namespace sv {
LandingMarkerDetector::LandingMarkerDetector()
{
    this->_cuda_impl = new LandingMarkerDetectorCUDAImpl;
}
LandingMarkerDetector::~LandingMarkerDetector()
{
    // Release the impl allocated in the constructor (previously leaked).
    delete this->_cuda_impl;
}
// Initialize the CUDA backend; returns false when built without CUDA.
bool LandingMarkerDetector::setupImpl()
{
#ifdef WITH_CUDA
    return this->_cuda_impl->cudaSetup();
#endif
    return false;
}
// Classify marker ROIs via the CUDA backend. No-op (outputs untouched)
// when built without CUDA.
void LandingMarkerDetector::roiCNN(
std::vector<cv::Mat>& input_rois_,
std::vector<int>& output_labels_
)
{
#ifdef WITH_CUDA
    this->_cuda_impl->cudaRoiCNN(
        input_rois_,
        output_labels_
    );
#endif
}
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,135 @@
#include "tracking_ocv470_impl.h"
#include <cmath>
#include <fstream>
#define SV_MODEL_DIR "/SpireCV/models/"
#define SV_ROOT_DIR "/SpireCV/"
namespace sv {
using namespace cv;
// Trackers are held in cv::Ptr (reference-counted), so there is nothing to
// allocate here or release in the destructor.
SingleObjectTrackerOCV470Impl::SingleObjectTrackerOCV470Impl()
{
}
SingleObjectTrackerOCV470Impl::~SingleObjectTrackerOCV470Impl()
{
}
// Cache the caller's algorithm/backend/target and pre-create the DNN-based
// trackers (NanoTrack, DaSiamRPN). KCF/CSRT need no models and are created
// lazily in ocv470Init(). Returns false when built without OpenCV 4.7, or
// when the model files for the selected DNN algorithm failed to load.
bool SingleObjectTrackerOCV470Impl::ocv470Setup(SingleObjectTrackerBase* base_)
{
    this->_algorithm = base_->getAlgorithm();
    this->_backend = base_->getBackend();
    this->_target = base_->getTarget();
#ifdef WITH_OCV470
    std::string net = get_home() + SV_MODEL_DIR + "dasiamrpn_model.onnx";
    std::string kernel_cls1 = get_home() + SV_MODEL_DIR + "dasiamrpn_kernel_cls1.onnx";
    std::string kernel_r1 = get_home() + SV_MODEL_DIR + "dasiamrpn_kernel_r1.onnx";
    std::string backbone = get_home() + SV_MODEL_DIR + "nanotrack_backbone_sim.onnx";
    std::string neckhead = get_home() + SV_MODEL_DIR + "nanotrack_head_sim.onnx";
    try
    {
        TrackerNano::Params nano_params;
        nano_params.backbone = samples::findFile(backbone);
        nano_params.neckhead = samples::findFile(neckhead);
        nano_params.backend = this->_backend;
        nano_params.target = this->_target;
        _nano = TrackerNano::create(nano_params);
    }
    catch (const cv::Exception& ee)
    {
        std::cerr << "Exception: " << ee.what() << std::endl;
        std::cout << "Can't load the network by using the following files:" << std::endl;
        std::cout << "nanoBackbone : " << backbone << std::endl;
        std::cout << "nanoNeckhead : " << neckhead << std::endl;
    }
    try
    {
        TrackerDaSiamRPN::Params params;
        params.model = samples::findFile(net);
        params.kernel_cls1 = samples::findFile(kernel_cls1);
        params.kernel_r1 = samples::findFile(kernel_r1);
        params.backend = this->_backend;
        params.target = this->_target;
        _siam_rpn = TrackerDaSiamRPN::create(params);
    }
    catch (const cv::Exception& ee)
    {
        std::cerr << "Exception: " << ee.what() << std::endl;
        std::cout << "Can't load the network by using the following files:" << std::endl;
        std::cout << "siamRPN : " << net << std::endl;
        std::cout << "siamKernelCL1 : " << kernel_cls1 << std::endl;
        std::cout << "siamKernelR1 : " << kernel_r1 << std::endl;
    }
    // Previously this returned true unconditionally, so a failed model load
    // above surfaced later as a null dereference in ocv470Init(). Report
    // failure when the tracker the caller selected could not be created.
    if (this->_algorithm == "nano" && !_nano)
    {
        return false;
    }
    if (this->_algorithm == "siamrpn" && !_siam_rpn)
    {
        return false;
    }
    return true;
#endif
    return false;
}
// (Re)initialize the selected tracker on a new target box. KCF and CSRT
// instances are created fresh here; "siamrpn"/"nano" reuse the trackers
// built in ocv470Setup() (which must have succeeded first). An unknown
// algorithm string silently does nothing.
void SingleObjectTrackerOCV470Impl::ocv470Init(cv::Mat img_, const cv::Rect& bounding_box_)
{
#ifdef WITH_OCV470
    if (this->_algorithm == "kcf")
    {
        TrackerKCF::Params params;
        _kcf = TrackerKCF::create(params);
        _kcf->init(img_, bounding_box_);
    }
    else if (this->_algorithm == "csrt")
    {
        TrackerCSRT::Params params;
        _csrt = TrackerCSRT::create(params);
        _csrt->init(img_, bounding_box_);
    }
    else if (this->_algorithm == "siamrpn")
    {
        _siam_rpn->init(img_, bounding_box_);
    }
    else if (this->_algorithm == "nano")
    {
        _nano->init(img_, bounding_box_);
    }
#endif
}
// Advances the currently selected tracker on a new frame, writing the updated
// box into output_bbox_. Returns the tracker's update() result (false on
// tracking failure, and also for unknown algorithm names or when built
// without WITH_OCV470).
bool SingleObjectTrackerOCV470Impl::ocv470Track(cv::Mat img_, cv::Rect& output_bbox_)
{
#ifdef WITH_OCV470
    bool ok = false;
    if (this->_algorithm == "kcf")
    {
        ok = _kcf->update(img_, output_bbox_);
    }
    else if (this->_algorithm == "csrt")
    {
        ok = _csrt->update(img_, output_bbox_);
    }
    else if (this->_algorithm == "siamrpn")
    {
        ok = _siam_rpn->update(img_, output_bbox_);
    }
    else if (this->_algorithm == "nano")
    {
        ok = _nano->update(img_, output_bbox_);
    }
    return ok;
#endif
    return false;
}
}

View File

@ -0,0 +1,40 @@
#ifndef __SV_TRACKING_OCV470__
#define __SV_TRACKING_OCV470__
#include "sv_core.h"
#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/tracking.hpp>
#include <string>
#include <chrono>
namespace sv {
// OpenCV-4.7 implementation backend ("pimpl") for single-object tracking.
// Wraps the KCF, CSRT, DaSiamRPN and NanoTrack trackers behind a common
// setup/init/track interface; the cv::Ptr members only exist WITH_OCV470.
class SingleObjectTrackerOCV470Impl
{
public:
  SingleObjectTrackerOCV470Impl();
  ~SingleObjectTrackerOCV470Impl();
  // Caches algorithm/backend/target from the owner and pre-loads DNN models.
  bool ocv470Setup(SingleObjectTrackerBase* base_);
  // (Re)initializes the selected tracker on a first frame + bounding box.
  void ocv470Init(cv::Mat img_, const cv::Rect& bounding_box_);
  // Advances tracking one frame; returns false when the tracker loses target.
  bool ocv470Track(cv::Mat img_, cv::Rect& output_bbox_);
  std::string _algorithm;  // "kcf", "csrt", "siamrpn" or "nano"
  int _backend;            // DNN backend id forwarded to the tracker params
  int _target;             // DNN target id forwarded to the tracker params
#ifdef WITH_OCV470
  cv::Ptr<cv::TrackerDaSiamRPN> _siam_rpn;
  cv::Ptr<cv::TrackerKCF> _kcf;
  cv::Ptr<cv::TrackerCSRT> _csrt;
  cv::Ptr<cv::TrackerNano> _nano;
#endif
};
}
#endif

View File

@ -0,0 +1,41 @@
#include "sv_tracking.h"
#include <cmath>
#include <fstream>
#include "tracking_ocv470_impl.h"
namespace sv {
// Eagerly allocates the OpenCV-4.7 backend even when WITH_OCV470 is off
// (the *Impl wrappers then simply return their fallbacks).
// NOTE(review): confirm the pimpl is released in the destructor.
SingleObjectTracker::SingleObjectTracker()
{
    this->_ocv470_impl = new SingleObjectTrackerOCV470Impl;
}
// Releases the backend allocated in the constructor.
// Fix: the original destructor body was empty, so every SingleObjectTracker
// leaked its _ocv470_impl (allocated with `new` in the constructor).
SingleObjectTracker::~SingleObjectTracker()
{
    delete this->_ocv470_impl;
    this->_ocv470_impl = nullptr;  // guard against accidental double-delete/use
}
// Delegates setup to the OpenCV-4.7 backend; false when support is compiled out.
bool SingleObjectTracker::setupImpl()
{
#ifdef WITH_OCV470
    return this->_ocv470_impl->ocv470Setup(this);
#endif
    return false;
}
// Delegates first-frame initialization to the backend; no-op without WITH_OCV470.
void SingleObjectTracker::initImpl(cv::Mat img_, const cv::Rect& bounding_box_)
{
#ifdef WITH_OCV470
    this->_ocv470_impl->ocv470Init(img_, bounding_box_);
#endif
}
// Delegates per-frame tracking to the backend; false when support is compiled out.
bool SingleObjectTracker::trackImpl(cv::Mat img_, cv::Rect& output_bbox_)
{
#ifdef WITH_OCV470
    return this->_ocv470_impl->ocv470Track(img_, output_bbox_);
#endif
    return false;
}
}

9
build_on_jetson.sh Executable file
View File

@ -0,0 +1,9 @@
#!/bin/bash -e
# Fresh out-of-source build and system-wide install for NVIDIA Jetson.
# PLATFORM=JETSON enables the CUDA and GStreamer paths in CMakeLists.txt.
# -e aborts on the first failing command (including a failed cd).
rm -rf build
mkdir build
cd build
cmake .. -DPLATFORM=JETSON
make -j4
sudo make install

9
build_on_x86_cuda.sh Executable file
View File

@ -0,0 +1,9 @@
#!/bin/bash -e
# Fresh out-of-source build and system-wide install for x86 with CUDA.
# PLATFORM=X86_CUDA enables the CUDA and FFmpeg paths in CMakeLists.txt.
# -e aborts on the first failing command (including a failed cd).
rm -rf build
mkdir build
cd build
cmake .. -DPLATFORM=X86_CUDA
make -j4
sudo make install

View File

@ -0,0 +1,63 @@
# Serial Communication Library
[![Build Status](https://travis-ci.org/wjwwood/serial.svg?branch=master)](https://travis-ci.org/wjwwood/serial)*(Linux and OS X)* [![Build Status](https://ci.appveyor.com/api/projects/status/github/wjwwood/serial)](https://ci.appveyor.com/project/wjwwood/serial)*(Windows)*
This is a cross-platform library, written in C++, for interfacing with RS-232-style serial ports. It provides a modern C++ interface with a workflow designed to look and feel like PySerial, but with the speed and control provided by C++.
This library is in use in several robotics related projects and can be built and installed to the OS like most unix libraries with make and then sudo make install, but because it is a catkin project it can also be built along side other catkin projects in a catkin workspace.
Serial is a class that provides the basic interface common to serial libraries (open, close, read, write, etc..) and requires no extra dependencies. It also provides tight control over timeouts and control over handshaking lines.
### Documentation
Website: http://wjwwood.github.io/serial/
API Documentation: http://wjwwood.github.io/serial/doc/1.1.0/index.html
### Dependencies
Required:
* [catkin](http://www.ros.org/wiki/catkin) - cmake and Python based buildsystem
* [cmake](http://www.cmake.org) - buildsystem
* [Python](http://www.python.org) - scripting language
* [empy](http://www.alcyone.com/pyos/empy/) - Python templating library
* [catkin_pkg](http://pypi.python.org/pypi/catkin_pkg/) - Runtime Python library for catkin
Optional (for documentation):
* [Doxygen](http://www.doxygen.org/) - Documentation generation tool
* [graphviz](http://www.graphviz.org/) - Graph visualization software
### Install
Get the code:
git clone https://github.com/wjwwood/serial.git
Build:
make
Build and run the tests:
make test
Build the documentation:
make doc
Install:
make install
### License
[The MIT License](LICENSE)
### Authors
William Woodall <wjwwood@gmail.com>
John Harrison <ash.gti@gmail.com>
### Contact
William Woodall <william@osrfoundation.org>

View File

@ -0,0 +1,221 @@
/*!
* \file serial/impl/unix.h
* \author William Woodall <wjwwood@gmail.com>
* \author John Harrison <ash@greaterthaninfinity.com>
* \version 0.1
*
* \section LICENSE
*
* The MIT License
*
* Copyright (c) 2012 William Woodall, John Harrison
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* \section DESCRIPTION
*
* This provides a unix based pimpl for the Serial class. This implementation is
* based off termios.h and uses select for multiplexing the IO ports.
*
*/
#if !defined(_WIN32)
#ifndef SERIAL_IMPL_UNIX_H
#define SERIAL_IMPL_UNIX_H
#include "serial/serial.h"
#include <pthread.h>
namespace serial {
using std::size_t;
using std::string;
using std::invalid_argument;
using serial::SerialException;
using serial::IOException;
// Simple deadline helper: constructed with a timeout in milliseconds,
// remaining() reports the signed time left until `expiry` (negative once the
// deadline has passed; presumably in milliseconds -- confirm in the .cc file).
class MillisecondTimer {
public:
  MillisecondTimer(const uint32_t millis);
  int64_t remaining();
private:
  static timespec timespec_now();  // current time as a timespec
  timespec expiry;                 // absolute deadline
};
class serial::Serial::SerialImpl {
public:
SerialImpl (const string &port,
unsigned long baudrate,
bytesize_t bytesize,
parity_t parity,
stopbits_t stopbits,
flowcontrol_t flowcontrol);
virtual ~SerialImpl ();
void
open ();
void
close ();
bool
isOpen () const;
size_t
available ();
bool
waitReadable (uint32_t timeout);
void
waitByteTimes (size_t count);
size_t
read (uint8_t *buf, size_t size = 1);
size_t
write (const uint8_t *data, size_t length);
void
flush ();
void
flushInput ();
void
flushOutput ();
void
sendBreak (int duration);
void
setBreak (bool level);
void
setRTS (bool level);
void
setDTR (bool level);
bool
waitForChange ();
bool
getCTS ();
bool
getDSR ();
bool
getRI ();
bool
getCD ();
void
setPort (const string &port);
string
getPort () const;
void
setTimeout (Timeout &timeout);
Timeout
getTimeout () const;
void
setBaudrate (unsigned long baudrate);
unsigned long
getBaudrate () const;
void
setBytesize (bytesize_t bytesize);
bytesize_t
getBytesize () const;
void
setParity (parity_t parity);
parity_t
getParity () const;
void
setStopbits (stopbits_t stopbits);
stopbits_t
getStopbits () const;
void
setFlowcontrol (flowcontrol_t flowcontrol);
flowcontrol_t
getFlowcontrol () const;
void
readLock ();
void
readUnlock ();
void
writeLock ();
void
writeUnlock ();
protected:
void reconfigurePort ();
private:
string port_; // Path to the file descriptor
int fd_; // The current file descriptor
bool is_open_;
bool xonxoff_;
bool rtscts_;
Timeout timeout_; // Timeout for read operations
unsigned long baudrate_; // Baudrate
uint32_t byte_time_ns_; // Nanoseconds to transmit/receive a single byte
parity_t parity_; // Parity
bytesize_t bytesize_; // Size of the bytes
stopbits_t stopbits_; // Stop Bits
flowcontrol_t flowcontrol_; // Flow Control
// Mutex used to lock the read functions
pthread_mutex_t read_mutex;
// Mutex used to lock the write functions
pthread_mutex_t write_mutex;
};
}
#endif // SERIAL_IMPL_UNIX_H
#endif // !defined(_WIN32)

View File

@ -0,0 +1,207 @@
/*!
* \file serial/impl/windows.h
* \author William Woodall <wjwwood@gmail.com>
* \author John Harrison <ash@greaterthaninfinity.com>
* \version 0.1
*
* \section LICENSE
*
* The MIT License
*
* Copyright (c) 2012 William Woodall, John Harrison
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* \section DESCRIPTION
*
* This provides a windows implementation of the Serial class interface.
*
*/
#if defined(_WIN32)
#ifndef SERIAL_IMPL_WINDOWS_H
#define SERIAL_IMPL_WINDOWS_H
#include "serial/serial.h"
#include "windows.h"
namespace serial {
using std::string;
using std::wstring;
using std::invalid_argument;
using serial::SerialException;
using serial::IOException;
class serial::Serial::SerialImpl {
public:
SerialImpl (const string &port,
unsigned long baudrate,
bytesize_t bytesize,
parity_t parity,
stopbits_t stopbits,
flowcontrol_t flowcontrol);
virtual ~SerialImpl ();
void
open ();
void
close ();
bool
isOpen () const;
size_t
available ();
bool
waitReadable (uint32_t timeout);
void
waitByteTimes (size_t count);
size_t
read (uint8_t *buf, size_t size = 1);
size_t
write (const uint8_t *data, size_t length);
void
flush ();
void
flushInput ();
void
flushOutput ();
void
sendBreak (int duration);
void
setBreak (bool level);
void
setRTS (bool level);
void
setDTR (bool level);
bool
waitForChange ();
bool
getCTS ();
bool
getDSR ();
bool
getRI ();
bool
getCD ();
void
setPort (const string &port);
string
getPort () const;
void
setTimeout (Timeout &timeout);
Timeout
getTimeout () const;
void
setBaudrate (unsigned long baudrate);
unsigned long
getBaudrate () const;
void
setBytesize (bytesize_t bytesize);
bytesize_t
getBytesize () const;
void
setParity (parity_t parity);
parity_t
getParity () const;
void
setStopbits (stopbits_t stopbits);
stopbits_t
getStopbits () const;
void
setFlowcontrol (flowcontrol_t flowcontrol);
flowcontrol_t
getFlowcontrol () const;
void
readLock ();
void
readUnlock ();
void
writeLock ();
void
writeUnlock ();
protected:
void reconfigurePort ();
private:
wstring port_; // Path to the file descriptor
HANDLE fd_;
bool is_open_;
Timeout timeout_; // Timeout for read operations
unsigned long baudrate_; // Baudrate
parity_t parity_; // Parity
bytesize_t bytesize_; // Size of the bytes
stopbits_t stopbits_; // Stop Bits
flowcontrol_t flowcontrol_; // Flow Control
// Mutex used to lock the read functions
HANDLE read_mutex;
// Mutex used to lock the write functions
HANDLE write_mutex;
};
}
#endif // SERIAL_IMPL_WINDOWS_H
#endif // if defined(_WIN32)

View File

@ -0,0 +1,796 @@
/*!
* \file serial/serial.h
* \author William Woodall <wjwwood@gmail.com>
* \author John Harrison <ash.gti@gmail.com>
* \version 0.1
*
* \section LICENSE
*
* The MIT License
*
* Copyright (c) 2012 William Woodall
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* \section DESCRIPTION
*
* This provides a cross platform interface for interacting with Serial Ports.
*/
#ifndef SERIAL_H
#define SERIAL_H
#include <limits>
#include <vector>
#include <string>
#include <cstring>
#include <sstream>
#include <exception>
#include <stdexcept>
#include <serial/v8stdint.h>
#define THROW(exceptionClass, message) throw exceptionClass(__FILE__, \
__LINE__, (message))
namespace serial
{
/*!
* Enumeration defines the possible bytesizes for the serial port.
*/
typedef enum
{
fivebits = 5,
sixbits = 6,
sevenbits = 7,
eightbits = 8
} bytesize_t;
/*!
* Enumeration defines the possible parity types for the serial port.
*/
typedef enum
{
parity_none = 0,
parity_odd = 1,
parity_even = 2,
parity_mark = 3,
parity_space = 4
} parity_t;
/*!
* Enumeration defines the possible stopbit types for the serial port.
*/
typedef enum
{
stopbits_one = 1,
stopbits_two = 2,
stopbits_one_point_five
} stopbits_t;
/*!
* Enumeration defines the possible flowcontrol types for the serial port.
*/
typedef enum
{
flowcontrol_none = 0,
flowcontrol_software,
flowcontrol_hardware
} flowcontrol_t;
/*!
* Structure for setting the timeout of the serial port, times are
* in milliseconds.
*
* In order to disable the interbyte timeout, set it to Timeout::max().
*/
struct Timeout
{
#ifdef max
#undef max
#endif
static uint32_t max()
{
return std::numeric_limits<uint32_t>::max();
}
/*!
* Convenience function to generate Timeout structs using a
* single absolute timeout.
*
* \param timeout A long that defines the time in milliseconds until a
* timeout occurs after a call to read or write is made.
*
* \return Timeout struct that represents this simple timeout provided.
*/
static Timeout simpleTimeout(uint32_t timeout)
{
return Timeout(max(), timeout, 0, timeout, 0);
}
/*! Number of milliseconds between bytes received to timeout on. */
uint32_t inter_byte_timeout;
/*! A constant number of milliseconds to wait after calling read. */
uint32_t read_timeout_constant;
/*! A multiplier against the number of requested bytes to wait after
* calling read.
*/
uint32_t read_timeout_multiplier;
/*! A constant number of milliseconds to wait after calling write. */
uint32_t write_timeout_constant;
/*! A multiplier against the number of requested bytes to wait after
* calling write.
*/
uint32_t write_timeout_multiplier;
explicit Timeout(uint32_t inter_byte_timeout_ = 0,
uint32_t read_timeout_constant_ = 0,
uint32_t read_timeout_multiplier_ = 0,
uint32_t write_timeout_constant_ = 0,
uint32_t write_timeout_multiplier_ = 0)
: inter_byte_timeout(inter_byte_timeout_),
read_timeout_constant(read_timeout_constant_),
read_timeout_multiplier(read_timeout_multiplier_),
write_timeout_constant(write_timeout_constant_),
write_timeout_multiplier(write_timeout_multiplier_)
{
}
};
/*!
* Class that provides a portable serial port interface.
*/
class Serial
{
public:
/*!
* Creates a Serial object and opens the port if a port is specified,
* otherwise it remains closed until serial::Serial::open is called.
*
* \param port A std::string containing the address of the serial port,
* which would be something like 'COM1' on Windows and '/dev/ttyS0'
* on Linux.
*
* \param baudrate An unsigned 32-bit integer that represents the baudrate
*
* \param timeout A serial::Timeout struct that defines the timeout
* conditions for the serial port. \see serial::Timeout
*
* \param bytesize Size of each byte in the serial transmission of data,
* default is eightbits, possible values are: fivebits, sixbits, sevenbits,
* eightbits
*
* \param parity Method of parity, default is parity_none, possible values
* are: parity_none, parity_odd, parity_even
*
* \param stopbits Number of stop bits used, default is stopbits_one,
* possible values are: stopbits_one, stopbits_one_point_five, stopbits_two
*
* \param flowcontrol Type of flowcontrol used, default is
* flowcontrol_none, possible values are: flowcontrol_none,
* flowcontrol_software, flowcontrol_hardware
*
* \throw serial::PortNotOpenedException
* \throw serial::IOException
* \throw std::invalid_argument
*/
Serial(const std::string &port = "",
uint32_t baudrate = 9600,
Timeout timeout = Timeout(),
bytesize_t bytesize = eightbits,
parity_t parity = parity_none,
stopbits_t stopbits = stopbits_one,
flowcontrol_t flowcontrol = flowcontrol_none);
/*! Destructor */
virtual ~Serial();
/*!
* Opens the serial port as long as the port is set and the port isn't
* already open.
*
* If the port is provided to the constructor then an explicit call to open
* is not needed.
*
* \see Serial::Serial
*
* \throw std::invalid_argument
* \throw serial::SerialException
* \throw serial::IOException
*/
void
open();
/*! Gets the open status of the serial port.
*
* \return Returns true if the port is open, false otherwise.
*/
bool
isOpen() const;
/*! Closes the serial port. */
void
close();
/*! Return the number of characters in the buffer. */
size_t
available();
/*! Block until there is serial data to read or read_timeout_constant
* number of milliseconds have elapsed. The return value is true when
* the function exits with the port in a readable state, false otherwise
* (due to timeout or select interruption). */
bool
waitReadable();
/*! Block for a period of time corresponding to the transmission time of
* count characters at present serial settings. This may be used in con-
* junction with waitReadable to read larger blocks of data from the
* port. */
void
waitByteTimes(size_t count);
/*! Read a given amount of bytes from the serial port into a given buffer.
*
* The read function will return in one of three cases:
* * The number of requested bytes was read.
* * In this case the number of bytes requested will match the size_t
* returned by read.
* * A timeout occurred, in this case the number of bytes read will not
* match the amount requested, but no exception will be thrown. One of
* two possible timeouts occurred:
* * The inter byte timeout expired, this means that number of
* milliseconds elapsed between receiving bytes from the serial port
* exceeded the inter byte timeout.
* * The total timeout expired, which is calculated by multiplying the
* read timeout multiplier by the number of requested bytes and then
* added to the read timeout constant. If that total number of
* milliseconds elapses after the initial call to read a timeout will
* occur.
* * An exception occurred, in this case an actual exception will be thrown.
*
* \param buffer An uint8_t array of at least the requested size.
* \param size A size_t defining how many bytes to be read.
*
* \return A size_t representing the number of bytes read as a result of the
* call to read.
*
* \throw serial::PortNotOpenedException
* \throw serial::SerialException
*/
size_t
read(uint8_t *buffer, size_t size);
/*! Read a given amount of bytes from the serial port into a give buffer.
*
* \param buffer A reference to a std::vector of uint8_t.
* \param size A size_t defining how many bytes to be read.
*
* \return A size_t representing the number of bytes read as a result of the
* call to read.
*
* \throw serial::PortNotOpenedException
* \throw serial::SerialException
*/
size_t
read(std::vector<uint8_t> &buffer, size_t size = 1);
/*! Read a given amount of bytes from the serial port into a give buffer.
*
* \param buffer A reference to a std::string.
* \param size A size_t defining how many bytes to be read.
*
* \return A size_t representing the number of bytes read as a result of the
* call to read.
*
* \throw serial::PortNotOpenedException
* \throw serial::SerialException
*/
size_t
read(std::string &buffer, size_t size = 1);
/*! Read a given amount of bytes from the serial port and return a string
* containing the data.
*
* \param size A size_t defining how many bytes to be read.
*
* \return A std::string containing the data read from the port.
*
* \throw serial::PortNotOpenedException
* \throw serial::SerialException
*/
std::string
read(size_t size = 1);
/*! Reads in a line or until a given delimiter has been processed.
*
* Reads from the serial port until a single line has been read.
*
* \param buffer A std::string reference used to store the data.
* \param size A maximum length of a line, defaults to 65536 (2^16)
* \param eol A string to match against for the EOL.
*
* \return A size_t representing the number of bytes read.
*
* \throw serial::PortNotOpenedException
* \throw serial::SerialException
*/
size_t
readline(std::string &buffer, size_t size = 65536, std::string eol = "\n");
/*! Reads in a line or until a given delimiter has been processed.
*
* Reads from the serial port until a single line has been read.
*
* \param size A maximum length of a line, defaults to 65536 (2^16)
* \param eol A string to match against for the EOL.
*
* \return A std::string containing the line.
*
* \throw serial::PortNotOpenedException
* \throw serial::SerialException
*/
std::string
readline(size_t size = 65536, std::string eol = "\n");
/*! Reads in multiple lines until the serial port times out.
*
* This requires a timeout > 0 before it can be run. It will read until a
* timeout occurs and return a list of strings.
*
* \param size A maximum length of combined lines, defaults to 65536 (2^16)
*
* \param eol A string to match against for the EOL.
*
* \return A vector<string> containing the lines.
*
* \throw serial::PortNotOpenedException
* \throw serial::SerialException
*/
std::vector<std::string>
readlines(size_t size = 65536, std::string eol = "\n");
/*! Write a string to the serial port.
*
* \param data A const reference containing the data to be written
* to the serial port.
*
* \param size A size_t that indicates how many bytes should be written from
* the given data buffer.
*
* \return A size_t representing the number of bytes actually written to
* the serial port.
*
* \throw serial::PortNotOpenedException
* \throw serial::SerialException
* \throw serial::IOException
*/
size_t
write(const uint8_t *data, size_t size);
/*! Write a string to the serial port.
*
* \param data A const reference containing the data to be written
* to the serial port.
*
* \return A size_t representing the number of bytes actually written to
* the serial port.
*
* \throw serial::PortNotOpenedException
* \throw serial::SerialException
* \throw serial::IOException
*/
size_t
write(const std::vector<uint8_t> &data);
/*! Write a string to the serial port.
*
* \param data A const reference containing the data to be written
* to the serial port.
*
* \return A size_t representing the number of bytes actually written to
* the serial port.
*
* \throw serial::PortNotOpenedException
* \throw serial::SerialException
* \throw serial::IOException
*/
size_t
write(const std::string &data);
/*! Sets the serial port identifier.
*
* \param port A const std::string reference containing the address of the
* serial port, which would be something like 'COM1' on Windows and
* '/dev/ttyS0' on Linux.
*
* \throw std::invalid_argument
*/
void
setPort(const std::string &port);
/*! Gets the serial port identifier.
*
* \see Serial::setPort
*
* \throw std::invalid_argument
*/
std::string
getPort() const;
/*! Sets the timeout for reads and writes using the Timeout struct.
*
* There are two timeout conditions described here:
* * The inter byte timeout:
* * The inter_byte_timeout component of serial::Timeout defines the
* maximum amount of time, in milliseconds, between receiving bytes on
* the serial port that can pass before a timeout occurs. Setting this
* to zero will prevent inter byte timeouts from occurring.
* * Total time timeout:
* * The constant and multiplier component of this timeout condition,
* for both read and write, are defined in serial::Timeout. This
* timeout occurs if the total time since the read or write call was
* made exceeds the specified time in milliseconds.
* * The limit is defined by multiplying the multiplier component by the
* number of requested bytes and adding that product to the constant
* component. In this way if you want a read call, for example, to
* timeout after exactly one second regardless of the number of bytes
* you asked for then set the read_timeout_constant component of
* serial::Timeout to 1000 and the read_timeout_multiplier to zero.
* This timeout condition can be used in conjunction with the inter
* byte timeout condition with out any problems, timeout will simply
* occur when one of the two timeout conditions is met. This allows
* users to have maximum control over the trade-off between
* responsiveness and efficiency.
*
* Read and write functions will return in one of three cases. When the
* reading or writing is complete, when a timeout occurs, or when an
* exception occurs.
*
* A timeout of 0 enables non-blocking mode.
*
* \param timeout A serial::Timeout struct containing the inter byte
* timeout, and the read and write timeout constants and multipliers.
*
* \see serial::Timeout
*/
void
setTimeout(Timeout &timeout);
/*! Sets the timeout for reads and writes. */
void
setTimeout(uint32_t inter_byte_timeout, uint32_t read_timeout_constant,
uint32_t read_timeout_multiplier, uint32_t write_timeout_constant,
uint32_t write_timeout_multiplier)
{
Timeout timeout(inter_byte_timeout, read_timeout_constant,
read_timeout_multiplier, write_timeout_constant,
write_timeout_multiplier);
return setTimeout(timeout);
}
/*! Gets the timeout for reads in seconds.
*
* \return A Timeout struct containing the inter_byte_timeout, and read
* and write timeout constants and multipliers.
*
* \see Serial::setTimeout
*/
Timeout
getTimeout() const;
/*! Sets the baudrate for the serial port.
*
* Possible baudrates depends on the system but some safe baudrates include:
* 110, 300, 600, 1200, 2400, 4800, 9600, 14400, 19200, 28800, 38400, 56000,
* 57600, 115200
* Some other baudrates that are supported by some comports:
* 128000, 153600, 230400, 256000, 460800, 500000, 921600
*
* \param baudrate An integer that sets the baud rate for the serial port.
*
* \throw std::invalid_argument
*/
void
setBaudrate(uint32_t baudrate);
/*! Gets the baudrate for the serial port.
*
* \return An integer that sets the baud rate for the serial port.
*
* \see Serial::setBaudrate
*
* \throw std::invalid_argument
*/
uint32_t
getBaudrate() const;
/*! Sets the bytesize for the serial port.
*
* \param bytesize Size of each byte in the serial transmission of data,
* default is eightbits, possible values are: fivebits, sixbits, sevenbits,
* eightbits
*
* \throw std::invalid_argument
*/
void
setBytesize(bytesize_t bytesize);
/*! Gets the bytesize for the serial port.
*
* \see Serial::setBytesize
*
* \throw std::invalid_argument
*/
bytesize_t
getBytesize() const;
/*! Sets the parity for the serial port.
*
* \param parity Method of parity, default is parity_none, possible values
* are: parity_none, parity_odd, parity_even
*
* \throw std::invalid_argument
*/
void
setParity(parity_t parity);
/*! Gets the parity for the serial port.
*
* \see Serial::setParity
*
* \throw std::invalid_argument
*/
parity_t
getParity() const;
/*! Sets the stopbits for the serial port.
*
* \param stopbits Number of stop bits used, default is stopbits_one,
* possible values are: stopbits_one, stopbits_one_point_five, stopbits_two
*
* \throw std::invalid_argument
*/
void
setStopbits(stopbits_t stopbits);
/*! Gets the stopbits for the serial port.
*
* \see Serial::setStopbits
*
* \throw std::invalid_argument
*/
stopbits_t
getStopbits() const;
/*! Sets the flow control for the serial port.
*
* \param flowcontrol Type of flowcontrol used, default is flowcontrol_none,
* possible values are: flowcontrol_none, flowcontrol_software,
* flowcontrol_hardware
*
* \throw std::invalid_argument
*/
void
setFlowcontrol(flowcontrol_t flowcontrol);
/*! Gets the flow control for the serial port.
*
* \see Serial::setFlowcontrol
*
* \throw std::invalid_argument
*/
flowcontrol_t
getFlowcontrol() const;
/*! Flush the input and output buffers */
void
flush();
/*! Flush only the input buffer */
void
flushInput();
/*! Flush only the output buffer */
void
flushOutput();
/*! Sends the RS-232 break signal. See tcsendbreak(3). */
void
sendBreak(int duration);
/*! Set the break condition to a given level. Defaults to true. */
void
setBreak(bool level = true);
/*! Set the RTS handshaking line to the given level. Defaults to true. */
void
setRTS(bool level = true);
/*! Set the DTR handshaking line to the given level. Defaults to true. */
void
setDTR(bool level = true);
/*!
* Blocks until CTS, DSR, RI, CD changes or something interrupts it.
*
* Can throw an exception if an error occurs while waiting.
* You can check the status of CTS, DSR, RI, and CD once this returns.
* Uses TIOCMIWAIT via ioctl if available (mostly only on Linux) with a
* resolution of less than +-1ms and as good as +-0.2ms. Otherwise a
* polling method is used which can give +-2ms.
*
* \return Returns true if one of the lines changed, false if something else
* occurred.
*
* \throw SerialException
*/
bool
waitForChange();
/*! Returns the current status of the CTS line. */
bool
getCTS();
/*! Returns the current status of the DSR line. */
bool
getDSR();
/*! Returns the current status of the RI line. */
bool
getRI();
/*! Returns the current status of the CD line. */
bool
getCD();
private:
// Disable copy constructors
Serial(const Serial &);
Serial &operator=(const Serial &);
// Pimpl idiom, d_pointer
class SerialImpl;
SerialImpl *pimpl_;
// Scoped Lock Classes
class ScopedReadLock;
class ScopedWriteLock;
// Read common function
size_t
read_(uint8_t *buffer, size_t size);
// Write common function
size_t
write_(const uint8_t *data, size_t length);
};
// Generic serial-port error carrying a pre-formatted description string.
class SerialException : public std::exception
{
  // Copy *assignment* is intentionally disabled (declared, never defined);
  // copy construction is allowed below.
  SerialException &operator=(const SerialException &);
  std::string e_what_;  // message returned by what()
public:
  SerialException(const char *description)
  {
    std::stringstream ss;
    ss << "SerialException " << description << " failed.";
    e_what_ = ss.str();
  }
  SerialException(const SerialException &other) : e_what_(other.e_what_) {}
  virtual ~SerialException() throw() {}
  virtual const char *what() const throw()
  {
    return e_what_.c_str();
  }
};
// I/O failure raised by the serial backends. Carries the source location of
// the failure and, for the errno-based constructor, the OS error number
// (0 for the free-form description overload).
class IOException : public std::exception
{
  // Copy assignment is intentionally disabled (declared, never defined).
  IOException &operator=(const IOException &);
  std::string file_;    // source file that raised the exception
  int line_;            // source line that raised the exception
  std::string e_what_;  // pre-formatted message returned by what()
  int errno_;           // OS errno value, or 0 for the description overload
public:
  // Build from an OS error number; the message embeds strerror(errnum).
  explicit IOException(std::string file, int line, int errnum)
      : file_(file), line_(line), errno_(errnum)
  {
    std::stringstream ss;
#if defined(_WIN32) && !defined(__MINGW32__)
    // MSVC deprecates strerror; use the bounds-checked variant there.
    char error_str[1024];
    strerror_s(error_str, 1024, errnum);
#else
    char *error_str = strerror(errnum);
#endif
    ss << "IO Exception (" << errno_ << "): " << error_str;
    ss << ", file " << file_ << ", line " << line_ << ".";
    e_what_ = ss.str();
  }
  // Build from a free-form description; errno_ is recorded as 0.
  explicit IOException(std::string file, int line, const char *description)
      : file_(file), line_(line), errno_(0)
  {
    std::stringstream ss;
    ss << "IO Exception: " << description;
    ss << ", file " << file_ << ", line " << line_ << ".";
    e_what_ = ss.str();
  }
  virtual ~IOException() throw() {}
  // Fix: also copy file_ -- the original copy constructor omitted it, so a
  // copied exception silently lost its source-file field.
  IOException(const IOException &other)
      : file_(other.file_), line_(other.line_),
        e_what_(other.e_what_), errno_(other.errno_) {}
  int getErrorNumber() const { return errno_; }
  virtual const char *what() const throw()
  {
    return e_what_.c_str();
  }
};
class PortNotOpenedException : public std::exception
{
  // Copy-assignment is intentionally disabled.
  const PortNotOpenedException &operator=(PortNotOpenedException);
  std::string e_what_;  // message built once at construction time
public:
  /*! Builds the fixed message "PortNotOpenedException <description> failed.". */
  PortNotOpenedException(const char *description)
  {
    e_what_ = std::string("PortNotOpenedException ") + description + " failed.";
  }
  /*! Copies keep the already-formatted message. */
  PortNotOpenedException(const PortNotOpenedException &other) : e_what_(other.e_what_) {}
  virtual ~PortNotOpenedException() throw() {}
  /*! Returns the formatted explanation string. */
  virtual const char *what() const throw() { return e_what_.c_str(); }
};
/*!
 * Structure that describes a serial device.
 * One instance per detected port is returned by serial::list_ports().
 */
struct PortInfo
{
  /*! Address of the serial port (this can be passed to the constructor of Serial). */
  std::string port;
  /*! Human readable description of serial device if available. */
  std::string description;
  /*! Hardware ID (e.g. VID:PID of USB serial devices) or "n/a" if not available. */
  std::string hardware_id;
};
/*! Lists the serial ports available on the system.
 *
 * Returns a vector of available serial ports, each represented
 * by a serial::PortInfo data structure.
 *
 * \return vector of serial::PortInfo.
 */
std::vector<PortInfo>
list_ports();
} // namespace serial
#endif

View File

@ -0,0 +1,57 @@
// This header is from the v8 google project:
// http://code.google.com/p/v8/source/browse/trunk/include/v8stdint.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Load definitions of standard types.
// Minimal stdint shim: guarantees the exact-width integer typedefs exist.
#ifndef V8STDINT_H_
#define V8STDINT_H_
#include <stddef.h>
#include <stdio.h>
#if defined(_WIN32) && !defined(__MINGW32__)
// Pre-C99 MSVC ships no <stdint.h>; declare the exact-width types by hand.
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef short int16_t;  // NOLINT
typedef unsigned short uint16_t;  // NOLINT
typedef int int32_t;
typedef unsigned int uint32_t;
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
// intptr_t and friends are defined in crtdefs.h through stdio.h.
#else
// Every other toolchain provides the standard header.
#include <stdint.h>
#endif
#endif  // V8STDINT_H_

View File

@ -0,0 +1,336 @@
#if defined(__linux__)
/*
* Copyright (c) 2014 Craig Lilley <cralilley@gmail.com>
* This software is made available under the terms of the MIT licence.
* A copy of the licence can be obtained from:
* http://opensource.org/licenses/MIT
*/
#include <vector>
#include <string>
#include <sstream>
#include <stdexcept>
#include <iostream>
#include <fstream>
#include <cstdio>
#include <cstdarg>
#include <cstdlib>
#include <glob.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "serial/serial.h"
using serial::PortInfo;
using std::istringstream;
using std::ifstream;
using std::getline;
using std::vector;
using std::string;
using std::cout;
using std::endl;
static vector<string> glob(const vector<string>& patterns);
static string basename(const string& path);
static string dirname(const string& path);
static bool path_exists(const string& path);
static string realpath(const string& path);
static string usb_sysfs_friendly_name(const string& sys_usb_path);
static vector<string> get_sysfs_info(const string& device_path);
static string read_line(const string& file);
static string usb_sysfs_hw_string(const string& sysfs_path);
static string format(const char* format, ...);
// Expands a list of shell glob patterns into the union of matching paths.
// Patterns that match nothing contribute no entries; an empty input yields
// an empty result.
vector<string>
glob(const vector<string>& patterns)
{
  vector<string> paths_found;
  if(patterns.size() == 0)
    return paths_found;
  glob_t glob_results;
  // The first call initialises glob_results; later calls append to it.
  // (The original kept the unused return value in glob_retval; dropped.)
  ::glob(patterns[0].c_str(), 0, NULL, &glob_results);
  vector<string>::const_iterator iter = patterns.begin();
  while(++iter != patterns.end())
  {
    ::glob(iter->c_str(), GLOB_APPEND, NULL, &glob_results);
  }
  // FIX: gl_pathc is a size_t; the original int index caused a
  // signed/unsigned comparison.
  for(size_t path_index = 0; path_index < glob_results.gl_pathc; path_index++)
  {
    paths_found.push_back(glob_results.gl_pathv[path_index]);
  }
  globfree(&glob_results);
  return paths_found;
}
// Returns the final path component: everything after the last '/', or the
// whole input when it contains no separator.
string
basename(const string& path)
{
  const size_t slash = path.find_last_of('/');
  if(slash == string::npos)
    return path;
  return path.substr(slash + 1);
}
// Returns the directory part of a path: the text before the final '/'.
// A path with no separator is returned unchanged; "/x" yields "/".
string
dirname(const string& path)
{
  const size_t slash = path.find_last_of('/');
  if(slash == string::npos)
    return path;
  if(slash == 0)
    return "/";
  return path.substr(0, slash);
}
// True iff the path refers to an existing filesystem entry (stat succeeds).
bool
path_exists(const string& path)
{
  struct stat info;
  return stat(path.c_str(), &info) == 0;
}
// Canonicalises a path via libc realpath(); returns "" when resolution fails.
string
realpath(const string& path)
{
  string resolved;
  // Passing NULL lets libc allocate a buffer of the right size.
  char* buf = ::realpath(path.c_str(), NULL);
  if(buf != NULL)
  {
    resolved = buf;
    free(buf);
  }
  return resolved;
}
// Composes "<manufacturer> <product> <serial>" from the sysfs attribute
// files of a USB device; returns "" when all three attributes are absent.
string
usb_sysfs_friendly_name(const string& sys_usb_path)
{
  // devnum is read for parity with the upstream implementation, but its
  // value does not appear in the resulting name.
  unsigned int device_number = 0;
  istringstream( read_line(sys_usb_path + "/devnum") ) >> device_number;
  const string vendor    = read_line( sys_usb_path + "/manufacturer" );
  const string model     = read_line( sys_usb_path + "/product" );
  const string serial_no = read_line( sys_usb_path + "/serial" );
  if( vendor.empty() && model.empty() && serial_no.empty() )
    return "";
  return format("%s %s %s", vendor.c_str(), model.c_str(), serial_no.c_str() );
}
// Resolves a /dev node to a {friendly name, hardware id} pair using sysfs.
// Falls back to the bare device name and "n/a" when nothing better exists.
vector<string>
get_sysfs_info(const string& device_path)
{
  const string device_name = basename( device_path );
  string friendly_name;
  string hardware_id;
  string sys_device_path = format( "/sys/class/tty/%s/device", device_name.c_str() );
  const bool is_usb_serial = device_name.compare(0, 6, "ttyUSB") == 0;
  const bool is_usb_acm    = device_name.compare(0, 6, "ttyACM") == 0;
  if( is_usb_serial || is_usb_acm )
  {
    // ttyUSB nodes sit one directory deeper in sysfs than ttyACM nodes.
    sys_device_path = is_usb_serial
      ? dirname( dirname( realpath( sys_device_path ) ) )
      : dirname( realpath( sys_device_path ) );
    if( path_exists( sys_device_path ) )
    {
      friendly_name = usb_sysfs_friendly_name( sys_device_path );
      hardware_id = usb_sysfs_hw_string( sys_device_path );
    }
  }
  else
  {
    // Not USB: try to read the ID string of a PCI device instead.
    const string sys_id_path = sys_device_path + "/id";
    if( path_exists( sys_id_path ) )
      hardware_id = read_line( sys_id_path );
  }
  if( friendly_name.empty() )
    friendly_name = device_name;
  if( hardware_id.empty() )
    hardware_id = "n/a";
  vector<string> result;
  result.push_back(friendly_name);
  result.push_back(hardware_id);
  return result;
}
// Returns the first line of a file, or "" when the file cannot be opened
// (missing sysfs attributes are an expected, non-fatal case).
string
read_line(const string& file)
{
  string line;
  ifstream stream(file.c_str(), ifstream::in);
  if(stream)
    getline(stream, line);
  return line;
}
// printf-style formatting into a std::string. Starts with a 256-byte buffer
// and grows it to the exact size reported by vsnprintf when the output is
// truncated. Returns "" on allocation or formatting failure.
string
format(const char* format, ...)
{
  va_list ap;
  size_t buffer_size_bytes = 256;
  string result;
  char* buffer = (char*)malloc(buffer_size_bytes);
  if( buffer == NULL )
    return result;
  bool done = false;
  unsigned int loop_count = 0;
  while(!done)
  {
    // va_start/va_end must bracket every vsnprintf attempt.
    va_start(ap, format);
    int return_value = vsnprintf(buffer, buffer_size_bytes, format, ap);
    va_end(ap);
    if( return_value < 0 )
    {
      // Formatting error: give up and return the empty string.
      done = true;
    }
    // FIX: cast before comparing — the original compared a signed int with a
    // size_t, triggering -Wsign-compare. return_value is non-negative here.
    else if( (size_t)return_value >= buffer_size_bytes )
    {
      // Output was truncated: grow to the exact required size and retry.
      buffer_size_bytes = (size_t)return_value + 1;
      char* new_buffer_ptr = (char*)realloc(buffer, buffer_size_bytes);
      if( new_buffer_ptr == NULL )
        done = true;
      else
        buffer = new_buffer_ptr;
    }
    else
    {
      result = buffer;
      done = true;
    }
    // Safety valve against a pathological vsnprintf that never converges.
    if( ++loop_count > 5 )
      done = true;
  }
  free(buffer);
  return result;
}
// Builds "USB VID:PID=<vid>:<pid> [SNR=<serial>]" from the device's sysfs
// attributes; the SNR part is present only when a serial number exists.
string
usb_sysfs_hw_string(const string& sysfs_path)
{
  string snr = read_line( sysfs_path + "/serial" );
  if( snr.length() > 0 )
  {
    snr = format( "SNR=%s", snr.c_str() );
  }
  const string vid = read_line( sysfs_path + "/idVendor" );
  const string pid = read_line( sysfs_path + "/idProduct" );
  return format("USB VID:PID=%s:%s %s", vid.c_str(), pid.c_str(), snr.c_str() );
}
vector<PortInfo>
serial::list_ports()
{
vector<PortInfo> results;
vector<string> search_globs;
search_globs.push_back("/dev/ttyACM*");
search_globs.push_back("/dev/ttyS*");
search_globs.push_back("/dev/ttyUSB*");
search_globs.push_back("/dev/tty.*");
search_globs.push_back("/dev/cu.*");
search_globs.push_back("/dev/rfcomm*");
vector<string> devices_found = glob( search_globs );
vector<string>::iterator iter = devices_found.begin();
while( iter != devices_found.end() )
{
string device = *iter++;
vector<string> sysfs_info = get_sysfs_info( device );
string friendly_name = sysfs_info[0];
string hardware_id = sysfs_info[1];
PortInfo device_entry;
device_entry.port = device;
device_entry.description = friendly_name;
device_entry.hardware_id = hardware_id;
results.push_back( device_entry );
}
return results;
}
#endif // defined(__linux__)

View File

@ -0,0 +1,286 @@
#if defined(__APPLE__)
#include <sys/param.h>
#include <stdint.h>
#include <CoreFoundation/CoreFoundation.h>
#include <IOKit/IOKitLib.h>
#include <IOKit/serial/IOSerialKeys.h>
#include <IOKit/IOBSD.h>
#include <iostream>
#include <string>
#include <vector>
#include "serial/serial.h"
using serial::PortInfo;
using std::string;
using std::vector;
#define HARDWARE_ID_STRING_LENGTH 128
string cfstring_to_string( CFStringRef cfstring );
string get_device_path( io_object_t& serial_port );
string get_class_name( io_object_t& obj );
io_registry_entry_t get_parent_iousb_device( io_object_t& serial_port );
string get_string_property( io_object_t& device, const char* property );
uint16_t get_int_property( io_object_t& device, const char* property );
string rtrim(const string& str);
// Copies a CFStringRef into a std::string using ASCII encoding.
// Returns "" when cfstring is NULL or the conversion fails.
string
cfstring_to_string( CFStringRef cfstring )
{
  char cstring[MAXPATHLEN];
  string result;
  if( cfstring )
  {
    Boolean success = CFStringGetCString( cfstring,
                                          cstring,
                                          sizeof(cstring),
                                          kCFStringEncodingASCII );
    if( success )
      result = cstring;
  }
  return result;
}
// Returns the /dev callout path (kIOCalloutDeviceKey property) of a serial
// port registry object, or "" when the property is missing or not a string.
string
get_device_path( io_object_t& serial_port )
{
  CFTypeRef callout_path;
  string device_path;
  callout_path = IORegistryEntryCreateCFProperty( serial_port,
                                                  CFSTR(kIOCalloutDeviceKey),
                                                  kCFAllocatorDefault,
                                                  0 );
  if (callout_path)
  {
    if( CFGetTypeID(callout_path) == CFStringGetTypeID() )
      device_path = cfstring_to_string( static_cast<CFStringRef>(callout_path) );
    CFRelease(callout_path);  // IORegistryEntryCreateCFProperty transfers ownership
  }
  return device_path;
}
// Returns the IOKit class name of a registry object, or "" on failure.
string
get_class_name( io_object_t& obj )
{
  string result;
  io_name_t class_name;
  kern_return_t kern_result;
  kern_result = IOObjectGetClass( obj, class_name );
  if( kern_result == KERN_SUCCESS )
    result = class_name;
  return result;
}
// Walks up the IORegistry from a serial port object to its owning
// IOUSBDevice ancestor. Returns 0 when no such ancestor exists.
io_registry_entry_t
get_parent_iousb_device( io_object_t& serial_port )
{
  io_object_t device = serial_port;
  io_registry_entry_t parent = 0;
  io_registry_entry_t result = 0;
  kern_return_t kern_result = KERN_FAILURE;
  string name = get_class_name(device);
  // Walk the IO Registry tree looking for this devices parent IOUSBDevice.
  while( name != "IOUSBDevice" )
  {
    kern_result = IORegistryEntryGetParentEntry( device,
                                                 kIOServicePlane,
                                                 &parent );
    if(kern_result != KERN_SUCCESS)
    {
      result = 0;
      break;
    }
    device = parent;
    name = get_class_name(device);
  }
  // NOTE(review): if serial_port itself were an IOUSBDevice the loop body
  // never runs, kern_result stays KERN_FAILURE, and 0 is returned — looks
  // unintended but matches upstream; confirm before changing.
  if(kern_result == KERN_SUCCESS)
    result = device;
  return result;
}
// Reads a string-typed IORegistry property from a device.
// Returns "" when the device is NULL, the property is absent, or it is not
// a CFString.
string
get_string_property( io_object_t& device, const char* property )
{
  string property_name;
  if( device )
  {
    CFStringRef property_as_cfstring = CFStringCreateWithCString (
      kCFAllocatorDefault,
      property,
      kCFStringEncodingASCII );
    CFTypeRef name_as_cfstring = IORegistryEntryCreateCFProperty(
      device,
      property_as_cfstring,
      kCFAllocatorDefault,
      0 );
    if( name_as_cfstring )
    {
      if( CFGetTypeID(name_as_cfstring) == CFStringGetTypeID() )
        property_name = cfstring_to_string( static_cast<CFStringRef>(name_as_cfstring) );
      CFRelease(name_as_cfstring);
    }
    if(property_as_cfstring)
      CFRelease(property_as_cfstring);
  }
  return property_name;
}
// Reads a 16-bit integer IORegistry property (e.g. idVendor/idProduct).
// Returns 0 when the device is NULL, the property is absent, not a number,
// or the conversion fails.
uint16_t
get_int_property( io_object_t& device, const char* property )
{
  uint16_t result = 0;
  if( device )
  {
    CFStringRef property_as_cfstring = CFStringCreateWithCString (
      kCFAllocatorDefault,
      property,
      kCFStringEncodingASCII );
    CFTypeRef number = IORegistryEntryCreateCFProperty( device,
                                                        property_as_cfstring,
                                                        kCFAllocatorDefault,
                                                        0 );
    if(property_as_cfstring)
      CFRelease(property_as_cfstring);
    if( number )
    {
      if( CFGetTypeID(number) == CFNumberGetTypeID() )
      {
        bool success = CFNumberGetValue( static_cast<CFNumberRef>(number),
                                         kCFNumberSInt16Type,
                                         &result );
        if( !success )
          result = 0;
      }
      CFRelease(number);
    }
  }
  return result;
}
// Strips trailing whitespace (space, tab, \f, \v, \n, \r); an input that is
// entirely whitespace becomes the empty string.
string rtrim(const string& str)
{
  const string whitespace = " \t\f\v\n\r";
  const size_t last = str.find_last_not_of(whitespace);
  if (last == string::npos)
    return string();
  return str.substr(0, last + 1);
}
// Enumerates serial devices on macOS via the IOKit registry: matches all
// IOSerialBSD services, then annotates each with USB metadata when the port
// has an IOUSBDevice ancestor.
vector<PortInfo>
serial::list_ports(void)
{
  vector<PortInfo> devices_found;
  CFMutableDictionaryRef classes_to_match;
  io_iterator_t serial_port_iterator;
  io_object_t serial_port;
  mach_port_t master_port;
  kern_return_t kern_result;
  kern_result = IOMasterPort(MACH_PORT_NULL, &master_port);
  if(kern_result != KERN_SUCCESS)
    return devices_found;
  classes_to_match = IOServiceMatching(kIOSerialBSDServiceValue);
  if (classes_to_match == NULL)
    return devices_found;
  // Match every serial type (modem/callout/etc.).
  CFDictionarySetValue( classes_to_match,
                        CFSTR(kIOSerialBSDTypeKey),
                        CFSTR(kIOSerialBSDAllTypes) );
  kern_result = IOServiceGetMatchingServices(master_port, classes_to_match, &serial_port_iterator);
  if (KERN_SUCCESS != kern_result)
    return devices_found;
  while ( (serial_port = IOIteratorNext(serial_port_iterator)) )
  {
    string device_path = get_device_path( serial_port );
    // parent is 0 for non-USB ports; the property getters tolerate that.
    // NOTE(review): parent is never IOObjectRelease'd — possible handle
    // leak inherited from upstream; confirm before changing.
    io_registry_entry_t parent = get_parent_iousb_device( serial_port );
    IOObjectRelease(serial_port);
    if( device_path.empty() )
      continue;
    PortInfo port_info;
    port_info.port = device_path;
    port_info.description = "n/a";
    port_info.hardware_id = "n/a";
    string device_name = rtrim( get_string_property( parent, "USB Product Name" ) );
    string vendor_name = rtrim( get_string_property( parent, "USB Vendor Name") );
    string description = rtrim( vendor_name + " " + device_name );
    if( !description.empty() )
      port_info.description = description;
    string serial_number = rtrim(get_string_property( parent, "USB Serial Number" ) );
    uint16_t vendor_id = get_int_property( parent, "idVendor" );
    uint16_t product_id = get_int_property( parent, "idProduct" );
    if( vendor_id && product_id )
    {
      char cstring[HARDWARE_ID_STRING_LENGTH];
      if(serial_number.empty())
        serial_number = "None";
      int ret = snprintf( cstring, HARDWARE_ID_STRING_LENGTH, "USB VID:PID=%04x:%04x SNR=%s",
                          vendor_id,
                          product_id,
                          serial_number.c_str() );
      if( (ret >= 0) && (ret < HARDWARE_ID_STRING_LENGTH) )
        port_info.hardware_id = cstring;
    }
    devices_found.push_back(port_info);
  }
  IOObjectRelease(serial_port_iterator);
  return devices_found;
}
#endif // defined(__APPLE__)

View File

@ -0,0 +1,152 @@
#if defined(_WIN32)
/*
* Copyright (c) 2014 Craig Lilley <cralilley@gmail.com>
* This software is made available under the terms of the MIT licence.
* A copy of the licence can be obtained from:
* http://opensource.org/licenses/MIT
*/
#include "serial/serial.h"
#include <tchar.h>
#include <windows.h>
#include <setupapi.h>
#include <initguid.h>
#include <devguid.h>
#include <cstring>
using serial::PortInfo;
using std::vector;
using std::string;
static const DWORD port_name_max_length = 256;
static const DWORD friendly_name_max_length = 256;
static const DWORD hardware_id_max_length = 256;
// Convert a wide Unicode string to an UTF8 string.
// Returns "" for an empty input or when the Win32 conversion fails.
std::string utf8_encode(const std::wstring &wstr)
{
  // FIX: guard the empty case — &wstr[0] on an empty string is undefined
  // (pre-C++11) and WideCharToMultiByte treats a zero length as an error.
  if (wstr.empty())
    return std::string();
  int size_needed = WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), NULL, 0, NULL, NULL);
  // FIX: a failed sizing call returns 0; the original passed that straight
  // into std::string(size_needed, 0), which throws for negative/huge sizes.
  if (size_needed <= 0)
    return std::string();
  std::string strTo( size_needed, 0 );
  WideCharToMultiByte (CP_UTF8, 0, &wstr[0], (int)wstr.size(), &strTo[0], size_needed, NULL, NULL);
  return strTo;
}
// Enumerates serial devices on Windows via the SetupAPI device-class
// GUID_DEVCLASS_PORTS, reading each device's PortName registry value,
// friendly name, and hardware ID. Parallel (LPT) ports are skipped.
vector<PortInfo>
serial::list_ports()
{
  vector<PortInfo> devices_found;
  HDEVINFO device_info_set = SetupDiGetClassDevs(
    (const GUID *) &GUID_DEVCLASS_PORTS,
    NULL,
    NULL,
    DIGCF_PRESENT);
  unsigned int device_info_set_index = 0;
  SP_DEVINFO_DATA device_info_data;
  device_info_data.cbSize = sizeof(SP_DEVINFO_DATA);
  while(SetupDiEnumDeviceInfo(device_info_set, device_info_set_index, &device_info_data))
  {
    device_info_set_index++;
    // Get port name
    HKEY hkey = SetupDiOpenDevRegKey(
      device_info_set,
      &device_info_data,
      DICS_FLAG_GLOBAL,
      0,
      DIREG_DEV,
      KEY_READ);
    TCHAR port_name[port_name_max_length];
    DWORD port_name_length = port_name_max_length;
    LONG return_code = RegQueryValueEx(
      hkey,
      _T("PortName"),
      NULL,
      NULL,
      (LPBYTE)port_name,
      &port_name_length);
    RegCloseKey(hkey);
    if(return_code != EXIT_SUCCESS)
      continue;
    // Ensure NUL termination; the registry value may or may not include it.
    if(port_name_length > 0 && port_name_length <= port_name_max_length)
      port_name[port_name_length-1] = '\0';
    else
      port_name[0] = '\0';
    // Ignore parallel ports
    if(_tcsstr(port_name, _T("LPT")) != NULL)
      continue;
    // Get port friendly name
    TCHAR friendly_name[friendly_name_max_length];
    DWORD friendly_name_actual_length = 0;
    BOOL got_friendly_name = SetupDiGetDeviceRegistryProperty(
      device_info_set,
      &device_info_data,
      SPDRP_FRIENDLYNAME,
      NULL,
      (PBYTE)friendly_name,
      friendly_name_max_length,
      &friendly_name_actual_length);
    if(got_friendly_name == TRUE && friendly_name_actual_length > 0)
      friendly_name[friendly_name_actual_length-1] = '\0';
    else
      friendly_name[0] = '\0';
    // Get hardware ID
    TCHAR hardware_id[hardware_id_max_length];
    DWORD hardware_id_actual_length = 0;
    BOOL got_hardware_id = SetupDiGetDeviceRegistryProperty(
      device_info_set,
      &device_info_data,
      SPDRP_HARDWAREID,
      NULL,
      (PBYTE)hardware_id,
      hardware_id_max_length,
      &hardware_id_actual_length);
    if(got_hardware_id == TRUE && hardware_id_actual_length > 0)
      hardware_id[hardware_id_actual_length-1] = '\0';
    else
      hardware_id[0] = '\0';
    // In a UNICODE build the TCHAR buffers are wide and need re-encoding.
#ifdef UNICODE
    std::string portName = utf8_encode(port_name);
    std::string friendlyName = utf8_encode(friendly_name);
    std::string hardwareId = utf8_encode(hardware_id);
#else
    std::string portName = port_name;
    std::string friendlyName = friendly_name;
    std::string hardwareId = hardware_id;
#endif
    PortInfo port_entry;
    port_entry.port = portName;
    port_entry.description = friendlyName;
    port_entry.hardware_id = hardwareId;
    devices_found.push_back(port_entry);
  }
  SetupDiDestroyDeviceInfoList(device_info_set);
  return devices_found;
}
#endif // #if defined(_WIN32)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,646 @@
#if defined(_WIN32)
/* Copyright 2012 William Woodall and John Harrison */
#include <sstream>
#include "serial/impl/win.h"
using std::string;
using std::wstring;
using std::stringstream;
using std::invalid_argument;
using serial::Serial;
using serial::Timeout;
using serial::bytesize_t;
using serial::parity_t;
using serial::stopbits_t;
using serial::flowcontrol_t;
using serial::SerialException;
using serial::PortNotOpenedException;
using serial::IOException;
// COM ports above COM9 must be opened through the "\\.\" device namespace;
// prepending the prefix to every unprefixed name is uniform and harmless.
inline wstring
_prefix_port_if_needed(const wstring &input)
{
  static wstring windows_com_port_prefix = L"\\\\.\\";
  if (input.compare(0, windows_com_port_prefix.size(), windows_com_port_prefix) == 0)
  {
    return input;  // already prefixed
  }
  return windows_com_port_prefix + input;
}
// Stores the configuration, opens the port immediately when a non-empty
// name is supplied, and creates the mutexes that serialise read/write use.
Serial::SerialImpl::SerialImpl (const string &port, unsigned long baudrate,
                                bytesize_t bytesize,
                                parity_t parity, stopbits_t stopbits,
                                flowcontrol_t flowcontrol)
  : port_ (port.begin(), port.end()), fd_ (INVALID_HANDLE_VALUE), is_open_ (false),
    baudrate_ (baudrate), parity_ (parity),
    bytesize_ (bytesize), stopbits_ (stopbits), flowcontrol_ (flowcontrol)
{
  if (port_.empty () == false)
    open ();
  // NOTE(review): the mutexes are created after open(); open() itself does
  // not take them, so the ordering is safe.
  read_mutex = CreateMutex(NULL, false, NULL);
  write_mutex = CreateMutex(NULL, false, NULL);
}
// Closes the port (if open) and releases the mutex handles.
Serial::SerialImpl::~SerialImpl ()
{
  this->close();
  CloseHandle(read_mutex);
  CloseHandle(write_mutex);
}
// Opens the configured COM port via CreateFileW and applies the stored
// settings. Throws invalid_argument on an empty port name, SerialException
// on a double open, and IOException on Win32 failures.
void
Serial::SerialImpl::open ()
{
  if (port_.empty ()) {
    throw invalid_argument ("Empty port is invalid.");
  }
  if (is_open_ == true) {
    throw SerialException ("Serial port already open.");
  }
  // Ports above COM9 need the "\\.\" prefix.
  // See: https://github.com/wjwwood/serial/issues/84
  wstring port_with_prefix = _prefix_port_if_needed(port_);
  LPCWSTR lp_port = port_with_prefix.c_str();
  fd_ = CreateFileW(lp_port,
                    GENERIC_READ | GENERIC_WRITE,
                    0,
                    0,
                    OPEN_EXISTING,
                    FILE_ATTRIBUTE_NORMAL,
                    0);
  if (fd_ == INVALID_HANDLE_VALUE) {
    DWORD create_file_err = GetLastError();
    stringstream ss;
    switch (create_file_err) {
    case ERROR_FILE_NOT_FOUND:
      // Use this->getPort to convert to a std::string
      ss << "Specified port, " << this->getPort() << ", does not exist.";
      THROW (IOException, ss.str().c_str());
    default:
      ss << "Unknown error opening the serial port: " << create_file_err;
      THROW (IOException, ss.str().c_str());
    }
  }
  reconfigurePort();
  is_open_ = true;
}
// Pushes the cached settings (baud, byte size, stop bits, parity, flow
// control, timeouts) to the open handle via GetCommState/SetCommState and
// SetCommTimeouts. Throws IOException when the handle is invalid or any
// Win32 call fails.
void
Serial::SerialImpl::reconfigurePort ()
{
  if (fd_ == INVALID_HANDLE_VALUE) {
    // Can only operate on a valid file descriptor
    THROW (IOException, "Invalid file descriptor, is the serial port open?");
  }
  DCB dcbSerialParams = {0};
  dcbSerialParams.DCBlength=sizeof(dcbSerialParams);
  if (!GetCommState(fd_, &dcbSerialParams)) {
    //error getting state
    THROW (IOException, "Error getting the serial port state.");
  }
  // setup baud rate: map the numeric rate to the CBR_* constant when one
  // exists on this SDK; otherwise pass the number through unchanged.
  switch (baudrate_) {
#ifdef CBR_0
  case 0: dcbSerialParams.BaudRate = CBR_0; break;
#endif
#ifdef CBR_50
  case 50: dcbSerialParams.BaudRate = CBR_50; break;
#endif
#ifdef CBR_75
  case 75: dcbSerialParams.BaudRate = CBR_75; break;
#endif
#ifdef CBR_110
  case 110: dcbSerialParams.BaudRate = CBR_110; break;
#endif
#ifdef CBR_134
  case 134: dcbSerialParams.BaudRate = CBR_134; break;
#endif
#ifdef CBR_150
  case 150: dcbSerialParams.BaudRate = CBR_150; break;
#endif
#ifdef CBR_200
  case 200: dcbSerialParams.BaudRate = CBR_200; break;
#endif
#ifdef CBR_300
  case 300: dcbSerialParams.BaudRate = CBR_300; break;
#endif
#ifdef CBR_600
  case 600: dcbSerialParams.BaudRate = CBR_600; break;
#endif
#ifdef CBR_1200
  case 1200: dcbSerialParams.BaudRate = CBR_1200; break;
#endif
#ifdef CBR_1800
  case 1800: dcbSerialParams.BaudRate = CBR_1800; break;
#endif
#ifdef CBR_2400
  case 2400: dcbSerialParams.BaudRate = CBR_2400; break;
#endif
#ifdef CBR_4800
  case 4800: dcbSerialParams.BaudRate = CBR_4800; break;
#endif
#ifdef CBR_7200
  case 7200: dcbSerialParams.BaudRate = CBR_7200; break;
#endif
#ifdef CBR_9600
  case 9600: dcbSerialParams.BaudRate = CBR_9600; break;
#endif
#ifdef CBR_14400
  case 14400: dcbSerialParams.BaudRate = CBR_14400; break;
#endif
#ifdef CBR_19200
  case 19200: dcbSerialParams.BaudRate = CBR_19200; break;
#endif
#ifdef CBR_28800
  case 28800: dcbSerialParams.BaudRate = CBR_28800; break;
#endif
#ifdef CBR_57600
  case 57600: dcbSerialParams.BaudRate = CBR_57600; break;
#endif
#ifdef CBR_76800
  case 76800: dcbSerialParams.BaudRate = CBR_76800; break;
#endif
#ifdef CBR_38400
  case 38400: dcbSerialParams.BaudRate = CBR_38400; break;
#endif
#ifdef CBR_115200
  case 115200: dcbSerialParams.BaudRate = CBR_115200; break;
#endif
#ifdef CBR_128000
  case 128000: dcbSerialParams.BaudRate = CBR_128000; break;
#endif
#ifdef CBR_153600
  case 153600: dcbSerialParams.BaudRate = CBR_153600; break;
#endif
#ifdef CBR_230400
  case 230400: dcbSerialParams.BaudRate = CBR_230400; break;
#endif
#ifdef CBR_256000
  case 256000: dcbSerialParams.BaudRate = CBR_256000; break;
#endif
#ifdef CBR_460800
  case 460800: dcbSerialParams.BaudRate = CBR_460800; break;
#endif
#ifdef CBR_921600
  case 921600: dcbSerialParams.BaudRate = CBR_921600; break;
#endif
  default:
    // Try to blindly assign it
    dcbSerialParams.BaudRate = baudrate_;
  }
  // setup char len
  if (bytesize_ == eightbits)
    dcbSerialParams.ByteSize = 8;
  else if (bytesize_ == sevenbits)
    dcbSerialParams.ByteSize = 7;
  else if (bytesize_ == sixbits)
    dcbSerialParams.ByteSize = 6;
  else if (bytesize_ == fivebits)
    dcbSerialParams.ByteSize = 5;
  else
    throw invalid_argument ("invalid char len");
  // setup stopbits
  if (stopbits_ == stopbits_one)
    dcbSerialParams.StopBits = ONESTOPBIT;
  else if (stopbits_ == stopbits_one_point_five)
    dcbSerialParams.StopBits = ONE5STOPBITS;
  else if (stopbits_ == stopbits_two)
    dcbSerialParams.StopBits = TWOSTOPBITS;
  else
    throw invalid_argument ("invalid stop bit");
  // setup parity
  if (parity_ == parity_none) {
    dcbSerialParams.Parity = NOPARITY;
  } else if (parity_ == parity_even) {
    dcbSerialParams.Parity = EVENPARITY;
  } else if (parity_ == parity_odd) {
    dcbSerialParams.Parity = ODDPARITY;
  } else if (parity_ == parity_mark) {
    dcbSerialParams.Parity = MARKPARITY;
  } else if (parity_ == parity_space) {
    dcbSerialParams.Parity = SPACEPARITY;
  } else {
    throw invalid_argument ("invalid parity");
  }
  // setup flowcontrol
  if (flowcontrol_ == flowcontrol_none) {
    dcbSerialParams.fOutxCtsFlow = false;
    dcbSerialParams.fRtsControl = RTS_CONTROL_DISABLE;
    dcbSerialParams.fOutX = false;
    dcbSerialParams.fInX = false;
  }
  if (flowcontrol_ == flowcontrol_software) {
    dcbSerialParams.fOutxCtsFlow = false;
    dcbSerialParams.fRtsControl = RTS_CONTROL_DISABLE;
    dcbSerialParams.fOutX = true;
    dcbSerialParams.fInX = true;
  }
  if (flowcontrol_ == flowcontrol_hardware) {
    dcbSerialParams.fOutxCtsFlow = true;
    dcbSerialParams.fRtsControl = RTS_CONTROL_HANDSHAKE;
    dcbSerialParams.fOutX = false;
    dcbSerialParams.fInX = false;
  }
  // activate settings
  if (!SetCommState(fd_, &dcbSerialParams)){
    // NOTE(review): the handle is closed here but fd_/is_open_ are not
    // reset — inherited from upstream; confirm before changing.
    CloseHandle(fd_);
    THROW (IOException, "Error setting serial port settings.");
  }
  // Setup timeouts
  COMMTIMEOUTS timeouts = {0};
  timeouts.ReadIntervalTimeout = timeout_.inter_byte_timeout;
  timeouts.ReadTotalTimeoutConstant = timeout_.read_timeout_constant;
  timeouts.ReadTotalTimeoutMultiplier = timeout_.read_timeout_multiplier;
  timeouts.WriteTotalTimeoutConstant = timeout_.write_timeout_constant;
  timeouts.WriteTotalTimeoutMultiplier = timeout_.write_timeout_multiplier;
  if (!SetCommTimeouts(fd_, &timeouts)) {
    THROW (IOException, "Error setting timeouts.");
  }
}
// Closes the handle (idempotent: a no-op when already closed); throws
// IOException when CloseHandle fails.
void
Serial::SerialImpl::close ()
{
  if (is_open_ == true) {
    if (fd_ != INVALID_HANDLE_VALUE) {
      int ret;
      ret = CloseHandle(fd_);
      if (ret == 0) {
        stringstream ss;
        ss << "Error while closing serial port: " << GetLastError();
        THROW (IOException, ss.str().c_str());
      } else {
        fd_ = INVALID_HANDLE_VALUE;
      }
    }
    is_open_ = false;
  }
}
// True while the port handle is open.
bool
Serial::SerialImpl::isOpen () const
{
  return is_open_;
}
// Number of bytes waiting in the driver's input queue (0 when closed).
size_t
Serial::SerialImpl::available ()
{
  if (!is_open_) {
    return 0;
  }
  COMSTAT cs;
  if (!ClearCommError(fd_, NULL, &cs)) {
    stringstream ss;
    ss << "Error while checking status of the serial port: " << GetLastError();
    THROW (IOException, ss.str().c_str());
  }
  return static_cast<size_t>(cs.cbInQue);
}
// Not supported on Windows; always throws.
bool
Serial::SerialImpl::waitReadable (uint32_t /*timeout*/)
{
  THROW (IOException, "waitReadable is not implemented on Windows.");
  return false;
}
// Not supported on Windows; always throws.
void
Serial::SerialImpl::waitByteTimes (size_t /*count*/)
{
  THROW (IOException, "waitByteTimes is not implemented on Windows.");
}
// Blocking read honouring the configured COMMTIMEOUTS; returns the number
// of bytes actually read (may be fewer than requested on timeout).
size_t
Serial::SerialImpl::read (uint8_t *buf, size_t size)
{
  if (!is_open_) {
    throw PortNotOpenedException ("Serial::read");
  }
  DWORD bytes_read;
  if (!ReadFile(fd_, buf, static_cast<DWORD>(size), &bytes_read, NULL)) {
    stringstream ss;
    ss << "Error while reading from the serial port: " << GetLastError();
    THROW (IOException, ss.str().c_str());
  }
  return (size_t) (bytes_read);
}
// Blocking write; returns the number of bytes accepted by the driver.
size_t
Serial::SerialImpl::write (const uint8_t *data, size_t length)
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::write");
  }
  DWORD bytes_written;
  if (!WriteFile(fd_, data, static_cast<DWORD>(length), &bytes_written, NULL)) {
    stringstream ss;
    ss << "Error while writing to the serial port: " << GetLastError();
    THROW (IOException, ss.str().c_str());
  }
  return (size_t) (bytes_written);
}
// Configuration accessors. Each setter stores the new value and, when the
// port is already open, immediately reapplies the full configuration via
// reconfigurePort().
void
Serial::SerialImpl::setPort (const string &port)
{
  // Stored as a wstring for the wide Win32 API; naive narrow->wide copy.
  port_ = wstring(port.begin(), port.end());
}
string
Serial::SerialImpl::getPort () const
{
  return string(port_.begin(), port_.end());
}
void
Serial::SerialImpl::setTimeout (serial::Timeout &timeout)
{
  timeout_ = timeout;
  if (is_open_) {
    reconfigurePort ();
  }
}
serial::Timeout
Serial::SerialImpl::getTimeout () const
{
  return timeout_;
}
void
Serial::SerialImpl::setBaudrate (unsigned long baudrate)
{
  baudrate_ = baudrate;
  if (is_open_) {
    reconfigurePort ();
  }
}
unsigned long
Serial::SerialImpl::getBaudrate () const
{
  return baudrate_;
}
void
Serial::SerialImpl::setBytesize (serial::bytesize_t bytesize)
{
  bytesize_ = bytesize;
  if (is_open_) {
    reconfigurePort ();
  }
}
serial::bytesize_t
Serial::SerialImpl::getBytesize () const
{
  return bytesize_;
}
void
Serial::SerialImpl::setParity (serial::parity_t parity)
{
  parity_ = parity;
  if (is_open_) {
    reconfigurePort ();
  }
}
serial::parity_t
Serial::SerialImpl::getParity () const
{
  return parity_;
}
void
Serial::SerialImpl::setStopbits (serial::stopbits_t stopbits)
{
  stopbits_ = stopbits;
  if (is_open_) {
    reconfigurePort ();
  }
}
serial::stopbits_t
Serial::SerialImpl::getStopbits () const
{
  return stopbits_;
}
void
Serial::SerialImpl::setFlowcontrol (serial::flowcontrol_t flowcontrol)
{
  flowcontrol_ = flowcontrol;
  if (is_open_) {
    reconfigurePort ();
  }
}
serial::flowcontrol_t
Serial::SerialImpl::getFlowcontrol () const
{
  return flowcontrol_;
}
// Flushes pending output to the device; throws when the port is closed.
void
Serial::SerialImpl::flush ()
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::flush");
  }
  FlushFileBuffers (fd_);
}
// Discards the driver's receive buffer.
void
Serial::SerialImpl::flushInput ()
{
  if (is_open_ == false) {
    throw PortNotOpenedException("Serial::flushInput");
  }
  PurgeComm(fd_, PURGE_RXCLEAR);
}
// Discards the driver's transmit buffer.
void
Serial::SerialImpl::flushOutput ()
{
  if (is_open_ == false) {
    throw PortNotOpenedException("Serial::flushOutput");
  }
  PurgeComm(fd_, PURGE_TXCLEAR);
}
// Timed break is not supported on Windows; use setBreak() instead.
void
Serial::SerialImpl::sendBreak (int /*duration*/)
{
  THROW (IOException, "sendBreak is not supported on Windows.");
}
// Asserts (true) or clears (false) the break condition on the TX line.
void
Serial::SerialImpl::setBreak (bool level)
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::setBreak");
  }
  if (level) {
    EscapeCommFunction (fd_, SETBREAK);
  } else {
    EscapeCommFunction (fd_, CLRBREAK);
  }
}
// Drives the RTS modem-control line.
void
Serial::SerialImpl::setRTS (bool level)
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::setRTS");
  }
  if (level) {
    EscapeCommFunction (fd_, SETRTS);
  } else {
    EscapeCommFunction (fd_, CLRRTS);
  }
}
// Drives the DTR modem-control line.
void
Serial::SerialImpl::setDTR (bool level)
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::setDTR");
  }
  if (level) {
    EscapeCommFunction (fd_, SETDTR);
  } else {
    EscapeCommFunction (fd_, CLRDTR);
  }
}
// Blocks until CTS, DSR, RI, or CD changes state; returns false when the
// comm-event mask cannot be set or the wait itself fails.
bool
Serial::SerialImpl::waitForChange ()
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::waitForChange");
  }
  DWORD dwCommEvent;
  if (!SetCommMask(fd_, EV_CTS | EV_DSR | EV_RING | EV_RLSD)) {
    // Error setting communications mask
    return false;
  }
  if (!WaitCommEvent(fd_, &dwCommEvent, NULL)) {
    // An error occurred waiting for the event.
    return false;
  } else {
    // Event has occurred.
    return true;
  }
}
bool
// Reads the current state of the CTS (clear-to-send) line.
Serial::SerialImpl::getCTS ()
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::getCTS");
  }
  DWORD dwModemStatus;
  if (!GetCommModemStatus(fd_, &dwModemStatus)) {
    THROW (IOException, "Error getting the status of the CTS line.");
  }
  return (MS_CTS_ON & dwModemStatus) != 0;
}
bool
// Reads the current state of the DSR (data-set-ready) line.
Serial::SerialImpl::getDSR ()
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::getDSR");
  }
  DWORD dwModemStatus;
  if (!GetCommModemStatus(fd_, &dwModemStatus)) {
    THROW (IOException, "Error getting the status of the DSR line.");
  }
  return (MS_DSR_ON & dwModemStatus) != 0;
}
bool
// Reads the current state of the RI (ring-indicator) line.
Serial::SerialImpl::getRI()
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::getRI");
  }
  DWORD dwModemStatus;
  if (!GetCommModemStatus(fd_, &dwModemStatus)) {
    THROW (IOException, "Error getting the status of the RI line.");
  }
  return (MS_RING_ON & dwModemStatus) != 0;
}
bool
// Reads the current state of the CD (carrier-detect, a.k.a. RLSD) line.
Serial::SerialImpl::getCD()
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::getCD");
  }
  DWORD dwModemStatus;
  if (!GetCommModemStatus(fd_, &dwModemStatus)) {
    // Error in GetCommModemStatus;
    THROW (IOException, "Error getting the status of the CD line.");
  }
  return (MS_RLSD_ON & dwModemStatus) != 0;
}
void
// Acquires the read mutex; blocks indefinitely. Throws on any wait failure.
Serial::SerialImpl::readLock()
{
  if (WaitForSingleObject(read_mutex, INFINITE) != WAIT_OBJECT_0) {
    THROW (IOException, "Error claiming read mutex.");
  }
}
void
// Releases the read mutex; throws if the calling thread does not own it.
Serial::SerialImpl::readUnlock()
{
  if (!ReleaseMutex(read_mutex)) {
    THROW (IOException, "Error releasing read mutex.");
  }
}
void
// Acquires the write mutex; blocks indefinitely. Throws on any wait failure.
Serial::SerialImpl::writeLock()
{
  if (WaitForSingleObject(write_mutex, INFINITE) != WAIT_OBJECT_0) {
    THROW (IOException, "Error claiming write mutex.");
  }
}
void
// Releases the write mutex; throws if the calling thread does not own it.
Serial::SerialImpl::writeUnlock()
{
  if (!ReleaseMutex(write_mutex)) {
    THROW (IOException, "Error releasing write mutex.");
  }
}
#endif // #if defined(_WIN32)

View File

@ -0,0 +1,432 @@
/* Copyright 2012 William Woodall and John Harrison */
#include <algorithm>
#if !defined(_WIN32) && !defined(__OpenBSD__) && !defined(__FreeBSD__)
# include <alloca.h>
#endif
#if defined (__MINGW32__)
# define alloca __builtin_alloca
#endif
#include "serial/serial.h"
#ifdef _WIN32
#include "serial/impl/win.h"
#else
#include "serial/impl/unix.h"
#endif
using std::invalid_argument;
using std::min;
using std::numeric_limits;
using std::vector;
using std::size_t;
using std::string;
using serial::Serial;
using serial::SerialException;
using serial::IOException;
using serial::bytesize_t;
using serial::parity_t;
using serial::stopbits_t;
using serial::flowcontrol_t;
// RAII guard: holds the SerialImpl read mutex for the lifetime of the object.
class Serial::ScopedReadLock {
public:
  // Acquires the read lock immediately on construction.
  ScopedReadLock(SerialImpl *pimpl) : pimpl_(pimpl) {
    this->pimpl_->readLock();
  }
  // Releases the read lock when the guard goes out of scope.
  ~ScopedReadLock() {
    this->pimpl_->readUnlock();
  }
private:
  // Disable copy constructors
  ScopedReadLock(const ScopedReadLock&);
  const ScopedReadLock& operator=(ScopedReadLock);
  SerialImpl *pimpl_;
};
// RAII guard: holds the SerialImpl write mutex for the lifetime of the object.
class Serial::ScopedWriteLock {
public:
  // Acquires the write lock immediately on construction.
  ScopedWriteLock(SerialImpl *pimpl) : pimpl_(pimpl) {
    this->pimpl_->writeLock();
  }
  // Releases the write lock when the guard goes out of scope.
  ~ScopedWriteLock() {
    this->pimpl_->writeUnlock();
  }
private:
  // Disable copy constructors
  ScopedWriteLock(const ScopedWriteLock&);
  const ScopedWriteLock& operator=(ScopedWriteLock);
  SerialImpl *pimpl_;
};
// Constructs the platform-specific implementation (pimpl) with the port
// settings, then applies the timeout separately.
Serial::Serial (const string &port, uint32_t baudrate, serial::Timeout timeout,
                bytesize_t bytesize, parity_t parity, stopbits_t stopbits,
                flowcontrol_t flowcontrol)
 : pimpl_(new SerialImpl (port, baudrate, bytesize, parity,
                                           stopbits, flowcontrol))
{
  pimpl_->setTimeout(timeout);
}
// Destroys the implementation object; pimpl_ is owned exclusively here.
Serial::~Serial ()
{
  delete pimpl_;
}
void
// Opens the underlying serial port.
Serial::open ()
{
  pimpl_->open ();
}
void
// Closes the underlying serial port.
Serial::close ()
{
  pimpl_->close ();
}
bool
// Returns whether the port is currently open.
Serial::isOpen () const
{
  return pimpl_->isOpen ();
}
size_t
// Returns the number of bytes available to read without blocking.
Serial::available ()
{
  return pimpl_->available ();
}
bool
// Blocks until data is readable or the configured read timeout
// (read_timeout_constant) elapses; returns true if data arrived.
Serial::waitReadable ()
{
  serial::Timeout timeout(pimpl_->getTimeout ());
  return pimpl_->waitReadable(timeout.read_timeout_constant);
}
void
// Sleeps for approximately the transmission time of `count` bytes
// at the current settings (delegated to the implementation).
Serial::waitByteTimes (size_t count)
{
  pimpl_->waitByteTimes(count);
}
size_t
// Internal unlocked read: used by callers that already hold the read lock
// (e.g. readline/readlines). Returns the number of bytes actually read.
Serial::read_ (uint8_t *buffer, size_t size)
{
  return this->pimpl_->read (buffer, size);
}
size_t
// Public read into a caller-supplied buffer; serialized by the read lock.
Serial::read (uint8_t *buffer, size_t size)
{
  ScopedReadLock lock(this->pimpl_);
  return this->pimpl_->read (buffer, size);
}
size_t
// Reads up to `size` bytes and appends them to `buffer`.
// Returns the number of bytes actually read (may be fewer on timeout).
Serial::read (std::vector<uint8_t> &buffer, size_t size)
{
  ScopedReadLock lock(this->pimpl_);
  // RAII scratch buffer: replaces the original new[]/try/delete[] pattern,
  // giving the same exception safety without manual cleanup.
  std::vector<uint8_t> scratch (size);
  uint8_t *scratch_ptr = scratch.empty () ? NULL : &scratch[0];
  size_t bytes_read = this->pimpl_->read (scratch_ptr, size);
  buffer.insert (buffer.end (), scratch.begin (), scratch.begin () + bytes_read);
  return bytes_read;
}
size_t
// Reads up to `size` bytes and appends them to the string `buffer`
// (bytes are appended verbatim; no encoding is applied).
// Returns the number of bytes actually read.
Serial::read (std::string &buffer, size_t size)
{
  ScopedReadLock lock(this->pimpl_);
  // Same RAII scratch-buffer approach as the vector overload above.
  std::vector<uint8_t> scratch (size);
  uint8_t *scratch_ptr = scratch.empty () ? NULL : &scratch[0];
  size_t bytes_read = this->pimpl_->read (scratch_ptr, size);
  buffer.append (reinterpret_cast<const char*>(scratch_ptr), bytes_read);
  return bytes_read;
}
string
// Convenience overload: reads up to `size` bytes and returns them as a
// new string (delegates to read(std::string&, size_t)).
Serial::read (size_t size)
{
  std::string buffer;
  this->read (buffer, size);
  return buffer;
}
size_t
// Reads bytes one at a time until `eol` is seen, `size` bytes have been
// read, or a read times out. Appends what was read to `buffer` and returns
// the number of bytes consumed (including the EOL if found).
Serial::readline (string &buffer, size_t size, string eol)
{
  ScopedReadLock lock(this->pimpl_);
  size_t eol_len = eol.length ();
  // Heap-backed scratch buffer: the original used alloca(size), which can
  // overflow the stack for large caller-supplied sizes.
  std::vector<uint8_t> scratch (size);
  uint8_t *buffer_ = scratch.empty () ? NULL : &scratch[0];
  size_t read_so_far = 0;
  while (true)
  {
    // read_ is the unlocked variant; the ScopedReadLock above serializes us.
    size_t bytes_read = this->read_ (buffer_ + read_so_far, 1);
    read_so_far += bytes_read;
    if (bytes_read == 0) {
      break; // Timeout occured on reading 1 byte
    }
    if (read_so_far < eol_len) continue;
    if (string (reinterpret_cast<const char*>
         (buffer_ + read_so_far - eol_len), eol_len) == eol) {
      break; // EOL found
    }
    if (read_so_far == size) {
      break; // Reached the maximum read length
    }
  }
  buffer.append(reinterpret_cast<const char*> (buffer_), read_so_far);
  return read_so_far;
}
string
// Convenience overload: reads one line (up to `size` bytes, terminated by
// `eol`) and returns it as a new string.
Serial::readline (size_t size, string eol)
{
  std::string buffer;
  this->readline (buffer, size, eol);
  return buffer;
}
vector<string>
// Reads up to `size` bytes total, splitting the stream into lines on `eol`.
// A trailing partial line (timeout or size limit hit mid-line) is also
// returned. Each returned line includes its EOL terminator.
Serial::readlines (size_t size, string eol)
{
  ScopedReadLock lock(this->pimpl_);
  std::vector<std::string> lines;
  size_t eol_len = eol.length ();
  // Heap-backed scratch buffer: the original used alloca(size), which can
  // overflow the stack for large caller-supplied sizes.
  std::vector<uint8_t> scratch (size);
  uint8_t *buffer_ = scratch.empty () ? NULL : &scratch[0];
  size_t read_so_far = 0;
  size_t start_of_line = 0;      // index where the current line begins
  while (read_so_far < size) {
    size_t bytes_read = this->read_ (buffer_+read_so_far, 1);
    read_so_far += bytes_read;
    if (bytes_read == 0) {
      // Timeout: flush any partial line collected so far, then stop.
      if (start_of_line != read_so_far) {
        lines.push_back (
          string (reinterpret_cast<const char*> (buffer_ + start_of_line),
            read_so_far - start_of_line));
      }
      break; // Timeout occured on reading 1 byte
    }
    if (read_so_far < eol_len) continue;
    if (string (reinterpret_cast<const char*>
         (buffer_ + read_so_far - eol_len), eol_len) == eol) {
      // EOL found: emit the completed line and start the next one.
      lines.push_back(
        string(reinterpret_cast<const char*> (buffer_ + start_of_line),
          read_so_far - start_of_line));
      start_of_line = read_so_far;
    }
    if (read_so_far == size) {
      // Size limit reached: flush any partial line, then stop.
      if (start_of_line != read_so_far) {
        lines.push_back(
          string(reinterpret_cast<const char*> (buffer_ + start_of_line),
            read_so_far - start_of_line));
      }
      break; // Reached the maximum read length
    }
  }
  return lines;
}
size_t
// Writes the bytes of `data` to the port; returns bytes written.
Serial::write (const string &data)
{
  ScopedWriteLock lock(this->pimpl_);
  return this->write_ (reinterpret_cast<const uint8_t*>(data.c_str()),
                       data.length());
}
size_t
// Writes the contents of `data` to the port; returns bytes written.
Serial::write (const std::vector<uint8_t> &data)
{
  ScopedWriteLock lock(this->pimpl_);
  // &data[0] on an empty vector is undefined behavior; a zero-length
  // write transfers nothing, so return 0 directly.
  if (data.empty ()) {
    return 0;
  }
  return this->write_ (&data[0], data.size());
}
size_t
// Writes `size` bytes from `data`; returns bytes written.
Serial::write (const uint8_t *data, size_t size)
{
  ScopedWriteLock lock(this->pimpl_);
  return this->write_(data, size);
}
size_t
// Internal unlocked write: callers must already hold the write lock.
Serial::write_ (const uint8_t *data, size_t length)
{
  return pimpl_->write (data, length);
}
void
// Changes the device name; takes both locks so no read/write is in flight,
// and transparently closes/reopens the port if it was open.
Serial::setPort (const string &port)
{
  ScopedReadLock rlock(this->pimpl_);
  ScopedWriteLock wlock(this->pimpl_);
  bool was_open = pimpl_->isOpen ();
  if (was_open) close();
  pimpl_->setPort (port);
  if (was_open) open ();
}
string
// Returns the configured device name.
Serial::getPort () const
{
  return pimpl_->getPort ();
}
}
void
Serial::setTimeout (serial::Timeout &timeout)
{
pimpl_->setTimeout (timeout);
}
serial::Timeout
Serial::getTimeout () const {
return pimpl_->getTimeout ();
}
void
Serial::setBaudrate (uint32_t baudrate)
{
pimpl_->setBaudrate (baudrate);
}
uint32_t
Serial::getBaudrate () const
{
return uint32_t(pimpl_->getBaudrate ());
}
void
Serial::setBytesize (bytesize_t bytesize)
{
pimpl_->setBytesize (bytesize);
}
bytesize_t
Serial::getBytesize () const
{
return pimpl_->getBytesize ();
}
void
Serial::setParity (parity_t parity)
{
pimpl_->setParity (parity);
}
parity_t
Serial::getParity () const
{
return pimpl_->getParity ();
}
void
Serial::setStopbits (stopbits_t stopbits)
{
pimpl_->setStopbits (stopbits);
}
stopbits_t
Serial::getStopbits () const
{
return pimpl_->getStopbits ();
}
void
Serial::setFlowcontrol (flowcontrol_t flowcontrol)
{
pimpl_->setFlowcontrol (flowcontrol);
}
flowcontrol_t
Serial::getFlowcontrol () const
{
return pimpl_->getFlowcontrol ();
}
// Drains pending output; holds both locks so no concurrent I/O runs.
void Serial::flush ()
{
  ScopedReadLock rlock(this->pimpl_);
  ScopedWriteLock wlock(this->pimpl_);
  pimpl_->flush ();
}
// Discards unread input; serialized against readers only.
void Serial::flushInput ()
{
  ScopedReadLock lock(this->pimpl_);
  pimpl_->flushInput ();
}
// Discards unsent output; serialized against writers only.
void Serial::flushOutput ()
{
  ScopedWriteLock lock(this->pimpl_);
  pimpl_->flushOutput ();
}
// The methods below delegate directly to the platform implementation;
// see SerialImpl for platform-specific behavior and error handling.
void Serial::sendBreak (int duration)
{
  pimpl_->sendBreak (duration);
}
void Serial::setBreak (bool level)
{
  pimpl_->setBreak (level);
}
void Serial::setRTS (bool level)
{
  pimpl_->setRTS (level);
}
void Serial::setDTR (bool level)
{
  pimpl_->setDTR (level);
}
bool Serial::waitForChange()
{
  return pimpl_->waitForChange();
}
bool Serial::getCTS ()
{
  return pimpl_->getCTS ();
}
bool Serial::getDSR ()
{
  return pimpl_->getDSR ();
}
bool Serial::getRI ()
{
  return pimpl_->getRI ();
}
bool Serial::getCD ()
{
  return pimpl_->getCD ();
}

View File

@ -0,0 +1,212 @@
/*
* @Description :
* @Author : Aiyangsky
* @Date : 2022-08-26 21:42:10
* @LastEditors : Aiyangsky
* @LastEditTime : 2022-08-27 03:43:49
* @FilePath : \mavlink\src\route\Ring_Fifo.c
*/
#include <string.h>
#include "Ring_Fifo.h"
/**
* @description:
* @param {RING_FIFO_CB_T} *fifo fifo struct pointer
* @param {unsigned short} cell_size sizeof(cell)
* @param {unsigned char} *buffer fifo buffer address
* @param {unsigned int} buffer_lenght sizeof(buffer)
* @return {*}
* @note :
*/
/**
 * Initialize a ring FIFO over a caller-owned byte buffer.
 *
 * @param fifo          control block to initialize
 * @param cell_size     size in bytes of one stored cell
 * @param buffer        backing storage (owned by the caller)
 * @param buffer_lenght total size of `buffer` in bytes
 */
void Ring_Fifo_init(RING_FIFO_CB_T *fifo, unsigned short cell_size,
                    unsigned char *buffer, unsigned int buffer_lenght)
{
    /* Trim the usable region to a whole number of cells so that a single
       cell never straddles the wrap point of the buffer. */
    unsigned int usable = buffer_lenght - (buffer_lenght % cell_size);

    fifo->cell_size = cell_size;
    fifo->start = buffer;
    fifo->end = buffer + usable;
    fifo->in = buffer;
    fifo->out = buffer;
    fifo->curr_number = 0;
    fifo->max_number = buffer_lenght / cell_size;
}
/**
* @description: add a cell to fifo
* @param {RING_FIFO_CB_T} *fifo fifo struct pointer
* @param {void} *data cell data [in]
* @return {*} Success or fail
* @note : failed if without space
*/
bool Ring_Fifo_in_cell(RING_FIFO_CB_T *fifo, void *data)
{
    unsigned char *next;
    unsigned char *ptemp = fifo->in;  /* write position captured before advance */
    bool ret = false;
    LOCK();
    if (fifo->curr_number < fifo->max_number)
    {
        next = fifo->in + fifo->cell_size;
        /* end is a whole multiple of cell_size past start, so wrap on >= */
        if (next >= fifo->end)
        {
            next = fifo->start;
        }
        /* Advance the write pointer and count before copying the payload;
           the copy targets the previously captured slot. */
        fifo->in = next;
        fifo->curr_number++;
        memcpy(ptemp, data, fifo->cell_size);
        ret = true;
    }
    UNLOCK();
    return ret;
}
/**
* @description: add a series of cells to fifo
* @param {RING_FIFO_CB_T} *fifo
* @param {void} *data cells data [in]
* @param {unsigned short} number expect add number of cells
* @return {*} number of successful add
* @note :
*/
/**
 * Append up to `number` cells from `data` into the FIFO.
 *
 * @param fifo   FIFO control block
 * @param data   source array of `number` contiguous cells
 * @param number cells the caller wants to store
 * @return       cells actually stored (limited by free space; 0 when full)
 *
 * Fixes vs. the original:
 *  - diff == number previously fell through to ret = 0, silently dropping
 *    a batch that exactly fit the remaining space;
 *  - free-space and write-pointer snapshots are now taken after LOCK(),
 *    so they cannot race with a concurrent producer/consumer.
 */
unsigned short Ring_Fifo_in_cells(RING_FIFO_CB_T *fifo, void *data, unsigned short number)
{
    unsigned short diff;
    unsigned short count_temp, count_temp_r;
    unsigned char *next;
    unsigned char *ptemp;
    unsigned short ret;
    LOCK();
    /* Number of remaining storable cells; read under the lock. */
    diff = fifo->max_number - fifo->curr_number;
    ptemp = fifo->in;
    /* Store as many as requested, capped by the free space. */
    ret = (diff < number) ? diff : number;
    count_temp = fifo->cell_size * ret;
    next = fifo->in + count_temp;
    /* Advance the write pointer and count before copying; the copy writes
       into the region captured in ptemp. */
    fifo->curr_number += ret;
    if (next < fifo->end)
    {
        fifo->in = next;
        memcpy(ptemp, data, count_temp);
    }
    else
    {
        /* The batch wraps: split the copy at the end of the buffer. */
        count_temp_r = fifo->end - ptemp;
        next = fifo->start + count_temp - count_temp_r;
        fifo->in = next;
        memcpy(ptemp, data, count_temp_r);
        memcpy(fifo->start, ((unsigned char *)data) + count_temp_r, count_temp - count_temp_r);
    }
    UNLOCK();
    return ret;
}
/**
* @description: output a cell
* @param {RING_FIFO_CB_T} *fifo
* @param {void} *data cell data [out]
* @return {*} Success or fail
* @note : fail if without cell
*/
bool Ring_Fifo_out_cell(RING_FIFO_CB_T *fifo, void *data)
{
    unsigned char *next;
    unsigned char *ptemp = fifo->out;  /* read position captured before advance */
    bool ret = false;
    LOCK();
    if (fifo->curr_number > 0)
    {
        next = fifo->out + fifo->cell_size;
        /* end is a whole multiple of cell_size past start, so wrap on >= */
        if (next >= fifo->end)
        {
            next = fifo->start;
        }
        /* Advance the read pointer and count before copying the payload out
           of the previously captured slot. */
        fifo->out = next;
        fifo->curr_number--;
        memcpy(data, ptemp, fifo->cell_size);
        ret = true;
    }
    UNLOCK();
    return ret;
}
/**
* @description: output a series of cells in fifo
* @param {RING_FIFO_CB_T} *fifo
* @param {void} *data cells data [out]
* @param {unsigned short} number expect out number of cells
* @return {*} number of successful output
* @note :
*/
/**
 * Pop up to `number` cells from the FIFO into `data`.
 *
 * @param fifo   FIFO control block
 * @param data   destination array with room for `number` cells
 * @param number cells the caller wants to read
 * @return       cells actually copied out (0 when the FIFO is empty)
 *
 * Fixes vs. the original:
 *  - curr_number == number previously fell through to ret = 0, so a
 *    request for exactly the stored count returned nothing;
 *  - the wrap split used fifo->in instead of fifo->out, corrupting the
 *    output whenever the batch crossed the end of the buffer;
 *  - FIFO state is now read after LOCK() is taken.
 */
unsigned short Ring_Fifo_out_cells(RING_FIFO_CB_T *fifo, void *data, unsigned short number)
{
    unsigned char *next;
    unsigned char *ptemp;
    unsigned short count_temp, count_temp_r;
    unsigned short ret;
    LOCK();
    ptemp = fifo->out;
    /* Return as many as requested, capped by what is stored. */
    ret = (fifo->curr_number < number) ? fifo->curr_number : number;
    count_temp = fifo->cell_size * ret;
    next = fifo->out + count_temp;
    fifo->curr_number -= ret;
    if (next < fifo->end)
    {
        fifo->out = next;
        memcpy(data, ptemp, count_temp);
    }
    else
    {
        /* The batch wraps: split at the end of the buffer, measured from
           the read pointer (was erroneously fifo->in). */
        count_temp_r = fifo->end - ptemp;
        next = fifo->start + count_temp - count_temp_r;
        fifo->out = next;
        memcpy(data, ptemp, count_temp_r);
        memcpy(((unsigned char *)data) + count_temp_r, fifo->start, count_temp - count_temp_r);
    }
    UNLOCK();
    return ret;
}

View File

@ -0,0 +1,47 @@
/*
* @Description :
* @Author : Aiyangsky
* @Date : 2022-08-26 21:42:02
* @LastEditors: L LC @amov
* @LastEditTime: 2023-03-03 16:12:37
* @FilePath: \host\gimbal-sdk-multi-platform\src\FIFO\Ring_Fifo.h
*/
#ifndef RING_FIFO_H
#define RING_FIFO_H
#include "stdbool.h"
#ifdef __cplusplus
extern "C"
{
#endif
/* Lock hooks are intentionally empty: the FIFO is not thread-safe by
   itself. Redefine these to enter/exit a critical section when the FIFO
   is shared between contexts (e.g. ISR and main loop). */
#define LOCK()
#define UNLOCK()
    /* Ring FIFO control block. Stores fixed-size cells in a caller-owned
       byte buffer; `end` is trimmed to a whole multiple of cell_size. */
    typedef struct
    {
        unsigned char *start;       /* first byte of the backing buffer */
        unsigned char *in;          /* next write position */
        unsigned char *out;         /* next read position */
        unsigned char *end;         /* one past the last usable byte */
        unsigned short curr_number; /* cells currently stored */
        unsigned short max_number;  /* capacity in cells */
        unsigned short cell_size;   /* bytes per cell */
    } RING_FIFO_CB_T;
    /* See Ring_Fifo.c for per-function documentation. */
    void Ring_Fifo_init(RING_FIFO_CB_T *fifo, unsigned short cell_size,
                        unsigned char *buffer, unsigned int buffer_lenght);
    bool Ring_Fifo_in_cell(RING_FIFO_CB_T *fifo, void *data);
    unsigned short Ring_Fifo_in_cells(RING_FIFO_CB_T *fifo, void *data, unsigned short number);
    bool Ring_Fifo_out_cell(RING_FIFO_CB_T *fifo, void *data);
    unsigned short Ring_Fifo_out_cells(RING_FIFO_CB_T *fifo, void *data, unsigned short number);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,93 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2022-10-27 18:10:06
* @LastEditors: L LC @amov
* @LastEditTime: 2022-10-28 14:10:02
* @FilePath: \amov-gimbal-sdk\src\G1\g1_gimbal_crc32.h
*/
#ifndef G1_GIMBAL_CRC32_H
#define G1_GIMBAL_CRC32_H
namespace G1
{
const unsigned int Crc32Table[256] = {
0x00000000, 0x04C11DB7, 0x09823B6E, 0x0D4326D9, 0x130476DC, 0x17C56B6B,
0x1A864DB2, 0x1E475005, 0x2608EDB8, 0x22C9F00F, 0x2F8AD6D6, 0x2B4BCB61,
0x350C9B64, 0x31CD86D3, 0x3C8EA00A, 0x384FBDBD, 0x4C11DB70, 0x48D0C6C7,
0x4593E01E, 0x4152FDA9, 0x5F15ADAC, 0x5BD4B01B, 0x569796C2, 0x52568B75,
0x6A1936C8, 0x6ED82B7F, 0x639B0DA6, 0x675A1011, 0x791D4014, 0x7DDC5DA3,
0x709F7B7A, 0x745E66CD, 0x9823B6E0, 0x9CE2AB57, 0x91A18D8E, 0x95609039,
0x8B27C03C, 0x8FE6DD8B, 0x82A5FB52, 0x8664E6E5, 0xBE2B5B58, 0xBAEA46EF,
0xB7A96036, 0xB3687D81, 0xAD2F2D84, 0xA9EE3033, 0xA4AD16EA, 0xA06C0B5D,
0xD4326D90, 0xD0F37027, 0xDDB056FE, 0xD9714B49, 0xC7361B4C, 0xC3F706FB,
0xCEB42022, 0xCA753D95, 0xF23A8028, 0xF6FB9D9F, 0xFBB8BB46, 0xFF79A6F1,
0xE13EF6F4, 0xE5FFEB43, 0xE8BCCD9A, 0xEC7DD02D, 0x34867077, 0x30476DC0,
0x3D044B19, 0x39C556AE, 0x278206AB, 0x23431B1C, 0x2E003DC5, 0x2AC12072,
0x128E9DCF, 0x164F8078, 0x1B0CA6A1, 0x1FCDBB16, 0x018AEB13, 0x054BF6A4,
0x0808D07D, 0x0CC9CDCA, 0x7897AB07, 0x7C56B6B0, 0x71159069, 0x75D48DDE,
0x6B93DDDB, 0x6F52C06C, 0x6211E6B5, 0x66D0FB02, 0x5E9F46BF, 0x5A5E5B08,
0x571D7DD1, 0x53DC6066, 0x4D9B3063, 0x495A2DD4, 0x44190B0D, 0x40D816BA,
0xACA5C697, 0xA864DB20, 0xA527FDF9, 0xA1E6E04E, 0xBFA1B04B, 0xBB60ADFC,
0xB6238B25, 0xB2E29692, 0x8AAD2B2F, 0x8E6C3698, 0x832F1041, 0x87EE0DF6,
0x99A95DF3, 0x9D684044, 0x902B669D, 0x94EA7B2A, 0xE0B41DE7, 0xE4750050,
0xE9362689, 0xEDF73B3E, 0xF3B06B3B, 0xF771768C, 0xFA325055, 0xFEF34DE2,
0xC6BCF05F, 0xC27DEDE8, 0xCF3ECB31, 0xCBFFD686, 0xD5B88683, 0xD1799B34,
0xDC3ABDED, 0xD8FBA05A, 0x690CE0EE, 0x6DCDFD59, 0x608EDB80, 0x644FC637,
0x7A089632, 0x7EC98B85, 0x738AAD5C, 0x774BB0EB, 0x4F040D56, 0x4BC510E1,
0x46863638, 0x42472B8F, 0x5C007B8A, 0x58C1663D, 0x558240E4, 0x51435D53,
0x251D3B9E, 0x21DC2629, 0x2C9F00F0, 0x285E1D47, 0x36194D42, 0x32D850F5,
0x3F9B762C, 0x3B5A6B9B, 0x0315D626, 0x07D4CB91, 0x0A97ED48, 0x0E56F0FF,
0x1011A0FA, 0x14D0BD4D, 0x19939B94, 0x1D528623, 0xF12F560E, 0xF5EE4BB9,
0xF8AD6D60, 0xFC6C70D7, 0xE22B20D2, 0xE6EA3D65, 0xEBA91BBC, 0xEF68060B,
0xD727BBB6, 0xD3E6A601, 0xDEA580D8, 0xDA649D6F, 0xC423CD6A, 0xC0E2D0DD,
0xCDA1F604, 0xC960EBB3, 0xBD3E8D7E, 0xB9FF90C9, 0xB4BCB610, 0xB07DABA7,
0xAE3AFBA2, 0xAAFBE615, 0xA7B8C0CC, 0xA379DD7B, 0x9B3660C6, 0x9FF77D71,
0x92B45BA8, 0x9675461F, 0x8832161A, 0x8CF30BAD, 0x81B02D74, 0x857130C3,
0x5D8A9099, 0x594B8D2E, 0x5408ABF7, 0x50C9B640, 0x4E8EE645, 0x4A4FFBF2,
0x470CDD2B, 0x43CDC09C, 0x7B827D21, 0x7F436096, 0x7200464F, 0x76C15BF8,
0x68860BFD, 0x6C47164A, 0x61043093, 0x65C52D24, 0x119B4BE9, 0x155A565E,
0x18197087, 0x1CD86D30, 0x029F3D35, 0x065E2082, 0x0B1D065B, 0x0FDC1BEC,
0x3793A651, 0x3352BBE6, 0x3E119D3F, 0x3AD08088, 0x2497D08D, 0x2056CD3A,
0x2D15EBE3, 0x29D4F654, 0xC5A92679, 0xC1683BCE, 0xCC2B1D17, 0xC8EA00A0,
0xD6AD50A5, 0xD26C4D12, 0xDF2F6BCB, 0xDBEE767C, 0xE3A1CBC1, 0xE760D676,
0xEA23F0AF, 0xEEE2ED18, 0xF0A5BD1D, 0xF464A0AA, 0xF9278673, 0xFDE69BC4,
0x89B8FD09, 0x8D79E0BE, 0x803AC667, 0x84FBDBD0, 0x9ABC8BD5, 0x9E7D9662,
0x933EB0BB, 0x97FFAD0C, 0xAFB010B1, 0xAB710D06, 0xA6322BDF, 0xA2F33668,
0xBCB4666D, 0xB8757BDA, 0xB5365D03, 0xB1F740B4};
    // Computes the G1 protocol's CRC32 over `Length` bytes.
    // NOTE(review): this is a non-standard variant — each input byte is
    // XORed into the LOW byte of the register, followed by 4 table-driven
    // shift rounds (poly 0x04C11DB7 table, init 0xFFFFFFFF, no final XOR).
    // It will NOT match common CRC-32 implementations; it must match the
    // gimbal firmware, so do not "fix" it — confirm against the device.
    static inline unsigned int CRC32Software(const unsigned char *pData, unsigned short Length)
    {
        unsigned int nReg;
        unsigned int nTemp = 0;
        unsigned short i, n;
        nReg = 0xFFFFFFFF;
        for (n = 0; n < Length; n++)
        {
            nReg ^= (unsigned int)pData[n];
            for (i = 0; i < 4; i++)
            {
                // Table lookup keyed on the top register byte, then shift.
                nTemp = Crc32Table[(unsigned char)((nReg >> 24) & 0xff)];
                nReg <<= 8;
                nReg ^= nTemp;
            }
        }
        return nReg;
    }
static inline unsigned char CheckSum(unsigned char *pData, unsigned short Lenght)
{
unsigned short temp = 0;
unsigned short i = 0;
for (i = 0; i < Lenght; i++)
{
temp += pData[i];
}
return temp & 0XFF;
}
} // namespace name
#endif

View File

@ -0,0 +1,245 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2022-10-27 18:10:06
* @LastEditors: L LC @amov
* @LastEditTime: 2023-04-18 10:12:46
* @FilePath: /gimbal-sdk-multi-platform/src/G1/g1_gimbal_driver.cpp
*/
#include "g1_gimbal_driver.h"
#include "g1_gimbal_crc32.h"
#include "string.h"
/**
* The function creates a new instance of the g1GimbalDriver class, which is a subclass of the
* IamovGimbalBase class
*
* @param _IO The IOStreamBase object that will be used to communicate with the gimbal.
*/
// Allocates the RX/TX frame queues (MAX_QUEUE_SIZE frames each) and puts
// the serial parser into its idle state. Exits the process if either
// allocation fails; buffers are released in the destructor.
g1GimbalDriver::g1GimbalDriver(amovGimbal::IOStreamBase *_IO) : amovGimbal::IamovGimbalBase(_IO)
{
    memset(&rxQueue, 0, sizeof(RING_FIFO_CB_T));
    memset(&txQueue, 0, sizeof(RING_FIFO_CB_T));
    rxBuffer = (uint8_t *)malloc(MAX_QUEUE_SIZE * sizeof(G1::GIMBAL_FRAME_T));
    if (rxBuffer == NULL)
    {
        std::cout << "Receive buffer creation failed! Size : " << MAX_QUEUE_SIZE << std::endl;
        exit(1);
    }
    txBuffer = (uint8_t *)malloc(MAX_QUEUE_SIZE * sizeof(G1::GIMBAL_FRAME_T));
    if (txBuffer == NULL)
    {
        // Release the RX buffer before bailing out.
        free(rxBuffer);
        std::cout << "Send buffer creation failed! Size : " << MAX_QUEUE_SIZE << std::endl;
        exit(1);
    }
    Ring_Fifo_init(&rxQueue, sizeof(G1::GIMBAL_FRAME_T), rxBuffer, MAX_QUEUE_SIZE * sizeof(G1::GIMBAL_FRAME_T));
    Ring_Fifo_init(&txQueue, sizeof(G1::GIMBAL_FRAME_T), txBuffer, MAX_QUEUE_SIZE * sizeof(G1::GIMBAL_FRAME_T));
    parserState = G1::GIMBAL_SERIAL_STATE_IDLE;
}
/**
* The function takes a command, a pointer to a payload, and the size of the payload. It then copies
* the payload into the tx buffer, calculates the checksum, and then calculates the CRC32 of the
* payload. It then copies the CRC32 into the tx buffer, and then copies the tx buffer into the txQueue
*
* @param uint32_t 4 bytes
* @param pPayload pointer to the data to be sent
* @param payloadSize the size of the payload
*
* @return The size of the data to be sent.
*/
// Builds a G1 frame (head, version, length, cmd, header checksum, payload,
// payload CRC32) and enqueues it for transmission.
// Returns the on-wire frame size on success, 0 if the TX queue is full.
// NOTE: "lenght" is the protocol struct's field spelling — do not rename.
uint32_t g1GimbalDriver::pack(IN uint32_t cmd, uint8_t *pPayload, uint8_t payloadSize)
{
    uint32_t ret = 0;
    G1::GIMBAL_FRAME_T txTemp;
    txTemp.head = G1_SERIAL_HEAD;
    txTemp.version = G1_SERIAL_VERSION;
    txTemp.lenght = payloadSize;
    txTemp.cmd = cmd;
    // Header checksum covers version, length and cmd (3 bytes).
    txTemp.checksum = G1::CheckSum((unsigned char *)&txTemp.version, 3);
    memcpy(txTemp.payload, pPayload, payloadSize);
    // Payload CRC32 is appended directly after the payload bytes.
    txTemp.crc.u32 = G1::CRC32Software(txTemp.payload, payloadSize);
    memcpy(txTemp.payload + payloadSize, txTemp.crc.u8, sizeof(uint32_t));
    txMutex.lock();
    if (Ring_Fifo_in_cell(&txQueue, &txTemp))
    {
        ret = txTemp.lenght + G1_PAYLOAD_OFFSET + sizeof(uint32_t);
    }
    txMutex.unlock();
    return ret;
}
/**
* > This function is used to get a packet from the receive queue
*
* @param void This is the type of data that will be stored in the queue.
*
* @return A boolean value.
*/
// Pops one received frame from the RX queue into `pack`.
// Returns true when a frame was available, false when the queue was empty.
bool g1GimbalDriver::getRxPack(OUT void *pack)
{
    rxMutex.lock();
    const bool popped = Ring_Fifo_out_cell(&rxQueue, pack);
    rxMutex.unlock();
    return popped;
}
// Decodes a received frame and updates the cached gimbal state.
// Currently only GIMBAL_CMD_RCV_POS (attitude report) is handled; any
// other command is dumped to stdout in hex for debugging.
void g1GimbalDriver::convert(void *buf)
{
    G1::GIMBAL_FRAME_T *temp;
    temp = reinterpret_cast<G1::GIMBAL_FRAME_T *>(buf);
    switch (temp->cmd)
    {
    case G1::GIMBAL_CMD_RCV_POS:
        G1::GIMBAL_RCV_POS_MSG_T *tempPos;
        // Payload starts G1_PAYLOAD_OFFSET bytes into the frame.
        tempPos = reinterpret_cast<G1::GIMBAL_RCV_POS_MSG_T *>(((uint8_t *)buf) + G1_PAYLOAD_OFFSET);
        mState.lock();
        // Raw int16 fields are scaled to engineering units (G1_SCALE_FACTOR).
        state.abs.yaw = tempPos->IMU_yaw * G1_SCALE_FACTOR;
        state.abs.roll = tempPos->IMU_roll * G1_SCALE_FACTOR;
        state.abs.pitch = tempPos->IMU_pitch * G1_SCALE_FACTOR;
        state.rel.yaw = tempPos->HALL_yaw * G1_SCALE_FACTOR;
        state.rel.roll = tempPos->HALL_roll * G1_SCALE_FACTOR;
        state.rel.pitch = tempPos->HALL_pitch * G1_SCALE_FACTOR;
        // Callback is invoked while mState is held — keep handlers short.
        updateGimbalStateCallback(state.abs.roll, state.abs.pitch, state.abs.yaw,
                                  state.rel.roll, state.rel.pitch, state.rel.yaw,
                                  state.fov.x, state.fov.y);
        mState.unlock();
        break;
    default:
        std::cout << "Undefined frame from G1 : ";
        for (uint16_t i = 0; i < temp->lenght + G1_PAYLOAD_OFFSET + sizeof(uint32_t); i++)
        {
            printf("%02X ", ((uint8_t *)buf)[i]);
        }
        std::cout << std::endl;
        break;
    }
}
/**
* The function is called by the main thread to send a command to the gimbal.
*
* The function first checks to see if the serial port is busy and if it is open. If it is not busy and
* it is open, the function locks the txMutex and then checks to see if there is a command in the
* txQueue. If there is a command in the txQueue, the function copies the command to the tx buffer and
* then unlocks the txMutex. The function then sends the command to the gimbal.
*
* The txQueue is a ring buffer that holds commands that are waiting to be sent to the gimbal. The
* txQueue is a ring buffer because the gimbal can only process one command at a time. If the gimbal is
* busy processing a command, the command will be placed in the txQueue and sent to the gimbal when the
* gimbal is ready to receive the command.
*/
// Transmits at most one queued frame per call: when the stream is open and
// idle, pops the next frame from the TX queue and writes it to the device.
void g1GimbalDriver::send(void)
{
    if (IO->isBusy() || !IO->isOpen())
    {
        return;
    }
    txMutex.lock();
    const bool havePacket = Ring_Fifo_out_cell(&txQueue, &tx);
    txMutex.unlock();
    if (havePacket)
    {
        IO->outPutBytes((uint8_t *)&tx, tx.lenght + G1_PAYLOAD_OFFSET + sizeof(uint32_t));
    }
}
/**
* It's a state machine that parses a serial stream of bytes into a struct
*
* @param uint8_t unsigned char
*
* @return A boolean value.
*/
// Byte-at-a-time state machine that reassembles G1 frames from the serial
// stream. Returns true exactly when a complete, CRC-valid frame has been
// pushed onto the RX queue.
// NOTE(review): the static locals make this non-reentrant — it must only
// be fed from a single thread/stream.
bool g1GimbalDriver::parser(IN uint8_t byte)
{
    bool state = false;
    // Remaining payload bytes to read (payload length + 4 CRC bytes).
    static uint8_t payloadLenghte = 0;
    static uint8_t *pRx = NULL;
    switch (parserState)
    {
    case G1::GIMBAL_SERIAL_STATE_IDLE:
        // Hunt for the frame head byte.
        if (byte == G1_SERIAL_HEAD)
        {
            rx.head = byte;
            parserState = G1::GIMBAL_SERIAL_STATE_VERSION;
        }
        break;
    case G1::GIMBAL_SERIAL_STATE_VERSION:
        if (byte == G1_SERIAL_VERSION)
        {
            rx.version = byte;
            payloadLenghte = 0;
            parserState = G1::GIMBAL_SERIAL_STATE_LENGHT;
        }
        else
        {
            // Wrong version: resync from the head byte.
            rx.head = 0;
            parserState = G1::GIMBAL_SERIAL_STATE_IDLE;
        }
        break;
    case G1::GIMBAL_SERIAL_STATE_LENGHT:
        // +4 accounts for the trailing CRC32 read along with the payload.
        payloadLenghte = byte + 4;
        rx.lenght = byte;
        parserState = G1::GIMBAL_SERIAL_STATE_CMD;
        break;
    case G1::GIMBAL_SERIAL_STATE_CMD:
        rx.cmd = byte;
        parserState = G1::GIMBAL_SERIAL_STATE_CHECK;
        break;
    case G1::GIMBAL_SERIAL_STATE_CHECK:
        rx.checksum = byte;
        // Header checksum covers version, length and cmd (3 bytes).
        if (G1::CheckSum((unsigned char *)&rx.version, 3) == byte)
        {
            parserState = G1::GIMBAL_SERIAL_STATE_PAYLOAD;
            pRx = rx.payload;
        }
        else
        {
            // Bad header: clear the 5 header bytes and resync.
            memset(&rx, 0, 5);
            parserState = G1::GIMBAL_SERIAL_STATE_IDLE;
        }
        break;
    case G1::GIMBAL_SERIAL_STATE_PAYLOAD:
        *pRx = byte;
        payloadLenghte--;
        pRx++;
        if (payloadLenghte <= 0)
        {
            // Last 4 bytes received are the frame CRC; verify the payload.
            if (*((uint32_t *)(pRx - sizeof(uint32_t))) == G1::CRC32Software(rx.payload, rx.lenght))
            {
                state = true;
                rxMutex.lock();
                Ring_Fifo_in_cell(&rxQueue, &rx);
                rxMutex.unlock();
            }
            else
            {
                // CRC mismatch: drop the frame.
                memset(&rx, 0, sizeof(G1::GIMBAL_FRAME_T));
            }
            parserState = G1::GIMBAL_SERIAL_STATE_IDLE;
        }
        break;
    default:
        parserState = G1::GIMBAL_SERIAL_STATE_IDLE;
        break;
    }
    return state;
}

View File

@ -0,0 +1,68 @@
/*
* @Description: G1吊舱的驱动文件
* @Author: L LC @amov
* @Date: 2022-10-28 12:24:21
* @LastEditors: L LC @amov
* @LastEditTime: 2023-03-13 12:29:17
* @FilePath: \gimbal-sdk-multi-platform\src\G1\g1_gimbal_driver.h
*/
#include "../amov_gimbal.h"
#include "g1_gimbal_struct.h"
#include <mutex>
#include <malloc.h>
#include <iostream>
#ifndef __G1_DRIVER_H
#define __G1_DRIVER_H
extern "C"
{
#include "Ring_Fifo.h"
}
// Driver for the AMOV G1 gimbal: frames commands for the G1 serial
// protocol, parses inbound frames, and buffers both directions through
// lock-protected ring FIFOs.
class g1GimbalDriver : protected amovGimbal::IamovGimbalBase
{
private:
    G1::GIMBAL_CMD_PARSER_STATE_T parserState; // current RX parser state
    G1::GIMBAL_FRAME_T rx;                     // frame being assembled by parser()
    G1::GIMBAL_FRAME_T tx;                     // frame being transmitted by send()
    std::mutex rxMutex;                        // guards rxQueue
    uint8_t *rxBuffer;                         // malloc'd backing store for rxQueue
    RING_FIFO_CB_T rxQueue;
    std::mutex txMutex;                        // guards txQueue
    uint8_t *txBuffer;                         // malloc'd backing store for txQueue
    RING_FIFO_CB_T txQueue;
    bool parser(IN uint8_t byte);
    void send(void);
    void convert(void *buf);
    uint32_t pack(IN uint32_t cmd, uint8_t *pPayload, uint8_t payloadSize);
    bool getRxPack(OUT void *pack);
public:
    // funtions
    uint32_t setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &pos);
    uint32_t setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &speed);
    uint32_t setGimabalFollowSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &followSpeed);
    uint32_t setGimabalHome(void);
    uint32_t takePic(void);
    uint32_t setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState);
    // builds
    // Factory used by the SDK's device registry.
    static amovGimbal::IamovGimbalBase *creat(amovGimbal::IOStreamBase *_IO)
    {
        return new g1GimbalDriver(_IO);
    }
    g1GimbalDriver(amovGimbal::IOStreamBase *_IO);
    // Releases the queue backing buffers allocated in the constructor.
    ~g1GimbalDriver()
    {
        free(rxBuffer);
        free(txBuffer);
    }
};
#endif

View File

@ -0,0 +1,118 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-03-02 10:00:52
* @LastEditors: L LC @amov
* @LastEditTime: 2023-03-17 18:29:33
* @FilePath: \gimbal-sdk-multi-platform\src\G1\g1_gimbal_funtion.cpp
*/
#include "g1_gimbal_driver.h"
#include "g1_gimbal_crc32.h"
#include "string.h"
/**
* It sets the gimbal position.
*
* @param pos the position of the gimbal
*
* @return The return value is the number of bytes written to the buffer.
*/
// Commands an absolute angle move. Angles are converted from engineering
// units to raw protocol units (divide by G1_SCALE_FACTOR); per-axis speeds
// come from the cached maxFollow limits.
// Returns the queued frame size, or 0 if the TX queue was full.
uint32_t g1GimbalDriver::setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &pos)
{
    G1::GIMBAL_SET_POS_MSG_T temp;
    temp.mode = G1::GIMBAL_CMD_POS_MODE_ANGLE;
    temp.angle_pitch = pos.pitch / G1_SCALE_FACTOR;
    temp.angle_roll = pos.roll / G1_SCALE_FACTOR;
    temp.angle_yaw = pos.yaw / G1_SCALE_FACTOR;
    // NOTE(review): state.maxFollow is read without taking mState — confirm
    // whether a lock is needed here.
    temp.speed_pitch = state.maxFollow.pitch;
    temp.speed_roll = state.maxFollow.roll;
    temp.speed_yaw = state.maxFollow.yaw;
    return pack(G1::GIMBAL_CMD_SET_POS, reinterpret_cast<uint8_t *>(&temp), sizeof(G1::GIMBAL_SET_POS_MSG_T));
}
/**
* It takes a struct of type amovGimbal::AMOV_GIMBAL_POS_T and converts it to a struct of type
* G1::GIMBAL_SET_POS_MSG_T
*
* @param speed the speed of the gimbal
*
* @return The return value is the number of bytes written to the buffer.
*/
// Commands a rate (speed) move: angles are zeroed and per-axis speeds are
// converted to raw protocol units.
// Returns the queued frame size, or 0 if the TX queue was full.
uint32_t g1GimbalDriver::setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &speed)
{
    G1::GIMBAL_SET_POS_MSG_T temp;
    temp.mode = G1::GIMBAL_CMD_POS_MODE_SPEED;
    temp.angle_pitch = 0;
    temp.angle_roll = 0;
    temp.angle_yaw = 0;
    temp.speed_pitch = speed.pitch / G1_SCALE_FACTOR;
    temp.speed_roll = speed.roll / G1_SCALE_FACTOR;
    temp.speed_yaw = speed.yaw / G1_SCALE_FACTOR;
    return pack(G1::GIMBAL_CMD_SET_POS, reinterpret_cast<uint8_t *>(&temp), sizeof(G1::GIMBAL_SET_POS_MSG_T));
}
/**
* This function sets the gimbal's follow speed
*
* @param followSpeed the speed of the gimbal
*
* @return The return value is the number of bytes written to the buffer.
*/
// Caches the per-axis follow-speed limits (raw protocol units) used by
// later position/home commands. Sends nothing to the device; returns 0.
// NOTE(review): state.maxFollow is written without taking mState — confirm
// whether a lock is needed here.
uint32_t g1GimbalDriver::setGimabalFollowSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &followSpeed)
{
    state.maxFollow.pitch = followSpeed.pitch / G1_SCALE_FACTOR;
    state.maxFollow.roll = followSpeed.roll / G1_SCALE_FACTOR;
    state.maxFollow.yaw = followSpeed.yaw / G1_SCALE_FACTOR;
    return 0;
}
/**
* This function sets the gimbal to its home position
*
* @return The return value is the number of bytes written to the buffer.
*/
// Commands the gimbal to return to its home position.
// Returns the queued frame size, or 0 if the TX queue was full.
uint32_t g1GimbalDriver::setGimabalHome(void)
{
    G1::GIMBAL_SET_POS_MSG_T temp;
    // Zero the whole message first: the original left angle_roll/pitch/yaw
    // uninitialized, so stack garbage was transmitted in those fields.
    memset(&temp, 0, sizeof(temp));
    temp.mode = G1::GIMBAL_CMD_POS_MODE_HOME;
    temp.speed_pitch = state.maxFollow.pitch;
    temp.speed_roll = state.maxFollow.roll;
    temp.speed_yaw = state.maxFollow.yaw;
    return pack(G1::GIMBAL_CMD_SET_POS, reinterpret_cast<uint8_t *>(&temp), sizeof(G1::GIMBAL_SET_POS_MSG_T));
}
/**
* It takes a picture.
*
* @return The return value is the number of bytes written to the serial port.
*/
// Queues a "take picture" camera command.
// Returns the queued frame size, or 0 if the TX queue was full.
uint32_t g1GimbalDriver::takePic(void)
{
    uint8_t cmdByte = G1::GIMBAL_CMD_CAMERA_TACK;
    return pack(G1::GIMBAL_CMD_CAMERA, &cmdByte, sizeof(cmdByte));
}
/**
* The function sets the video state of the gimbal
*
* @param newState The new state of the video.
*
* @return The return value is the number of bytes written to the serial port.
*/
// Sends a record (start/stop) camera command and toggles the cached video
// state under the state lock.
// NOTE(review): `newState` is never read — the cached state simply toggles
// on every call regardless of the requested value. Confirm whether the
// device-side REC command is itself a toggle before changing this.
uint32_t g1GimbalDriver::setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState)
{
    uint8_t temp = G1::GIMBAL_CMD_CAMERA_REC;
    mState.lock();
    if(state.video == amovGimbal::AMOV_GIMBAL_VIDEO_TAKE)
    {
        state.video = amovGimbal::AMOV_GIMBAL_VIDEO_OFF;
    }
    else
    {
        state.video = amovGimbal::AMOV_GIMBAL_VIDEO_TAKE;
    }
    mState.unlock();
    return pack(G1::GIMBAL_CMD_CAMERA, &temp, sizeof(uint8_t));
}

View File

@ -0,0 +1,91 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2022-10-27 18:10:07
* @LastEditors: L LC @amov
* @LastEditTime: 2023-03-17 18:12:57
* @FilePath: \gimbal-sdk-multi-platform\src\G1\g1_gimbal_struct.h
*/
#ifndef G1_GIMBAL_STRUCT_H
#define G1_GIMBAL_STRUCT_H
#include <stdint.h>
// Wire-protocol definitions for the AMOV G1 gimbal serial link.
namespace G1
{
// Largest payload one frame can carry, in bytes.
#define G1_MAX_GIMBAL_PAYLOAD 256
// Header bytes before the payload: head, version, lenght, cmd, checksum.
#define G1_PAYLOAD_OFFSET 5
// Scale factor applied to the raw int16 angle/speed fields.
#define G1_SCALE_FACTOR 0.01f
#define G1_SERIAL_HEAD 0XAE    // first byte of every frame
#define G1_SERIAL_VERSION 0X01 // protocol version byte
// Command identifiers (frame "cmd" field).
typedef enum
{
    GIMBAL_CMD_SET_POS = 0X85, // host -> gimbal: position/speed setpoint
    GIMBAL_CMD_CAMERA = 0X86,  // host -> gimbal: camera record/snapshot
    GIMBAL_CMD_RCV_POS = 0X87  // gimbal -> host: attitude feedback
} GIMBAL_CMD_T;
// Sub-mode carried in GIMBAL_SET_POS_MSG_T.mode.
typedef enum
{
    GIMBAL_CMD_POS_MODE_SPEED = 1,
    GIMBAL_CMD_POS_MODE_ANGLE = 2,
    GIMBAL_CMD_POS_MODE_HOME = 3
} GIMBAL_CMD_POS_MODE_T;
// Payload values for GIMBAL_CMD_CAMERA.
typedef enum
{
    GIMBAL_CMD_CAMERA_REC = 1, // toggle video recording
    GIMBAL_CMD_CAMERA_TACK = 2 // take a picture (sic: "TACK")
} GIMBAL_CMD_CAMERA_T;
// Receive-parser states ("LENGHT" spelling kept: referenced elsewhere).
typedef enum
{
    GIMBAL_SERIAL_STATE_IDLE,
    GIMBAL_SERIAL_STATE_VERSION,
    GIMBAL_SERIAL_STATE_LENGHT,
    GIMBAL_SERIAL_STATE_CMD,
    GIMBAL_SERIAL_STATE_CHECK,
    GIMBAL_SERIAL_STATE_PAYLOAD,
} GIMBAL_CMD_PARSER_STATE_T;
#pragma pack(1) // wire structures: no padding
// One on-the-wire frame; payload has extra room for a trailing 32-bit CRC.
typedef struct
{
    uint8_t head;     // G1_SERIAL_HEAD
    uint8_t version;  // G1_SERIAL_VERSION
    uint8_t lenght;   // payload length in bytes (sic)
    uint8_t cmd;      // one of GIMBAL_CMD_T
    uint8_t checksum;
    uint8_t payload[G1_MAX_GIMBAL_PAYLOAD + sizeof(uint32_t)];
    union
    {
        uint8_t u8[4];
        uint32_t u32;
    } crc; // CRC scratch area
} GIMBAL_FRAME_T;
// Payload of GIMBAL_CMD_SET_POS; raw int16 values, see G1_SCALE_FACTOR.
typedef struct
{
    uint8_t mode; // GIMBAL_CMD_POS_MODE_T
    int16_t angle_roll;
    int16_t angle_pitch;
    int16_t angle_yaw;
    int16_t speed_roll;
    int16_t speed_pitch;
    int16_t speed_yaw;
} GIMBAL_SET_POS_MSG_T;
// Payload of GIMBAL_CMD_RCV_POS: IMU and Hall-sensor raw angles.
typedef struct
{
    int16_t IMU_roll;
    int16_t IMU_pitch;
    int16_t IMU_yaw;
    int16_t HALL_roll;
    int16_t HALL_pitch;
    int16_t HALL_yaw;
} GIMBAL_RCV_POS_MSG_T;
#pragma pack()
}
#endif

View File

@ -0,0 +1,166 @@
#ifndef G2_GIMBAL_CHECK_H
#define G2_GIMBAL_CHECK_H

// FIX(review): <stdint.h> was previously included *inside* namespace G2,
// which would wrap its declarations in the namespace had it not already
// been included elsewhere; it is now included at global scope. The header
// guard also drops the leading double underscores (identifiers starting
// with "__" are reserved for the implementation).
#include "stdint.h"

// Frame-integrity helpers for the G2 gimbal protocol: a table-driven
// CRC-16/CCITT-FALSE, an STM32-compatible CRC-32, and an 8-bit checksum.
namespace G2
{
    // CRC-16 lookup table, polynomial x^16+x^12+x^5+1 (0x1021), MSB first.
    const uint16_t crc16_tab[256] = {
        0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
        0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
        0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
        0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
        0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
        0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
        0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
        0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
        0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
        0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
        0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
        0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
        0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
        0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
        0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
        0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
        0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
        0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
        0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
        0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
        0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
        0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
        0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
        0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
        0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
        0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
        0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
        0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
        0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
        0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
        0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
        0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0};

    /**
     * CRC-16/CCITT-FALSE over a byte buffer (init 0xFFFF, no final XOR).
     *
     * Standard table-driven form: for each input byte, the top CRC byte is
     * combined with the data byte to index the table, then folded into the
     * left-shifted register.
     *
     * @param pData pointer to the data to be checked
     * @param len the length of the data to be checked, in bytes
     *
     * @return The 16-bit CRC value.
     * @note 16 bit CRC with polynomial x^16+x^12+x^5+1
     */
    static inline uint16_t checkCrc16(uint8_t *pData, uint32_t len)
    {
        uint16_t crc = 0XFFFF;
        uint32_t idx = 0;
        for (idx = 0; idx < len; idx++)
        {
            crc = crc16_tab[((crc >> 8) ^ pData[idx]) & 0xFF] ^ (crc << 8);
        }
        return crc;
    }

    // CRC-32 lookup table, polynomial 0x04C11DB7 (same as the STM32
    // hardware CRC unit), MSB first.
    const unsigned int Crc32Table[256] = {
        0x00000000, 0x04C11DB7, 0x09823B6E, 0x0D4326D9, 0x130476DC, 0x17C56B6B,
        0x1A864DB2, 0x1E475005, 0x2608EDB8, 0x22C9F00F, 0x2F8AD6D6, 0x2B4BCB61,
        0x350C9B64, 0x31CD86D3, 0x3C8EA00A, 0x384FBDBD, 0x4C11DB70, 0x48D0C6C7,
        0x4593E01E, 0x4152FDA9, 0x5F15ADAC, 0x5BD4B01B, 0x569796C2, 0x52568B75,
        0x6A1936C8, 0x6ED82B7F, 0x639B0DA6, 0x675A1011, 0x791D4014, 0x7DDC5DA3,
        0x709F7B7A, 0x745E66CD, 0x9823B6E0, 0x9CE2AB57, 0x91A18D8E, 0x95609039,
        0x8B27C03C, 0x8FE6DD8B, 0x82A5FB52, 0x8664E6E5, 0xBE2B5B58, 0xBAEA46EF,
        0xB7A96036, 0xB3687D81, 0xAD2F2D84, 0xA9EE3033, 0xA4AD16EA, 0xA06C0B5D,
        0xD4326D90, 0xD0F37027, 0xDDB056FE, 0xD9714B49, 0xC7361B4C, 0xC3F706FB,
        0xCEB42022, 0xCA753D95, 0xF23A8028, 0xF6FB9D9F, 0xFBB8BB46, 0xFF79A6F1,
        0xE13EF6F4, 0xE5FFEB43, 0xE8BCCD9A, 0xEC7DD02D, 0x34867077, 0x30476DC0,
        0x3D044B19, 0x39C556AE, 0x278206AB, 0x23431B1C, 0x2E003DC5, 0x2AC12072,
        0x128E9DCF, 0x164F8078, 0x1B0CA6A1, 0x1FCDBB16, 0x018AEB13, 0x054BF6A4,
        0x0808D07D, 0x0CC9CDCA, 0x7897AB07, 0x7C56B6B0, 0x71159069, 0x75D48DDE,
        0x6B93DDDB, 0x6F52C06C, 0x6211E6B5, 0x66D0FB02, 0x5E9F46BF, 0x5A5E5B08,
        0x571D7DD1, 0x53DC6066, 0x4D9B3063, 0x495A2DD4, 0x44190B0D, 0x40D816BA,
        0xACA5C697, 0xA864DB20, 0xA527FDF9, 0xA1E6E04E, 0xBFA1B04B, 0xBB60ADFC,
        0xB6238B25, 0xB2E29692, 0x8AAD2B2F, 0x8E6C3698, 0x832F1041, 0x87EE0DF6,
        0x99A95DF3, 0x9D684044, 0x902B669D, 0x94EA7B2A, 0xE0B41DE7, 0xE4750050,
        0xE9362689, 0xEDF73B3E, 0xF3B06B3B, 0xF771768C, 0xFA325055, 0xFEF34DE2,
        0xC6BCF05F, 0xC27DEDE8, 0xCF3ECB31, 0xCBFFD686, 0xD5B88683, 0xD1799B34,
        0xDC3ABDED, 0xD8FBA05A, 0x690CE0EE, 0x6DCDFD59, 0x608EDB80, 0x644FC637,
        0x7A089632, 0x7EC98B85, 0x738AAD5C, 0x774BB0EB, 0x4F040D56, 0x4BC510E1,
        0x46863638, 0x42472B8F, 0x5C007B8A, 0x58C1663D, 0x558240E4, 0x51435D53,
        0x251D3B9E, 0x21DC2629, 0x2C9F00F0, 0x285E1D47, 0x36194D42, 0x32D850F5,
        0x3F9B762C, 0x3B5A6B9B, 0x0315D626, 0x07D4CB91, 0x0A97ED48, 0x0E56F0FF,
        0x1011A0FA, 0x14D0BD4D, 0x19939B94, 0x1D528623, 0xF12F560E, 0xF5EE4BB9,
        0xF8AD6D60, 0xFC6C70D7, 0xE22B20D2, 0xE6EA3D65, 0xEBA91BBC, 0xEF68060B,
        0xD727BBB6, 0xD3E6A601, 0xDEA580D8, 0xDA649D6F, 0xC423CD6A, 0xC0E2D0DD,
        0xCDA1F604, 0xC960EBB3, 0xBD3E8D7E, 0xB9FF90C9, 0xB4BCB610, 0xB07DABA7,
        0xAE3AFBA2, 0xAAFBE615, 0xA7B8C0CC, 0xA379DD7B, 0x9B3660C6, 0x9FF77D71,
        0x92B45BA8, 0x9675461F, 0x8832161A, 0x8CF30BAD, 0x81B02D74, 0x857130C3,
        0x5D8A9099, 0x594B8D2E, 0x5408ABF7, 0x50C9B640, 0x4E8EE645, 0x4A4FFBF2,
        0x470CDD2B, 0x43CDC09C, 0x7B827D21, 0x7F436096, 0x7200464F, 0x76C15BF8,
        0x68860BFD, 0x6C47164A, 0x61043093, 0x65C52D24, 0x119B4BE9, 0x155A565E,
        0x18197087, 0x1CD86D30, 0x029F3D35, 0x065E2082, 0x0B1D065B, 0x0FDC1BEC,
        0x3793A651, 0x3352BBE6, 0x3E119D3F, 0x3AD08088, 0x2497D08D, 0x2056CD3A,
        0x2D15EBE3, 0x29D4F654, 0xC5A92679, 0xC1683BCE, 0xCC2B1D17, 0xC8EA00A0,
        0xD6AD50A5, 0xD26C4D12, 0xDF2F6BCB, 0xDBEE767C, 0xE3A1CBC1, 0xE760D676,
        0xEA23F0AF, 0xEEE2ED18, 0xF0A5BD1D, 0xF464A0AA, 0xF9278673, 0xFDE69BC4,
        0x89B8FD09, 0x8D79E0BE, 0x803AC667, 0x84FBDBD0, 0x9ABC8BD5, 0x9E7D9662,
        0x933EB0BB, 0x97FFAD0C, 0xAFB010B1, 0xAB710D06, 0xA6322BDF, 0xA2F33668,
        0xBCB4666D, 0xB8757BDA, 0xB5365D03, 0xB1F740B4};

    /**
     * CRC-32 (poly 0x04C11DB7, init 0xFFFFFFFF, no reflection, no final
     * XOR) computed byte-wise with four table folds per input byte — this
     * matches the STM32 hardware CRC unit fed one byte per 32-bit word.
     *
     * @param pData pointer to the data to be CRC'd
     * @param Length number of input bytes
     *
     * @return The CRC32 value of the data.
     */
    static inline uint32_t checkCRC32(uint8_t *pData, uint32_t Length)
    {
        unsigned int nReg;
        unsigned int nTemp = 0;
        unsigned short i, n;
        nReg = 0xFFFFFFFF;
        for (n = 0; n < Length; n++)
        {
            nReg ^= (unsigned int)pData[n];
            // Fold four times: the input byte is treated as a 32-bit word.
            for (i = 0; i < 4; i++)
            {
                nTemp = Crc32Table[(unsigned char)((nReg >> 24) & 0xff)];
                nReg <<= 8;
                nReg ^= nTemp;
            }
        }
        return nReg;
    }

    /**
     * 8-bit additive checksum: sum of all bytes, truncated to one byte.
     *
     * @param pData The data to be summed
     * @param Lenght number of bytes (sic)
     *
     * @return The low byte of the sum of the bytes in the array.
     */
    static inline unsigned char CheckSum(unsigned char *pData, unsigned short Lenght)
    {
        unsigned short temp = 0;
        unsigned short i = 0;
        for (i = 0; i < Lenght; i++)
        {
            temp += pData[i];
        }
        return temp & 0XFF;
    }
}
#endif

View File

@ -0,0 +1,243 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-03-01 10:12:58
* @LastEditors: L LC @amov
* @LastEditTime: 2023-04-11 17:33:42
* @FilePath: /gimbal-sdk-multi-platform/src/G2/g2_gimbal_driver.cpp
*/
#include "g2_gimbal_driver.h"
#include "g2_gimbal_crc.h"
#include "string.h"
/**
 * Construct a G2 gimbal driver on top of the given I/O stream.
 *
 * Allocates storage for the receive and transmit ring buffers
 * (MAX_QUEUE_SIZE frames each) and initialises both FIFOs; the process
 * exits if either allocation fails, since the driver cannot run without
 * its queues.
 *
 * @param _IO The IOStreamBase class that is used to communicate with the gimbal.
 */
g2GimbalDriver::g2GimbalDriver(amovGimbal::IOStreamBase *_IO) : amovGimbal::IamovGimbalBase(_IO)
{
    const uint32_t queueBytes = MAX_QUEUE_SIZE * sizeof(G2::GIMBAL_FRAME_T);

    memset(&rxQueue, 0, sizeof(RING_FIFO_CB_T));
    memset(&txQueue, 0, sizeof(RING_FIFO_CB_T));

    rxBuffer = (uint8_t *)malloc(queueBytes);
    if (rxBuffer == NULL)
    {
        std::cout << "Receive buffer creation failed! Size : " << MAX_QUEUE_SIZE << std::endl;
        exit(1);
    }

    txBuffer = (uint8_t *)malloc(queueBytes);
    if (txBuffer == NULL)
    {
        // Release the rx buffer before bailing out.
        free(rxBuffer);
        std::cout << "Send buffer creation failed! Size : " << MAX_QUEUE_SIZE << std::endl;
        exit(1);
    }

    Ring_Fifo_init(&rxQueue, sizeof(G2::GIMBAL_FRAME_T), rxBuffer, queueBytes);
    Ring_Fifo_init(&txQueue, sizeof(G2::GIMBAL_FRAME_T), txBuffer, queueBytes);

    parserState = G2::GIMBAL_SERIAL_STATE_IDEL;
}
/**
 * Build a G2 frame around the given command/payload and queue it for sending.
 *
 * Fills the 6-byte header (head, version, len, command, source, target),
 * copies the payload, computes a CRC-16 over header + payload and appends
 * it directly after the payload bytes inside data[].
 *
 * NOTE(review): the CRC is appended via memcpy of crc.f8 (low byte first on
 * a little-endian host), while the receive parser stores the first wire
 * byte into crc.f8[1] (high byte first) — confirm the gimbal expects this
 * asymmetry between tx and rx.
 *
 * @param cmd command identifier placed in the frame
 * @param pPayload pointer to the payload bytes to copy
 * @param payloadSize the size of the payload in bytes
 *
 * @return Total frame length queued, or 0 if the tx FIFO was full.
 */
uint32_t g2GimbalDriver::pack(IN uint32_t cmd, uint8_t *pPayload, uint8_t payloadSize)
{
    uint32_t ret = 0;
    G2::GIMBAL_FRAME_T txTemp;
    txTemp.head = G2_SERIAL_HEAD;
    txTemp.version = G2_SERIAL_VERSION;
    txTemp.len = payloadSize;
    txTemp.command = cmd;
    txTemp.source = self;
    txTemp.target = remote;
    memcpy(txTemp.data, pPayload, payloadSize);
    // CRC covers the 6 header bytes (G2_PAYLOAD_OFFSET) plus the payload.
    txTemp.crc.f16 = G2::checkCrc16((uint8_t *)&txTemp, txTemp.len + G2_PAYLOAD_OFFSET);
    memcpy(txTemp.data + payloadSize, txTemp.crc.f8, sizeof(uint16_t));
    txMutex.lock();
    if (Ring_Fifo_in_cell(&txQueue, &txTemp))
    {
        ret = txTemp.len + G2_PAYLOAD_OFFSET + sizeof(uint16_t);
    }
    txMutex.unlock();
    return ret;
}
/**
 * Pop one received frame from the rx FIFO, if any is pending.
 *
 * @param pack destination buffer (large enough for one G2::GIMBAL_FRAME_T)
 *
 * @return true when a frame was copied out, false when the queue was empty.
 */
bool g2GimbalDriver::getRxPack(OUT void *pack)
{
    rxMutex.lock();
    const bool popped = Ring_Fifo_out_cell(&rxQueue, pack);
    rxMutex.unlock();
    return popped;
}
/**
 * Dispatch one complete received frame.
 *
 * For IAP_COMMAND_BLOCK_END the cached gimbal state is forwarded through
 * updateGimbalStateCallback under the state mutex; any other command is
 * dumped to stdout in hex.
 *
 * NOTE(review): nothing from the frame itself is decoded here — the
 * callback republishes whatever is already in `state`; confirm whether
 * IAP_COMMAND_BLOCK_END (117) doubles as the attitude-report command.
 *
 * @param buf pointer to a complete G2::GIMBAL_FRAME_T received from the gimbal
 */
void g2GimbalDriver::convert(void *buf)
{
    G2::GIMBAL_FRAME_T *temp;
    temp = reinterpret_cast<G2::GIMBAL_FRAME_T *>(buf);
    switch (temp->command)
    {
    case G2::IAP_COMMAND_BLOCK_END:
        mState.lock();
        updateGimbalStateCallback(state.abs.roll, state.abs.pitch, state.abs.yaw,
                                  state.rel.roll, state.rel.pitch, state.rel.yaw,
                                  state.fov.x, state.fov.y);
        mState.unlock();
        break;
    default:
        // Unknown command: dump header + payload + 4 trailing bytes in hex.
        // NOTE(review): sizeof(uint32_t) is used although the G2 CRC is
        // 16-bit — confirm the intended dump length.
        std::cout << "Undefined frame from G2 : ";
        for (uint16_t i = 0; i < temp->len + G2_PAYLOAD_OFFSET + sizeof(uint32_t); i++)
        {
            printf("%02X ", ((uint8_t *)buf)[i]);
        }
        std::cout << std::endl;
        break;
    }
}
/**
 * Pump at most one queued frame out to the serial stream.
 *
 * Does nothing while the stream is busy or closed; otherwise pops a frame
 * from the tx FIFO under the tx mutex and writes header + payload + 16-bit
 * CRC to the port.
 */
void g2GimbalDriver::send(void)
{
    if (IO->isBusy() || !IO->isOpen())
    {
        return;
    }

    txMutex.lock();
    const bool havePending = Ring_Fifo_out_cell(&txQueue, &tx);
    txMutex.unlock();

    if (havePending)
    {
        IO->outPutBytes((uint8_t *)&tx, tx.len + G2_PAYLOAD_OFFSET + sizeof(uint16_t));
    }
}
/**
 * Byte-wise receive state machine for the G2 serial protocol.
 *
 * Called once per incoming byte; assembles head/version/target/source/
 * len/command, the payload, and a big-endian CRC-16 trailer. When a frame
 * passes the CRC check it is pushed into the rx FIFO.
 *
 * Fixes relative to the original implementation:
 *  - the payload write cursor `pRx` was never advanced, so every payload
 *    byte overwrote rx.data[0] (cf. the Q10f parser, which increments it);
 *  - a zero-length payload decremented the counter past zero and hung the
 *    machine in DATA_RCV; the CRC state is now entered directly;
 *  - the length byte is now bounded by G2_MAX_GIMBAL_PAYLOAD, preventing a
 *    buffer overrun of rx.data for oversized length values.
 *
 * @param byte next byte from the serial stream
 *
 * @return true exactly when a complete, CRC-valid frame has been queued.
 */
bool g2GimbalDriver::parser(IN uint8_t byte)
{
    bool state = false;
    static uint8_t payloadLenghte = 0;
    static uint8_t *pRx = NULL;

    switch (parserState)
    {
    case G2::GIMBAL_SERIAL_STATE_IDEL:
        if (byte == G2_SERIAL_HEAD)
        {
            rx.head = byte;
            parserState = G2::GIMBAL_SERIAL_STATE_HEAD_RCV;
        }
        break;

    case G2::GIMBAL_SERIAL_STATE_HEAD_RCV:
        if (byte == G2_SERIAL_VERSION)
        {
            rx.version = byte;
            parserState = G2::GIMBAL_SERIAL_STATE_VERSION_RCV;
        }
        else
        {
            rx.head = 0;
            parserState = G2::GIMBAL_SERIAL_STATE_IDEL;
        }
        break;

    case G2::GIMBAL_SERIAL_STATE_VERSION_RCV:
        rx.target = byte;
        parserState = G2::GIMBAL_SERIAL_STATE_TARGET_RCV;
        break;

    case G2::GIMBAL_SERIAL_STATE_TARGET_RCV:
        rx.source = byte;
        parserState = G2::GIMBAL_SERIAL_STATE_SOURCE_RCV;
        break;

    case G2::GIMBAL_SERIAL_STATE_SOURCE_RCV:
        // FIX: reject lengths larger than the payload buffer; the previous
        // code would overrun rx.data for len > G2_MAX_GIMBAL_PAYLOAD.
        if (byte > G2_MAX_GIMBAL_PAYLOAD)
        {
            parserState = G2::GIMBAL_SERIAL_STATE_IDEL;
            break;
        }
        rx.len = byte;
        parserState = G2::GIMBAL_SERIAL_STATE_LENGHT_RCV;
        pRx = rx.data;
        payloadLenghte = byte;
        break;

    case G2::GIMBAL_SERIAL_STATE_LENGHT_RCV:
        rx.command = byte;
        // FIX: a zero-length payload previously wrapped the counter and
        // hung the machine in DATA_RCV; go straight to the CRC trailer.
        parserState = (payloadLenghte == 0) ? G2::GIMBAL_SERIAL_STATE_CRC_RCV1
                                            : G2::GIMBAL_SERIAL_STATE_DATA_RCV;
        break;

    case G2::GIMBAL_SERIAL_STATE_DATA_RCV:
        *pRx = byte;
        pRx++; // FIX: advance the write cursor (was missing).
        payloadLenghte--;
        if (payloadLenghte == 0)
        {
            parserState = G2::GIMBAL_SERIAL_STATE_CRC_RCV1;
        }
        break;

    case G2::GIMBAL_SERIAL_STATE_CRC_RCV1:
        rx.crc.f8[1] = byte; // CRC high byte arrives first
        parserState = G2::GIMBAL_SERIAL_STATE_END;
        break;

    case G2::GIMBAL_SERIAL_STATE_END:
        rx.crc.f8[0] = byte;
        // CRC covers the 6 header bytes plus the payload.
        if (rx.crc.f16 == G2::checkCrc16((uint8_t *)&rx, G2_PAYLOAD_OFFSET + rx.len))
        {
            state = true;
            rxMutex.lock();
            Ring_Fifo_in_cell(&rxQueue, &rx);
            rxMutex.unlock();
        }
        else
        {
            memset(&rx, 0, sizeof(G2::GIMBAL_FRAME_T));
        }
        parserState = G2::GIMBAL_SERIAL_STATE_IDEL;
        break;

    default:
        parserState = G2::GIMBAL_SERIAL_STATE_IDEL;
        break;
    }

    return state;
}

View File

@ -0,0 +1,90 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-03-01 10:02:24
* @LastEditors: L LC @amov
* @LastEditTime: 2023-03-13 12:29:33
* @FilePath: \gimbal-sdk-multi-platform\src\G2\g2_gimbal_driver.h
*/
#ifndef G2_DRIVER_H
#define G2_DRIVER_H
// FIX(review): guard renamed from __G2_DRIVER_H (identifiers beginning with
// a double underscore are reserved for the implementation) and hoisted
// above the includes so it covers the whole header.

#include "../amov_gimbal.h"
#include "g2_gimbal_struct.h"
#include <mutex>
#include <malloc.h>
#include <iostream>

extern "C"
{
#include "Ring_Fifo.h"
}

// Serial-protocol driver for the AMOV G2 gimbal: packs commands into a tx
// FIFO, parses incoming bytes into an rx FIFO, and implements the generic
// IamovGimbalBase control interface.
class g2GimbalDriver : protected amovGimbal::IamovGimbalBase
{
private:
    G2::GIMBAL_CMD_PARSER_STATE_T parserState; // rx state-machine position
    G2::GIMBAL_FRAME_T rx;                     // frame being assembled
    G2::GIMBAL_FRAME_T tx;                     // frame being transmitted

    std::mutex rxMutex;     // guards rxQueue
    uint8_t *rxBuffer;      // backing storage for rxQueue
    RING_FIFO_CB_T rxQueue;
    std::mutex txMutex;     // guards txQueue
    uint8_t *txBuffer;      // backing storage for txQueue
    RING_FIFO_CB_T txQueue;

    uint8_t self;   // our node id (frame "source" on tx)
    uint8_t remote; // gimbal node id (frame "target" on tx)

    bool parser(IN uint8_t byte);
    void send(void);
    void convert(void *buf);
    uint32_t pack(IN uint32_t cmd, uint8_t *pPayload, uint8_t payloadSize);
    bool getRxPack(OUT void *pack);

public:
    // Configure the node ids used in outgoing frames.
    void nodeSet(SET uint32_t _self, SET uint32_t _remote)
    {
        self = _self;
        remote = _remote;
    }
    // functions
    uint32_t setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &pos);
    uint32_t setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &speed);
    uint32_t setGimabalFollowSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &followSpeed);
    uint32_t setGimabalHome(void);
    uint32_t takePic(void);
    uint32_t setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState);

#ifdef AMOV_HOST
    // IAP functions (available in internal-source builds; hidden in lib mode)
    bool iapGetSoftInfo(std::string &info);
    bool iapGetHardInfo(std::string &info);
    bool iapJump(G2::GIMBAL_IAP_STATE_T &state);
    bool iapFlashErase(G2::GIMBAL_IAP_STATE_T &state);
    bool iapSendBlockInfo(uint32_t &startAddr, uint32_t &crc32);
    bool iapSendBlockData(uint8_t offset, uint8_t *data);
    bool iapFlashWrite(uint32_t &crc32, G2::GIMBAL_IAP_STATE_T &state);
    // Decide whether a jump is required before flashing.
    bool iapJumpCheck(std::string &info) { return true; }
#endif

    // Factory used by the generic gimbal layer.
    static amovGimbal::IamovGimbalBase *creat(amovGimbal::IOStreamBase *_IO)
    {
        return new g2GimbalDriver(_IO);
    }
    g2GimbalDriver(amovGimbal::IOStreamBase *_IO);
    ~g2GimbalDriver()
    {
        free(rxBuffer);
        free(txBuffer);
    }
};
#endif

View File

@ -0,0 +1,81 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-03-13 11:58:54
* @LastEditors: L LC @amov
* @LastEditTime: 2023-03-13 12:31:58
* @FilePath: \gimbal-sdk-multi-platform\src\G2\g2_gimbal_funtion.cpp
*/
#include "g2_gimbal_driver.h"
#include "g2_gimbal_crc.h"
#include "string.h"
/**
 * Set the gimbal to an absolute position.
 *
 * NOTE: not implemented for the G2 yet — no frame is queued.
 *
 * @param pos the requested gimbal position (currently unused)
 *
 * @return Number of bytes queued for transmission; always 0 (stub).
 */
uint32_t g2GimbalDriver::setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &pos)
{
    return 0;
}
/**
 * Set the gimbal rotation speed.
 *
 * NOTE: not implemented for the G2 yet — no frame is queued.
 *
 * @param speed the requested gimbal speed (currently unused)
 *
 * @return Number of bytes queued for transmission; always 0 (stub).
 */
uint32_t g2GimbalDriver::setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &speed)
{
    return 0;
}
/**
 * Set the gimbal's follow speed.
 *
 * NOTE: not implemented for the G2 yet — no frame is queued.
 *
 * @param followSpeed the requested follow speed (currently unused)
 *
 * @return Number of bytes queued for transmission; always 0 (stub).
 */
uint32_t g2GimbalDriver::setGimabalFollowSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &followSpeed)
{
    return 0;
}
/**
 * Return the gimbal to its home position.
 *
 * NOTE: not implemented for the G2 yet — no frame is queued.
 *
 * @return Number of bytes queued for transmission; always 0 (stub).
 */
uint32_t g2GimbalDriver::setGimabalHome(void)
{
    return 0;
}
/**
 * Trigger a photo capture.
 *
 * NOTE: not implemented for the G2 yet — no frame is queued.
 *
 * @return Number of bytes queued for transmission; always 0 (stub).
 */
uint32_t g2GimbalDriver::takePic(void)
{
    return 0;
}
/**
 * Change the gimbal's video recording state.
 *
 * NOTE: not implemented for the G2 yet — no frame is queued.
 *
 * @param newState the requested video state (currently unused)
 *
 * @return Number of bytes queued for transmission; always 0 (stub).
 */
uint32_t g2GimbalDriver::setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState)
{
    return 0;
}

View File

@ -0,0 +1,357 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-03-02 11:16:52
* @LastEditors: L LC @amov
* @LastEditTime: 2023-04-18 10:13:08
* @FilePath: /gimbal-sdk-multi-platform/src/G2/g2_gimbal_iap_funtion.cpp
*/
#ifdef AMOV_HOST
#include "g2_gimbal_driver.h"
#include "g2_gimbal_crc.h"
#include "string.h"
#include <chrono>
#define MAX_WAIT_TIME_MS 2000
/**
 * Query the gimbal software-version string over the IAP channel.
 *
 * Sends IAP_COMMAND_SOFT_INFO, then busy-polls the receive queue until a
 * matching reply addressed to this node arrives or MAX_WAIT_TIME_MS elapses.
 *
 * NOTE(review): the wait loop spins without sleeping, and `info` is built
 * with a plain (char *) cast of ack.data — assumes the gimbal always sends
 * a NUL-terminated string; confirm both against the protocol.
 *
 * @param info receives the reported software information on success
 *
 * @return true when a matching reply was received before the timeout.
 */
bool g2GimbalDriver::iapGetSoftInfo(std::string &info)
{
    uint8_t temp = 0;
    bool ret = false;
    G2::GIMBAL_FRAME_T ack;
    pack(G2::IAP_COMMAND_SOFT_INFO, &temp, 1);
    std::chrono::milliseconds startMs = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::system_clock::now().time_since_epoch());
    while (1)
    {
        if (getRxPack(&ack))
        {
            // Accept only the matching command echoed back to this node.
            if (ack.command == G2::IAP_COMMAND_SOFT_INFO &&
                ack.target == self &&
                ack.source == remote)
            {
                info = (char *)ack.data;
                std::cout << info << std::endl;
                ret = true;
                break;
            }
        }
        std::chrono::milliseconds nowMs = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::system_clock::now().time_since_epoch());
        if ((nowMs - startMs) > std::chrono::milliseconds(MAX_WAIT_TIME_MS))
        {
            break;
        }
    }
    return ret;
}
/**
 * Query the gimbal hardware-version string over the IAP channel.
 *
 * Sends IAP_COMMAND_HARDWARE_INFO and polls the rx queue until a matching
 * reply addressed to this node arrives or MAX_WAIT_TIME_MS elapses.
 *
 * @param info receives the reported hardware information on success
 *
 * @return true when a matching reply was received before the timeout.
 */
bool g2GimbalDriver::iapGetHardInfo(std::string &info)
{
    uint8_t dummy = 0;
    bool gotReply = false;
    G2::GIMBAL_FRAME_T ack;

    pack(G2::IAP_COMMAND_HARDWARE_INFO, &dummy, 1);

    // Absolute deadline instead of start + elapsed comparison.
    const std::chrono::milliseconds deadline =
        std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::system_clock::now().time_since_epoch()) +
        std::chrono::milliseconds(MAX_WAIT_TIME_MS);

    while (1)
    {
        if (getRxPack(&ack) &&
            ack.command == G2::IAP_COMMAND_HARDWARE_INFO &&
            ack.target == self &&
            ack.source == remote)
        {
            info = (char *)ack.data;
            std::cout << info << std::endl;
            gotReply = true;
            break;
        }

        const std::chrono::milliseconds now =
            std::chrono::duration_cast<std::chrono::milliseconds>(
                std::chrono::system_clock::now().time_since_epoch());
        if (now > deadline)
        {
            break;
        }
    }
    return gotReply;
}
/**
 * Ask the firmware to jump (application <-> bootloader) for IAP.
 *
 * Sends IAP_COMMAND_JUMP, then waits up to MAX_WAIT_TIME_MS. Unlike the
 * other IAP calls the logic is deliberately inverted: receiving a matching
 * reply means the device refused the jump (its status is captured and the
 * function returns false); timing out with no reply is treated as success.
 *
 * @param state on refusal, filled with the status code from ack.data[1]
 *
 * @return true when no refusal arrived before the timeout (jump assumed OK).
 */
bool g2GimbalDriver::iapJump(G2::GIMBAL_IAP_STATE_T &state)
{
    uint8_t temp = 0;
    bool ret = true;
    G2::GIMBAL_FRAME_T ack;
    pack(G2::IAP_COMMAND_JUMP, &temp, 1);
    std::chrono::milliseconds startMs = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::system_clock::now().time_since_epoch());
    // It fails if the specified message is received.
    while (1)
    {
        if (getRxPack(&ack))
        {
            if (ack.command == G2::IAP_COMMAND_JUMP &&
                ack.target == self &&
                ack.source == remote)
            {
                state = (G2::GIMBAL_IAP_STATE_T)ack.data[1];
                ret = false;
                break;
            }
        }
        std::chrono::milliseconds nowMs = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::system_clock::now().time_since_epoch());
        if ((nowMs - startMs) > std::chrono::milliseconds(MAX_WAIT_TIME_MS))
        {
            break;
        }
    }
    return ret;
}
/**
 * Request an IAP flash erase and wait for the gimbal's acknowledgement.
 *
 * @param state filled with the erase status code from ack.data[1] on success
 *
 * @return true when an acknowledgement arrived before MAX_WAIT_TIME_MS.
 */
bool g2GimbalDriver::iapFlashErase(G2::GIMBAL_IAP_STATE_T &state)
{
    uint8_t dummy = 0;
    bool acked = false;
    G2::GIMBAL_FRAME_T reply;

    pack(G2::IAP_COMMAND_FLASH_ERASE, &dummy, 1);

    const std::chrono::milliseconds t0 =
        std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::system_clock::now().time_since_epoch());

    while (1)
    {
        if (getRxPack(&reply) &&
            reply.command == G2::IAP_COMMAND_FLASH_ERASE &&
            reply.target == self &&
            reply.source == remote)
        {
            state = (G2::GIMBAL_IAP_STATE_T)reply.data[1];
            acked = true;
            break;
        }

        const std::chrono::milliseconds t1 =
            std::chrono::duration_cast<std::chrono::milliseconds>(
                std::chrono::system_clock::now().time_since_epoch());
        if ((t1 - t0) > std::chrono::milliseconds(MAX_WAIT_TIME_MS))
        {
            break;
        }
    }
    return acked;
}
/**
 * Announce a firmware block: send its start address and CRC32, then wait
 * for the gimbal to echo the 8 payload bytes back unchanged.
 *
 * Payload layout: bytes 0..3 = start address, bytes 4..7 = block CRC32
 * (host byte order via the union).
 *
 * FIX(review): the CRC32 was previously memcpy'd to buf+0 as well,
 * overwriting the start address and leaving bytes 4..7 zero; the second
 * copy now targets buf + 4.
 *
 * @param startAddr start address of the block to be sent
 * @param crc32 CRC32 of the block data
 *
 * @return true when a byte-exact echo was received before the timeout.
 */
bool g2GimbalDriver::iapSendBlockInfo(uint32_t &startAddr, uint32_t &crc32)
{
    union
    {
        uint32_t f32;
        uint8_t f8[4];
    } temp;
    uint8_t buf[8] = {0, 0, 0, 0, 0, 0, 0, 0};
    bool ret = false;
    G2::GIMBAL_FRAME_T ack;
    temp.f32 = startAddr;
    memcpy(buf, temp.f8, sizeof(uint32_t));
    temp.f32 = crc32;
    // FIX: second word belongs in bytes 4..7, not on top of the address.
    memcpy(buf + sizeof(uint32_t), temp.f8, sizeof(uint32_t));
    pack(G2::IAP_COMMAND_BOLCK_INFO, buf, 8);
    std::chrono::milliseconds startMs = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::system_clock::now().time_since_epoch());
    while (1)
    {
        if (getRxPack(&ack))
        {
            if (ack.command == G2::IAP_COMMAND_BOLCK_INFO &&
                ack.target == self &&
                ack.source == remote)
            {
                // The gimbal must echo all 8 payload bytes unchanged.
                ret = true;
                for (uint8_t i = 0; i < 8; i++)
                {
                    if (buf[i] != ack.data[i])
                    {
                        ret = false;
                    }
                }
                break;
            }
        }
        std::chrono::milliseconds nowMs = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::system_clock::now().time_since_epoch());
        if ((nowMs - startMs) > std::chrono::milliseconds(MAX_WAIT_TIME_MS))
        {
            break;
        }
    }
    return ret;
}
/**
 * Transfer one 64-byte firmware data chunk and verify the gimbal's echo.
 *
 * The chunk index is encoded in the command itself
 * (IAP_COMMAND_BLOCK_START + offset). Success requires the reply to echo
 * all 64 payload bytes unchanged within MAX_WAIT_TIME_MS.
 *
 * @param offset chunk index relative to IAP_COMMAND_BLOCK_START
 * @param data pointer to the 64 bytes to send
 *
 * @return true when a byte-exact echo was received before the timeout.
 */
bool g2GimbalDriver::iapSendBlockData(uint8_t offset, uint8_t *data)
{
    bool ret = false;
    G2::GIMBAL_FRAME_T ack;
    pack(G2::IAP_COMMAND_BLOCK_START + offset, data, 64);
    std::chrono::milliseconds startMs = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::system_clock::now().time_since_epoch());
    while (1)
    {
        if (getRxPack(&ack))
        {
            if (ack.command == G2::IAP_COMMAND_BLOCK_START + offset &&
                ack.target == self &&
                ack.source == remote)
            {
                // The gimbal must echo all 64 payload bytes unchanged.
                ret = true;
                for (uint8_t i = 0; i < 64; i++)
                {
                    if (data[i] != ack.data[i])
                    {
                        ret = false;
                    }
                }
                break;
            }
        }
        std::chrono::milliseconds nowMs = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::system_clock::now().time_since_epoch());
        if ((nowMs - startMs) > std::chrono::milliseconds(MAX_WAIT_TIME_MS))
        {
            break;
        }
    }
    return ret;
}
/**
 * Commit the previously transferred block to flash.
 *
 * Sends the block CRC32; the reply must echo the 4 CRC bytes byte-for-byte
 * and carries the write status in ack.data[4].
 *
 * @param crc32 CRC32 of the block that was just transferred
 * @param state filled with the write status reported by the gimbal
 *
 * @return true when a matching echo arrived before MAX_WAIT_TIME_MS.
 */
bool g2GimbalDriver::iapFlashWrite(uint32_t &crc32, G2::GIMBAL_IAP_STATE_T &state)
{
    bool ret = false;
    G2::GIMBAL_FRAME_T ack;
    union
    {
        uint32_t f32;
        uint8_t f8[4];
    } temp;
    temp.f32 = crc32;
    pack(G2::IAP_COMMAND_BLOCK_WRITE, temp.f8, sizeof(uint32_t));
    std::chrono::milliseconds startMs = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::system_clock::now().time_since_epoch());
    while (1)
    {
        if (getRxPack(&ack))
        {
            if (ack.command == G2::IAP_COMMAND_BLOCK_WRITE &&
                ack.target == self &&
                ack.source == remote)
            {
                // Status byte follows the echoed CRC in the reply payload.
                state = (G2::GIMBAL_IAP_STATE_T)ack.data[4];
                ret = true;
                for (uint8_t i = 0; i < 4; i++)
                {
                    if (temp.f8[i] != ack.data[i])
                    {
                        ret = false;
                    }
                }
                break;
            }
        }
        std::chrono::milliseconds nowMs = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::system_clock::now().time_since_epoch());
        if ((nowMs - startMs) > std::chrono::milliseconds(MAX_WAIT_TIME_MS))
        {
            break;
        }
    }
    return ret;
}
#endif

View File

@ -0,0 +1,81 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-03-01 09:21:57
* @LastEditors: L LC @amov
* @LastEditTime: 2023-04-18 10:13:23
* @FilePath: /gimbal-sdk-multi-platform/src/G2/g2_gimbal_struct.h
*/
#ifndef G2_GIMBAL_STRUCT_H
#define G2_GIMBAL_STRUCT_H
#include <stdint.h>
// Wire-protocol definitions for the AMOV G2 gimbal serial link.
namespace G2
{
// Largest payload one frame can carry, in bytes.
#define G2_MAX_GIMBAL_PAYLOAD 64
// Header bytes before the payload: head, version, target, source, len, command.
#define G2_PAYLOAD_OFFSET 6
// Scale factor applied to raw int16 fields.
#define G2_SCALE_FACTOR 0.01f
#define G2_SERIAL_HEAD 0XAF    // first byte of every frame
#define G2_SERIAL_VERSION 0X02 // protocol version byte
    // IAP (in-application programming) command identifiers.
    typedef enum
    {
        IAP_COMMAND_JUMP = 80,
        IAP_COMMAND_FLASH_ERASE,
        IAP_COMMAND_BOLCK_INFO, // (sic) block address/CRC announcement
        IAP_COMMAND_BLOCK_WRITE,
        IAP_COMMAND_SOFT_INFO,
        IAP_COMMAND_HARDWARE_INFO,
        IAP_COMMAND_BLOCK_START,
        IAP_COMMAND_BLOCK_END = 117,
    } GIMBAL_CMD_T;
    // IAP status codes reported by the gimbal.
    typedef enum
    {
        IAP_STATE_FAILD = 0, // (sic)
        IAP_STATE_SUCCEED,
        IAP_STATE_READY,
        IAP_STATE_FIRMWARE_BROKE,
        IAP_STATE_JUMP_FAILD,
        IAP_STATE_ADDR_ERR,
        IAP_STATE_CRC_ERR,
        IAP_STATE_WRITE_ERR,
        IAP_STATE_WRITE_TIMEOUT,
    } GIMBAL_IAP_STATE_T;
    // Receive-parser states (spellings kept: referenced by the driver).
    typedef enum
    {
        GIMBAL_SERIAL_STATE_IDEL = 0,
        GIMBAL_SERIAL_STATE_HEAD_RCV,
        GIMBAL_SERIAL_STATE_VERSION_RCV,
        GIMBAL_SERIAL_STATE_TARGET_RCV,
        GIMBAL_SERIAL_STATE_SOURCE_RCV,
        GIMBAL_SERIAL_STATE_LENGHT_RCV,
        GIMBAL_SERIAL_STATE_DATA_RCV,
        GIMBAL_SERIAL_STATE_CRC_RCV1,
        GIMBAL_SERIAL_STATE_END,
    } GIMBAL_CMD_PARSER_STATE_T;
#pragma pack(1) // wire structures: no padding
    // One on-the-wire frame; the CRC-16 union is scratch space, the CRC is
    // transmitted inside data[] right after the payload (see pack()).
    typedef struct
    {
        uint8_t head;    // G2_SERIAL_HEAD
        uint8_t version; // G2_SERIAL_VERSION
        uint8_t target;  // destination node id
        uint8_t source;  // sending node id
        uint8_t len;     // payload length in bytes
        uint8_t command; // GIMBAL_CMD_T
        uint8_t data[G2_MAX_GIMBAL_PAYLOAD];
        union
        {
            uint8_t f8[2];
            uint16_t f16;
        } crc;
    } GIMBAL_FRAME_T;
// FIX(review): restore default packing with the argument-free form; the
// original used "#pragma pack(0)", which is non-standard (the G1 header
// correctly uses "#pragma pack()").
#pragma pack()
}
#endif

View File

@ -0,0 +1,27 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2022-10-27 18:10:06
* @LastEditors: L LC @amov
* @LastEditTime: 2023-03-23 17:24:23
* @FilePath: /gimbal-sdk-multi-platform/src/Q10f/Q10f_gimbal_crc32.h
*/
#ifndef Q10F_GIMBAL_CRC32_H
#define Q10F_GIMBAL_CRC32_H
// Frame-integrity helper for the Q10f gimbal protocol.
namespace Q10f
{
    /**
     * 8-bit additive checksum: sum of all bytes, truncated to one byte.
     *
     * @param pData bytes to sum
     * @param Lenght number of bytes (sic)
     * @return low byte of the running sum.
     */
    static inline unsigned char CheckSum(unsigned char *pData, unsigned short Lenght)
    {
        unsigned short acc = 0;
        for (unsigned short idx = 0; idx < Lenght; idx++)
        {
            acc = acc + pData[idx];
        }
        return acc & 0XFF;
    }
} // namespace Q10f
#endif

View File

@ -0,0 +1,258 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2022-10-27 18:10:06
* @LastEditors: L LC @amov
* @LastEditTime: 2023-04-11 17:29:58
* @FilePath: /gimbal-sdk-multi-platform/src/Q10f/Q10f_gimbal_driver.cpp
*/
#include "Q10f_gimbal_driver.h"
#include "Q10f_gimbal_crc32.h"
#include "string.h"
/**
 * Construct a Q10f gimbal driver on top of the given I/O stream.
 *
 * Allocates and initialises the rx/tx frame FIFOs (the process exits if
 * either allocation fails), then queues the commands that enable periodic
 * attitude feedback from the gimbal.
 *
 * @param _IO The IOStreamBase object that will be used to communicate with the gimbal.
 */
Q10fGimbalDriver::Q10fGimbalDriver(amovGimbal::IOStreamBase *_IO) : amovGimbal::IamovGimbalBase(_IO)
{
    memset(&rxQueue, 0, sizeof(RING_FIFO_CB_T));
    memset(&txQueue, 0, sizeof(RING_FIFO_CB_T));
    rxBuffer = (uint8_t *)malloc(MAX_QUEUE_SIZE * sizeof(Q10f::GIMBAL_FRAME_T));
    if (rxBuffer == NULL)
    {
        std::cout << "Receive buffer creation failed! Size : " << MAX_QUEUE_SIZE << std::endl;
        exit(1);
    }
    txBuffer = (uint8_t *)malloc(MAX_QUEUE_SIZE * sizeof(Q10f::GIMBAL_FRAME_T));
    if (txBuffer == NULL)
    {
        // Release the rx buffer before bailing out.
        free(rxBuffer);
        std::cout << "Send buffer creation failed! Size : " << MAX_QUEUE_SIZE << std::endl;
        exit(1);
    }
    Ring_Fifo_init(&rxQueue, sizeof(Q10f::GIMBAL_FRAME_T), rxBuffer, MAX_QUEUE_SIZE * sizeof(Q10f::GIMBAL_FRAME_T));
    Ring_Fifo_init(&txQueue, sizeof(Q10f::GIMBAL_FRAME_T), txBuffer, MAX_QUEUE_SIZE * sizeof(Q10f::GIMBAL_FRAME_T));
    parserState = Q10f::GIMBAL_SERIAL_STATE_IDLE;
    // Initialize and enable attitude data return (50Hz)
    uint8_t cmd = 0XFF;
    pack(Q10f::GIMBAL_CMD_SET_FEEDBACK_H, &cmd, 1);
    pack(Q10f::GIMBAL_CMD_SET_FEEDBACK_L, &cmd, 1);
    cmd = 0X00;
    pack(Q10f::GIMBAL_CMD_OPEN_FEEDBACK, &cmd, 1);
}
/**
 * Build a Q10f frame for `cmd` and queue it for transmission.
 *
 * The 32-bit command word doubles as the frame header. For
 * GIMBAL_CMD_SET_POS an additive checksum of the payload is appended; for
 * every other command the payload length is decremented instead.
 *
 * NOTE(review): the non-SET_POS `payloadSize--` branch looks inverted
 * relative to the original description ("calculates the checksum") —
 * confirm against the Q10f protocol documentation before changing it.
 *
 * @param cmd 32-bit command/header word
 * @param pPayload pointer to the payload bytes to copy
 * @param payloadSize payload length in bytes
 *
 * @return Number of bytes queued (0 if the tx FIFO is full).
 */
uint32_t Q10fGimbalDriver::pack(IN uint32_t cmd, uint8_t *pPayload, uint8_t payloadSize)
{
    uint32_t ret = 0;
    Q10f::GIMBAL_FRAME_T txTemp;
    txTemp.head = cmd;
    memcpy(txTemp.data, pPayload, payloadSize);
    if (cmd != Q10f::GIMBAL_CMD_SET_POS)
    {
        payloadSize--;
    }
    else
    {
        // Append the additive checksum after the payload.
        txTemp.data[payloadSize] = Q10f::CheckSum(pPayload, payloadSize);
    }
    txTemp.len = payloadSize;
    txMutex.lock();
    if (Ring_Fifo_in_cell(&txQueue, &txTemp))
    {
        // 4-byte header + payload + checksum byte.
        ret = payloadSize + sizeof(uint32_t) + sizeof(uint8_t);
    }
    txMutex.unlock();
    return ret;
}
/**
 * Pop one received frame from the rx FIFO, if any is pending.
 *
 * @param pack destination buffer (large enough for one Q10f::GIMBAL_FRAME_T)
 *
 * @return true when a frame was copied out, false when the queue was empty.
 */
bool Q10fGimbalDriver::getRxPack(OUT void *pack)
{
    rxMutex.lock();
    const bool popped = Ring_Fifo_out_cell(&rxQueue, pack);
    rxMutex.unlock();
    return popped;
}
/**
 * Decode one complete received Q10f frame and refresh the cached state.
 *
 * Attitude frames (GIMBAL_CMD_RCV_STATE) update the absolute (IMU) and
 * relative (stator/rotor) angles under the state mutex and forward them
 * through updateGimbalStateCallback; anything else is dumped in hex.
 *
 * FIX(review): rel.yaw previously copied rollStatorRotorAngle (copy/paste
 * from the line below); it now uses the yaw stator/rotor reading.
 * TODO(review): confirm the field name against Q10f_gimbal_struct.h, and
 * whether the relative angles should use Q10F_SCALE_FACTOR_ANGLE rather
 * than the SPEED factor.
 *
 * @param buf pointer to a complete Q10f::GIMBAL_FRAME_T
 */
void Q10fGimbalDriver::convert(void *buf)
{
    Q10f::GIMBAL_FRAME_T *temp;
    temp = reinterpret_cast<Q10f::GIMBAL_FRAME_T *>(buf);
    switch (temp->head)
    {
    case Q10f::GIMBAL_CMD_RCV_STATE:
        Q10f::GIMBAL_RCV_POS_MSG_T *tempPos;
        tempPos = reinterpret_cast<Q10f::GIMBAL_RCV_POS_MSG_T *>(((uint8_t *)buf) + Q10F_PAYLOAD_OFFSET);
        mState.lock();
        state.abs.yaw = tempPos->yawIMUAngle * Q10F_SCALE_FACTOR_ANGLE;
        state.abs.roll = tempPos->rollIMUAngle * Q10F_SCALE_FACTOR_ANGLE;
        state.abs.pitch = tempPos->pitchIMUAngle * Q10F_SCALE_FACTOR_ANGLE;
        state.rel.yaw = tempPos->yawStatorRotorAngle * Q10F_SCALE_FACTOR_SPEED; // FIX: was rollStatorRotorAngle
        state.rel.roll = tempPos->rollStatorRotorAngle * Q10F_SCALE_FACTOR_SPEED;
        state.rel.pitch = tempPos->pitchStatorRotorAngle * Q10F_SCALE_FACTOR_SPEED;
        updateGimbalStateCallback(state.abs.roll, state.abs.pitch, state.abs.yaw,
                                  state.rel.roll, state.rel.pitch, state.rel.yaw,
                                  state.fov.x, state.fov.y);
        mState.unlock();
        break;
    default:
        std::cout << "Undefined frame from Q10f : ";
        for (uint16_t i = 0; i < temp->len + Q10F_PAYLOAD_OFFSET; i++)
        {
            printf("%02X ", ((uint8_t *)buf)[i]);
        }
        std::cout << std::endl;
        break;
    }
}
/**
 * Pump at most one queued frame out to the serial stream.
 *
 * Frames wait in the tx ring buffer because the gimbal processes one
 * command at a time; nothing is sent while the stream is busy or closed.
 * When a frame is popped (under the tx mutex) it is written as
 * header + payload + checksum byte.
 */
void Q10fGimbalDriver::send(void)
{
    if (IO->isBusy() || !IO->isOpen())
    {
        return;
    }

    txMutex.lock();
    const bool havePending = Ring_Fifo_out_cell(&txQueue, &tx);
    txMutex.unlock();

    if (havePending)
    {
        IO->outPutBytes((uint8_t *)&tx, tx.len + Q10F_PAYLOAD_OFFSET + sizeof(uint8_t));
    }
}
/**
 * Byte-wise receive state machine for the Q10f serial protocol.
 *
 * Recognises only attitude frames: the four bytes of GIMBAL_CMD_RCV_STATE
 * (little-endian, least-significant byte first) act as the sync header,
 * followed by a fixed-size GIMBAL_RCV_POS_MSG_T payload and an additive
 * checksum byte. A frame whose checksum matches is pushed into the rx FIFO.
 *
 * @param byte next byte from the serial stream
 *
 * @return true exactly when a complete, checksum-valid frame was queued.
 */
bool Q10fGimbalDriver::parser(IN uint8_t byte)
{
    bool state = false;
    // Parser position persists across calls (one byte per invocation).
    static uint8_t payloadLenghte = 0;
    static uint8_t *pRx = NULL;
    uint8_t suncheck; // (sic) computed checksum of the received payload
    switch (parserState)
    {
    case Q10f::GIMBAL_SERIAL_STATE_IDLE:
        // Header byte 0 (lowest byte of the 32-bit command word).
        if (byte == ((Q10f::GIMBAL_CMD_RCV_STATE & 0X000000FF) >> 0))
        {
            parserState = Q10f::GIMBAL_SERIAL_STATE_HEAD1;
        }
        break;
    case Q10f::GIMBAL_SERIAL_STATE_HEAD1:
        if (byte == ((Q10f::GIMBAL_CMD_RCV_STATE & 0X0000FF00) >> 8))
        {
            parserState = Q10f::GIMBAL_SERIAL_STATE_HEAD2;
        }
        else
        {
            parserState = Q10f::GIMBAL_SERIAL_STATE_IDLE;
        }
        break;
    case Q10f::GIMBAL_SERIAL_STATE_HEAD2:
        if (byte == ((Q10f::GIMBAL_CMD_RCV_STATE & 0X00FF0000) >> 16))
        {
            parserState = Q10f::GIMBAL_SERIAL_STATE_HEAD3;
        }
        else
        {
            parserState = Q10f::GIMBAL_SERIAL_STATE_IDLE;
        }
        break;
    case Q10f::GIMBAL_SERIAL_STATE_HEAD3:
        if (byte == ((Q10f::GIMBAL_CMD_RCV_STATE & 0XFF000000) >> 24))
        {
            // Full header matched: prepare to collect the fixed payload.
            parserState = Q10f::GIMBAL_SERIAL_STATE_DATE;
            payloadLenghte = sizeof(Q10f::GIMBAL_RCV_POS_MSG_T);
            pRx = rx.data;
            rx.head = Q10f::GIMBAL_CMD_RCV_STATE;
        }
        else
        {
            parserState = Q10f::GIMBAL_SERIAL_STATE_IDLE;
        }
        break;
    case Q10f::GIMBAL_SERIAL_STATE_DATE:
        *pRx = byte;
        payloadLenghte--;
        pRx++;
        if (payloadLenghte == 0)
        {
            parserState = Q10f::GIMBAL_SERIAL_STATE_CHECK;
        }
        break;
    case Q10f::GIMBAL_SERIAL_STATE_CHECK:
        // Last byte is the additive checksum of the payload.
        suncheck = Q10f::CheckSum(rx.data, sizeof(Q10f::GIMBAL_RCV_POS_MSG_T));
        if (byte == suncheck)
        {
            state = true;
            rxMutex.lock();
            Ring_Fifo_in_cell(&rxQueue, &rx);
            rxMutex.unlock();
        }
        else
        {
            memset(&rx, 0, sizeof(Q10f::GIMBAL_FRAME_T));
        }
        parserState = Q10f::GIMBAL_SERIAL_STATE_IDLE;
        break;
    default:
        parserState = Q10f::GIMBAL_SERIAL_STATE_IDLE;
        break;
    }
    return state;
}

View File

@ -0,0 +1,71 @@
/*
* @Description: Q10f吊舱的驱动文件
* @Author: L LC @amov
* @Date: 2022-10-28 12:24:21
* @LastEditors: L LC @amov
* @LastEditTime: 2023-03-28 17:01:00
* @FilePath: /gimbal-sdk-multi-platform/src/Q10f/Q10f_gimbal_driver.h
*/
#include "../amov_gimbal.h"
#include "Q10f_gimbal_struct.h"
#include <mutex>
#include <malloc.h>
#include <iostream>
#ifndef __Q10F_DRIVER_H
#define __Q10F_DRIVER_H
extern "C"
{
#include "Ring_Fifo.h"
}
// Driver for the Q10f gimbal: implements the amovGimbal::IamovGimbalBase
// protocol hooks (parser/send/convert/pack/getRxPack) on top of two ring
// FIFOs, each guarded by its own mutex.
class Q10fGimbalDriver : protected amovGimbal::IamovGimbalBase
{
private:
    Q10f::GIMBAL_SERIAL_STATE_T parserState; // current state of the byte-wise RX state machine
    Q10f::GIMBAL_FRAME_T rx;                 // frame currently being assembled from the serial stream
    Q10f::GIMBAL_FRAME_T tx;                 // frame currently being transmitted
    std::mutex rxMutex;                      // guards rxQueue
    uint8_t *rxBuffer;                       // backing storage for rxQueue (freed in destructor)
    RING_FIFO_CB_T rxQueue;                  // queue of fully-received frames
    std::mutex txMutex;                      // guards txQueue
    uint8_t *txBuffer;                       // backing storage for txQueue (freed in destructor)
    RING_FIFO_CB_T txQueue;                  // queue of frames waiting to be sent
    bool parser(IN uint8_t byte);            // feed one RX byte; true when a frame completes
    void send(void);                         // flush one queued TX frame to the IO stream
    void convert(void *buf);                 // decode a complete frame into the shared state
    uint32_t pack(IN uint32_t cmd, uint8_t *pPayload, uint8_t payloadSize); // enqueue a TX frame
    bool getRxPack(OUT void *pack);          // pop one complete RX frame
public:
    // functions
    uint32_t setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &pos);
    uint32_t setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &speed);
    uint32_t setGimabalFollowSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &followSpeed);
    uint32_t setGimabalHome(void);
    uint32_t setGimbalZoom(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
    uint32_t setGimbalFocus(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
    uint32_t takePic(void);
    uint32_t setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState);
    // builds
    static amovGimbal::IamovGimbalBase *creat(amovGimbal::IOStreamBase *_IO)
    {
        return new Q10fGimbalDriver(_IO);
    }
    Q10fGimbalDriver(amovGimbal::IOStreamBase *_IO);
    ~Q10fGimbalDriver()
    {
        // NOTE(review): buffers released with free(); presumably allocated with
        // malloc() in the constructor (not visible here) -- confirm.
        free(rxBuffer);
        free(txBuffer);
    }
};
#endif

View File

@ -0,0 +1,180 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-03-02 10:00:52
* @LastEditors: L LC @amov
* @LastEditTime: 2023-03-29 11:47:18
* @FilePath: /gimbal-sdk-multi-platform/src/Q10f/Q10f_gimbal_funtion.cpp
*/
#include "Q10f_gimbal_driver.h"
#include "Q10f_gimbal_crc32.h"
#include "string.h"
/**
 * Queues an absolute-position command for the gimbal.
 *
 * Angles are converted from degrees to raw device units via
 * Q10F_SCALE_FACTOR_ANGLE; per-axis speeds reuse the cached follow-speed limits.
 *
 * @param pos desired roll/pitch/yaw, in degrees
 * @return number of bytes queued for transmission
 */
uint32_t Q10fGimbalDriver::setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &pos)
{
    Q10f::GIMBAL_SET_POS_MSG_T msg;
    msg.modeR = Q10f::GIMBAL_CMD_POS_MODE_ANGLE_SPEED;
    msg.modeP = Q10f::GIMBAL_CMD_POS_MODE_ANGLE_SPEED;
    msg.modeY = Q10f::GIMBAL_CMD_POS_MODE_ANGLE_SPEED;
    msg.angleR = pos.roll / Q10F_SCALE_FACTOR_ANGLE;
    msg.angleP = pos.pitch / Q10F_SCALE_FACTOR_ANGLE;
    msg.angleY = pos.yaw / Q10F_SCALE_FACTOR_ANGLE;
    msg.speedR = state.maxFollow.roll;
    msg.speedP = state.maxFollow.pitch;
    msg.speedY = state.maxFollow.yaw;
    return pack(Q10f::GIMBAL_CMD_SET_POS, reinterpret_cast<uint8_t *>(&msg), sizeof(Q10f::GIMBAL_SET_POS_MSG_T));
}
/**
 * Queues a pure rate-control command: all three axes run in speed mode with
 * zero angle targets.
 *
 * @param speed desired roll/pitch/yaw rates, in deg/s
 * @return number of bytes queued for transmission
 */
uint32_t Q10fGimbalDriver::setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &speed)
{
    const float kSpeedLsb = 0.1220740379f; // deg/s per raw speed unit

    Q10f::GIMBAL_SET_POS_MSG_T msg;
    msg.modeR = Q10f::GIMBAL_CMD_POS_MODE_SPEED;
    msg.modeP = Q10f::GIMBAL_CMD_POS_MODE_SPEED;
    msg.modeY = Q10f::GIMBAL_CMD_POS_MODE_SPEED;
    msg.angleR = 0;
    msg.angleP = 0;
    msg.angleY = 0;
    msg.speedR = speed.roll / kSpeedLsb;
    msg.speedP = speed.pitch / kSpeedLsb;
    msg.speedY = speed.yaw / kSpeedLsb;
    return pack(Q10f::GIMBAL_CMD_SET_POS, reinterpret_cast<uint8_t *>(&msg), sizeof(Q10f::GIMBAL_SET_POS_MSG_T));
}
/**
 * Caches the per-axis follow-speed limits (converted to raw device units);
 * the values are later used by setGimabalPos. Sends nothing to the device.
 *
 * @param followSpeed desired follow rates, in deg/s
 * @return always 0 (no bytes transmitted)
 */
uint32_t Q10fGimbalDriver::setGimabalFollowSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &followSpeed)
{
    const float kSpeedLsb = 0.1220740379f; // deg/s per raw speed unit
    state.maxFollow.roll = followSpeed.roll / kSpeedLsb;
    state.maxFollow.pitch = followSpeed.pitch / kSpeedLsb;
    state.maxFollow.yaw = followSpeed.yaw / kSpeedLsb;
    return 0;
}
/**
 * Queues the dedicated "go home" command frame.
 *
 * @return number of bytes queued for transmission
 */
uint32_t Q10fGimbalDriver::setGimabalHome(void)
{
    static const uint8_t homeCmd[5] = {0X00, 0X00, 0X03, 0X03, 0XFF};
    return pack(Q10f::GIMBAL_CMD_HOME, (uint8_t *)homeCmd, sizeof(homeCmd));
}
/**
 * Queues a single-shot photo command.
 *
 * @return number of bytes queued for transmission
 */
uint32_t Q10fGimbalDriver::takePic(void)
{
    static const uint8_t picCmd[2] = {0X01, 0XFF};
    return pack(Q10f::GIMBAL_CMD_CAMERA, (uint8_t *)picCmd, sizeof(picCmd));
}
/**
 * Starts (AMOV_GIMBAL_VIDEO_TAKE) or stops (anything else) video recording
 * and mirrors the new state into the local state cache.
 *
 * @param newState requested recording state
 * @return number of bytes queued for transmission
 */
uint32_t Q10fGimbalDriver::setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState)
{
    const bool recording = (newState == amovGimbal::AMOV_GIMBAL_VIDEO_TAKE);

    uint8_t cmd[2];
    cmd[0] = recording ? 0X02 : 0X03; // 0x02 = start recording, 0x03 = stop
    cmd[1] = 0XFF;

    state.video = recording ? amovGimbal::AMOV_GIMBAL_VIDEO_TAKE
                            : amovGimbal::AMOV_GIMBAL_VIDEO_OFF;
    return pack(Q10f::GIMBAL_CMD_CAMERA, (uint8_t *)cmd, sizeof(cmd));
}
/**
 * Controls optical zoom.
 *
 * With targetRate == 0 the call issues a continuous zoom command
 * (in/out/stop per `zoom`); otherwise it issues a direct-zoom command whose
 * payload encodes the raw zoom count as four big-endian-ordered nibbles.
 *
 * @param zoom       zoom direction (used only when targetRate == 0)
 * @param targetRate absolute zoom factor, 0..Q10F_MAX_ZOOM; 0 selects rate mode
 * @return number of bytes queued for transmission
 */
uint32_t Q10fGimbalDriver::setGimbalZoom(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
{
    uint8_t cmd[5] = {0X00, 0X00, 0X00, 0X00, 0XFF};
    if (targetRate == 0.0f)
    {
        // Continuous (rate) zoom: single command byte + 0xFF terminator.
        cmd[1] = 0XFF;
        switch (zoom)
        {
        case amovGimbal::AMOV_GIMBAL_ZOOM_IN:
            cmd[0] = Q10f::GIMBAL_CMD_ZOOM_IN;
            break;
        case amovGimbal::AMOV_GIMBAL_ZOOM_OUT:
            cmd[0] = Q10f::GIMBAL_CMD_ZOOM_OUT;
            break;
        case amovGimbal::AMOV_GIMBAL_ZOOM_STOP:
            cmd[0] = Q10f::GIMBAL_CMD_ZOOM_STOP;
            break;
        default:
            break;
        }
        return pack(Q10f::GIMBAL_CMD_ZOOM, (uint8_t *)cmd, 2);
    }
    else
    {
        uint16_t count = (targetRate / Q10F_MAX_ZOOM) * Q10F_MAX_ZOOM_COUNT;
        // BUG FIX: '>>' binds tighter than '&' in C++, so the original
        // `count & 0XF000 >> 12` evaluated as `count & (0XF000 >> 12)` and
        // produced wrong nibbles. Parenthesize the mask before shifting.
        cmd[0] = (count & 0XF000) >> 12;
        cmd[1] = (count & 0X0F00) >> 8;
        cmd[2] = (count & 0X00F0) >> 4;
        cmd[3] = (count & 0X000F) >> 0;
        return pack(Q10f::GIMBAL_CMD_ZOOM_DIRECT, (uint8_t *)cmd, 5);
    }
}
/**
 * Issues a continuous focus command (near/far/stop).
 *
 * NOTE(review): the command bytes reuse the GIMBAL_CMD_ZOOM_* values under the
 * GIMBAL_CMD_FOCUS command word -- confirm against the protocol spec.
 *
 * @param zoom       direction selector (IN/OUT/STOP)
 * @param targetRate unused for focus
 * @return number of bytes queued for transmission
 */
uint32_t Q10fGimbalDriver::setGimbalFocus(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
{
    uint8_t cmd[2] = {0X00, 0XFF};
    if (zoom == amovGimbal::AMOV_GIMBAL_ZOOM_IN)
    {
        cmd[0] = Q10f::GIMBAL_CMD_ZOOM_IN;
    }
    else if (zoom == amovGimbal::AMOV_GIMBAL_ZOOM_OUT)
    {
        cmd[0] = Q10f::GIMBAL_CMD_ZOOM_OUT;
    }
    else if (zoom == amovGimbal::AMOV_GIMBAL_ZOOM_STOP)
    {
        cmd[0] = Q10f::GIMBAL_CMD_ZOOM_STOP;
    }
    return pack(Q10f::GIMBAL_CMD_FOCUS, (uint8_t *)cmd, 2);
}

View File

@ -0,0 +1,105 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2022-10-27 18:10:07
* @LastEditors: L LC @amov
* @LastEditTime: 2023-03-28 18:15:47
* @FilePath: /gimbal-sdk-multi-platform/src/Q10f/Q10f_gimbal_struct.h
*/
#ifndef Q10F_GIMBAL_STRUCT_H
#define Q10F_GIMBAL_STRUCT_H
#include <stdint.h>
// Q10f wire-protocol constants, command words and frame layouts.
namespace Q10f
{
#define Q10F_MAX_GIMBAL_PAYLOAD 64       // maximum payload bytes carried by one frame
#define Q10F_PAYLOAD_OFFSET 4            // header bytes preceding the payload
#define Q10F_SCALE_FACTOR_ANGLE 0.02197f // degrees per raw angle LSB
#define Q10F_SCALE_FACTOR_SPEED 0.06103f // deg/s per raw speed LSB
#define Q10F_MAX_ZOOM 10.0f              // maximum zoom factor accepted by setGimbalZoom
#define Q10F_MAX_ZOOM_COUNT 0X4000       // raw zoom count corresponding to Q10F_MAX_ZOOM

    // 32-bit command words, compared byte-by-byte against the serial stream.
    typedef enum
    {
        GIMBAL_CMD_SET_POS = 0X100F01FF,
        GIMBAL_CMD_GET = 0X3D003D3E,
        GIMBAL_CMD_FOCUS = 0X08040181,
        GIMBAL_CMD_ZOOM = 0X07040181,
        GIMBAL_CMD_ZOOM_DIRECT = 0X47040181,
        GIMBAL_CMD_HOME = 0X010A0181,
        GIMBAL_CMD_CAMERA = 0X68040181,
        GIMBAL_CMD_RCV_STATE = 0X721A583E,
        GIMBAL_CMD_SET_FEEDBACK_L = 0X143055AA,
        GIMBAL_CMD_SET_FEEDBACK_H = 0X003155AA,
        GIMBAL_CMD_OPEN_FEEDBACK = 0X3E003E3E,
        GIMBAL_CMD_CLOSE_FEEDBACK = 0X3D003D3E,
    } GIMBAL_CMD_T;
    // Per-axis control mode used in GIMBAL_SET_POS_MSG_T.
    typedef enum
    {
        GIMBAL_CMD_POS_MODE_NO = 0X00,
        GIMBAL_CMD_POS_MODE_SPEED = 0X01,
        GIMBAL_CMD_POS_MODE_ANGLE = 0X02,
        GIMBAL_CMD_POS_MODE_ANGLE_SPEED = 0X03,
    } GIMBAL_CMD_POS_MODE_T;
    // Zoom/focus sub-command bytes.
    typedef enum
    {
        GIMBAL_CMD_ZOOM_IN = 0X27,
        GIMBAL_CMD_ZOOM_OUT = 0X37,
        GIMBAL_CMD_ZOOM_STOP = 0X00,
    } GIMBAL_CMD_ZOOM_T;
    // RX parser states: four header bytes, payload ("DATE"), then checksum.
    typedef enum
    {
        GIMBAL_SERIAL_STATE_IDLE,
        GIMBAL_SERIAL_STATE_HEAD1,
        GIMBAL_SERIAL_STATE_HEAD2,
        GIMBAL_SERIAL_STATE_HEAD3,
        GIMBAL_SERIAL_STATE_DATE,
        GIMBAL_SERIAL_STATE_CHECK
    } GIMBAL_SERIAL_STATE_T;
#pragma pack(1)
    // In-memory staging buffer for one frame. NOTE(review): checkSum and len
    // sit after the fixed-size payload array, so this is not the literal wire
    // layout -- the transmit path computes the on-wire length from `len`.
    typedef struct
    {
        uint32_t head;
        uint8_t data[Q10F_MAX_GIMBAL_PAYLOAD];
        uint8_t checkSum;
        uint8_t len;
    } GIMBAL_FRAME_T;
    // Payload of GIMBAL_CMD_SET_POS: per-axis mode plus raw speed/angle targets.
    typedef struct
    {
        uint8_t modeR;
        uint8_t modeP;
        uint8_t modeY;
        int16_t speedR;
        int16_t angleR;
        int16_t speedP;
        int16_t angleP;
        int16_t speedY;
        int16_t angleY;
    } GIMBAL_SET_POS_MSG_T;
    // Payload of GIMBAL_CMD_RCV_STATE: IMU, target (TAG) and stator-rotor readings.
    typedef struct
    {
        uint16_t timeStamp;
        int16_t rollIMUAngle;
        int16_t pitchIMUAngle;
        int16_t yawIMUAngle;
        int16_t rollTAGAngle;
        int16_t pitchTAGAngle;
        int16_t yawTAGAngle;
        int16_t rollTAGSpeed;
        int16_t pitchTAGSpeed;
        int16_t yawTAGSpeed;
        int16_t rollStatorRotorAngle;
        int16_t pitchStatorRotorAngle;
        int16_t yawStatorRotorAngle;
    } GIMBAL_RCV_POS_MSG_T;
#pragma pack()
}
#endif

View File

@ -0,0 +1,239 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2022-10-28 11:54:11
* @LastEditors: L LC @amov
* @LastEditTime: 2023-04-11 18:13:25
* @FilePath: /gimbal-sdk-multi-platform/src/amov_gimabl.cpp
*/
#include "amov_gimbal.h"
#include "g1_gimbal_driver.h"
#include "g2_gimbal_driver.h"
#include "Q10f_gimbal_driver.h"
#include <iostream>
#include <thread>
#include <map>
#include <iterator>
#define MAX_PACK_SIZE 280
// Supported gimbal device families (IDs start at 1; 0 is "no device").
typedef enum
{
    AMOV_GIMBAL_TYPE_NULL,
    AMOV_GIMBAL_TYPE_G1 = 1,
    AMOV_GIMBAL_TYPE_G2,
    AMOV_GIMBAL_TYPE_Q10,
} AMOV_GIMBAL_TYPE_T;
namespace amovGimbal
{
    // Driver factory signature: builds a driver bound to the given IO stream.
    typedef amovGimbal::IamovGimbalBase *(*createCallback)(amovGimbal::IOStreamBase *_IO);
    typedef std::map<std::string, createCallback> callbackMap;

    // Type-name -> device-family enum lookup.
    std::map<std::string, AMOV_GIMBAL_TYPE_T> amovGimbalTypeList =
        {
            {"G1", AMOV_GIMBAL_TYPE_G1},
            {"G2", AMOV_GIMBAL_TYPE_G2},
            {"Q10f", AMOV_GIMBAL_TYPE_Q10}};

    // Type-name -> driver factory lookup (used by amovGimbalCreator).
    callbackMap amovGimbals =
        {
            {"G1", g1GimbalDriver::creat},
            {"G2", g2GimbalDriver::creat},
            {"Q10f", Q10fGimbalDriver::creat}};
}
/* The amovGimbalCreator class is a factory class that creates an instance of the amovGimbal class */
// Factory used to create the gimbal instance
class amovGimbalCreator
{
public:
    // Looks up `type` in amovGimbal::amovGimbals and invokes the matching
    // driver factory; logs and returns NULL for unknown type names.
    static amovGimbal::IamovGimbalBase *createAmovGimbal(const std::string &type, amovGimbal::IOStreamBase *_IO)
    {
        amovGimbal::callbackMap::iterator temp = amovGimbal::amovGimbals.find(type);
        if (temp != amovGimbal::amovGimbals.end())
        {
            return (temp->second)(_IO);
        }
        std::cout << type << " is Unsupported device type!" << std::endl;
        return NULL;
    }
private:
    amovGimbalCreator()
    {
    }
    // NOTE(review): this singleton scaffolding appears unused --
    // createAmovGimbal is static and never calls getInstance(); `pInstance`
    // also has no out-of-class definition, which would fail to link if
    // getInstance() were ever called. Confirm and consider removing.
    static amovGimbalCreator *pInstance;
    static amovGimbalCreator *getInstance()
    {
        if (pInstance == NULL)
        {
            pInstance = new amovGimbalCreator();
        }
        return pInstance;
    }
    ~amovGimbalCreator();
};
/**
 * Endless RX pump: pulls single bytes from the IO stream and feeds them to the
 * protocol parser. Intended to run on its own detached thread (see startStack).
 */
void amovGimbal::IamovGimbalBase::parserLoop(void)
{
    uint8_t incoming;
    for (;;)
    {
        if (IO->inPutByte(&incoming))
        {
            parser(incoming);
        }
    }
}
/** Endless TX pump: repeatedly flushes queued frames to the IO stream. */
void amovGimbal::IamovGimbalBase::sendLoop(void)
{
    for (;;)
    {
        send();
    }
}
void amovGimbal::IamovGimbalBase::mainLoop(void)
{
uint8_t tempBuffer[MAX_PACK_SIZE];
while (1)
{
if (getRxPack(tempBuffer))
{
convert(tempBuffer);
}
}
}
/**
* It starts two threads, one for reading data from the serial port and one for sending data to the
* serial port
*/
void amovGimbal::IamovGimbalBase::startStack(void)
{
if (!IO->isOpen())
{
IO->open();
}
std::thread mainLoop(&IamovGimbalBase::parserLoop, this);
std::thread sendLoop(&IamovGimbalBase::sendLoop, this);
mainLoop.detach();
sendLoop.detach();
}
/**
 * Registers the state callback and launches the detached frame-dispatch
 * thread (mainLoop).
 *
 * @param callback invoked on every decoded state frame
 */
void amovGimbal::IamovGimbalBase::parserAuto(pStateInvoke callback)
{
    updateGimbalStateCallback = callback;
    std::thread(&IamovGimbalBase::mainLoop, this).detach();
}
/** Returns a snapshot of the latest gimbal state, copied under the state lock. */
amovGimbal::AMOV_GIMBAL_STATE_T amovGimbal::IamovGimbalBase::getGimabalState(void)
{
    std::lock_guard<std::mutex> guard(mState);
    return state;
}
amovGimbal::IamovGimbalBase::~IamovGimbalBase()
{
    // Pause briefly before closing the stream. NOTE(review): the detached
    // worker loops never terminate, so this sleep does not guarantee they have
    // stopped touching IO -- confirm the intended shutdown design.
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    IO->close();
}
/**
 * Default implementation of interface functions, not pure virtual functions for ease of extension.
 * Each stub returns 0 ("nothing queued"), so concrete drivers only need to
 * override the commands their hardware actually supports.
 */
// No-op: only drivers with addressable nodes use the IDs.
void amovGimbal::IamovGimbalBase::nodeSet(SET uint32_t _self, SET uint32_t _remote)
{
    return;
}
uint32_t amovGimbal::IamovGimbalBase::setGimabalPos(const amovGimbal::AMOV_GIMBAL_POS_T &pos)
{
    return 0;
}
uint32_t amovGimbal::IamovGimbalBase::setGimabalSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &speed)
{
    return 0;
}
uint32_t amovGimbal::IamovGimbalBase::setGimabalFollowSpeed(const amovGimbal::AMOV_GIMBAL_POS_T &followSpeed)
{
    return 0;
}
uint32_t amovGimbal::IamovGimbalBase::setGimabalHome(void)
{
    return 0;
}
uint32_t amovGimbal::IamovGimbalBase::setGimbalZoom(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
{
    return 0;
}
uint32_t amovGimbal::IamovGimbalBase::setGimbalFocus(amovGimbal::AMOV_GIMBAL_ZOOM_T zoom, float targetRate)
{
    return 0;
}
uint32_t amovGimbal::IamovGimbalBase::setGimbalROI(const amovGimbal::AMOV_GIMBAL_ROI_T area)
{
    return 0;
}
uint32_t amovGimbal::IamovGimbalBase::takePic(void)
{
    return 0;
}
uint32_t amovGimbal::IamovGimbalBase::setVideo(const amovGimbal::AMOV_GIMBAL_VIDEO_T newState)
{
    return 0;
}
/**
 * Builds a gimbal facade: looks up the concrete driver for `type` in the
 * factory and binds it to the supplied IO stream.
 *
 * @param type    device type name (e.g. "G1", "G2", "Q10f")
 * @param _IO     transport used to talk to the device
 * @param _self   local node ID (forwarded to drivers that use addressing)
 * @param _remote remote node ID
 * @throws const char* when `type` names no registered driver
 */
amovGimbal::gimbal::gimbal(const std::string &type, IOStreamBase *_IO,
                           uint32_t _self, uint32_t _remote)
{
    typeName = type;
    IO = _IO;
    dev = amovGimbalCreator::createAmovGimbal(typeName, IO);
    // Fix: createAmovGimbal() returns NULL for unknown type names; the
    // original code dereferenced it unconditionally and crashed.
    if (dev == NULL)
    {
        throw "Error: Unsupported device type!!!";
    }
    dev->nodeSet(_self, _remote);
}
amovGimbal::gimbal::~gimbal()
{
    // Kill the request threads first: give the detached workers a moment to
    // drain before the driver (and with it the protocol state) is destroyed.
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    delete dev;
}

View File

@ -0,0 +1,118 @@
/*
* @Description: External interface of amov gimbals
* @Author: L LC @amov
* @Date: 2022-10-27 18:34:26
* @LastEditors: L LC @amov
* @LastEditTime: 2023-04-18 11:42:05
* @FilePath: /spirecv-gimbal-sdk/gimbal_ctrl/driver/src/amov_gimbal.h
*/
#ifndef AMOV_GIMBAL_H
#define AMOV_GIMBAL_H
#include <stdint.h>
#include <stdbool.h>
#include <iostream>
#include <thread>
#include <unistd.h>
#include <mutex>
#include "amov_gimbal_struct.h"
#define MAX_QUEUE_SIZE 50
namespace amovGimbal
{
#define IN
#define OUT
#define SET
static inline void idleCallback(double &frameAngleRoll, double &frameAnglePitch, double &frameAngleYaw,
double &imuAngleRoll, double &imuAnglePitch, double &imuAngleYaw,
double &fovX, double &fovY)
{
}
    // Control data input and output
    // Abstract transport (serial/TCP/UDP/...) used by gimbal drivers.
    class IOStreamBase
    {
    public:
        IOStreamBase() {}
        virtual ~IOStreamBase() {}
        virtual bool open() = 0;
        virtual bool close() = 0;
        virtual bool isOpen() = 0;
        virtual bool isBusy() = 0;
        // These two functions need to be thread-safe
        virtual bool inPutByte(IN uint8_t *byte) = 0;                        // read one byte; false when none pending
        virtual uint32_t outPutBytes(IN uint8_t *byte, uint32_t lenght) = 0; // returns bytes written
    };
    // Driver-side base class: owns the shared state, the worker loops, and the
    // protocol hooks that concrete drivers (G1/G2/Q10f) implement.
    class IamovGimbalBase
    {
    protected:
        AMOV_GIMBAL_STATE_T state;              // latest decoded gimbal state
        std::mutex mState;                      // guards `state`
        IOStreamBase *IO;                       // transport; closed in the destructor
        pStateInvoke updateGimbalStateCallback; // invoked on each decoded state frame
        virtual bool parser(IN uint8_t byte) = 0;   // feed one RX byte; true when a frame completes
        virtual void send(void) = 0;                // flush one queued TX frame
        virtual void convert(void *buf) = 0;        // decode a complete frame into `state`
        virtual uint32_t pack(IN uint32_t cmd, uint8_t *pPayload, uint8_t payloadSize) = 0; // enqueue a TX frame
        virtual bool getRxPack(OUT void *pack) = 0; // pop one complete RX frame
        void parserLoop(void);                      // RX byte pump (detached thread)
        void sendLoop(void);                        // TX pump (detached thread)
        void mainLoop(void);                        // frame dispatch loop (detached thread)
    public:
        IamovGimbalBase(SET IOStreamBase *_IO)
        {
            IO = _IO;
        }
        virtual ~IamovGimbalBase();
        // Replaces the per-frame state callback.
        void setParserCallback(pStateInvoke callback)
        {
            this->updateGimbalStateCallback = callback;
        }
        // Protocol stack function items
        virtual void startStack(void);
        virtual void parserAuto(pStateInvoke callback = idleCallback);
        virtual void nodeSet(SET uint32_t _self, SET uint32_t _remote);
        // functions
        virtual AMOV_GIMBAL_STATE_T getGimabalState(void);
        virtual uint32_t setGimabalPos(const AMOV_GIMBAL_POS_T &pos);
        virtual uint32_t setGimabalSpeed(const AMOV_GIMBAL_POS_T &speed);
        virtual uint32_t setGimabalFollowSpeed(const AMOV_GIMBAL_POS_T &followSpeed);
        virtual uint32_t setGimabalHome(void);
        virtual uint32_t setGimbalZoom(AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
        virtual uint32_t setGimbalFocus(AMOV_GIMBAL_ZOOM_T zoom, float targetRate = 0);
        virtual uint32_t setGimbalROI(const AMOV_GIMBAL_ROI_T area);
        virtual uint32_t takePic(void);
        virtual uint32_t setVideo(const AMOV_GIMBAL_VIDEO_T newState);
    };
    // User-facing facade: selects and owns a concrete driver (`dev`) by type
    // name at construction; `dev` is deleted in the destructor.
    class gimbal
    {
    private:
        std::string typeName; // driver type name used at construction
        IOStreamBase *IO;     // transport handed to the driver
    public:
        IamovGimbalBase *dev; // concrete driver instance
        // Returns the driver type name this facade was built with.
        std::string name()
        {
            return typeName;
        }
        gimbal(const std::string &type, IOStreamBase *_IO,
               uint32_t _self = 0x02, uint32_t _remote = 0X80);
        ~gimbal();
    };
}
#endif

View File

@ -0,0 +1,74 @@
/*
* @Description: Common Data Structures of gimbal
* @Author: L LC @amov
* @Date: 2022-10-31 11:56:43
* @LastEditors: L LC @amov
* @LastEditTime: 2023-04-18 10:12:33
* @FilePath: /gimbal-sdk-multi-platform/src/amov_gimbal_struct.h
*/
#include <stdint.h>
#ifndef __AMOV_GIMABL_STRUCT_H
#define __AMOV_GIMABL_STRUCT_H
namespace amovGimbal
{
    // State-update callback: receives frame (stator-rotor) angles, IMU angles,
    // and camera FOV, all by reference.
    typedef void (*pStateInvoke)(double &frameAngleRoll, double &frameAnglePitch, double &frameAngleYaw,
                                 double &imuAngleRoll, double &imuAnglePitch, double &imuAngleYaw,
                                 double &fovX, double &fovY);
    // Stabilization mode.
    typedef enum
    {
        AMOV_GIMBAL_MODE_LOCK,
        AMOV_GIMBAL_MODE_NULOCK,
    } AMOV_GIMBAL_MODE_T;
    // Video recording state.
    typedef enum
    {
        AMOV_GIMBAL_VIDEO_TAKE,
        AMOV_GIMBAL_VIDEO_OFF
    } AMOV_GIMBAL_VIDEO_T;
    // Zoom direction selector.
    typedef enum
    {
        AMOV_GIMBAL_ZOOM_IN,
        AMOV_GIMBAL_ZOOM_OUT,
        AMOV_GIMBAL_ZOOM_STOP
    } AMOV_GIMBAL_ZOOM_T;
    // Euler attitude triple. NOTE(review): presumably degrees -- confirm.
    typedef struct
    {
        double yaw;
        double roll;
        double pitch;
    } AMOV_GIMBAL_POS_T;
    // Camera field of view.
    typedef struct
    {
        double x;
        double y;
    } AMOV_GIMBAL_FOV_T;
    // Aggregate snapshot of the gimbal.
    typedef struct
    {
        AMOV_GIMBAL_MODE_T workMode;
        AMOV_GIMBAL_VIDEO_T video;   // current recording state
        AMOV_GIMBAL_POS_T abs;       // absolute (IMU) attitude
        AMOV_GIMBAL_POS_T rel;       // relative (stator-rotor) attitude
        AMOV_GIMBAL_POS_T maxFollow; // cached follow-speed limits
        AMOV_GIMBAL_FOV_T fov;
    } AMOV_GIMBAL_STATE_T;
    // Region of interest. ("hight" [sic] = height)
    typedef struct
    {
        uint32_t centreX;
        uint32_t centreY;
        uint32_t hight;
        uint32_t width;
    } AMOV_GIMBAL_ROI_T;
} // namespace amovGimbal
#endif

411
gimbal_ctrl/sv_gimbal.cpp Normal file
View File

@ -0,0 +1,411 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-04-12 09:12:52
* @LastEditors: L LC @amov
* @LastEditTime: 2023-04-18 11:37:42
* @FilePath: /spirecv-gimbal-sdk/gimbal_ctrl/sv_gimbal.cpp
*/
#include "amov_gimbal.h"
#include "amov_gimbal_struct.h"
#include "sv_gimbal.h"
#include "sv_gimbal_io.hpp"
#include <iostream>
#include <string>
#include <thread>
/**
* This function sets the serial port for a Gimbal object.
*
* @param port The parameter "port" is a constant reference to a string object. It is used to set the
* serial port for a Gimbal object.
*/
void sv::Gimbal::setSerialPort(const std::string &port)
{
this->m_serial_port = port;
}
/**
* This function sets the baud rate for the serial port of a Gimbal object.
*
* @param baud_rate baud_rate is an integer parameter that represents the baud rate (bits per second)
* for the serial port. It is used to set the communication speed between the Gimbal and the device it
* is connected to via the serial port.
*/
void sv::Gimbal::setSerialPort(int baud_rate)
{
this->m_serial_baud_rate = baud_rate;
}
/**
 * Configures the remaining serial-framing parameters for the link.
 *
 * @param byte_size   data bits per character
 * @param parity      parity scheme (none/even/odd)
 * @param stop_bits   number of stop bits
 * @param flowcontrol flow-control scheme
 * @param time_out    read timeout passed to the serial backend
 */
void sv::Gimbal::setSerialPort(GimablSerialByteSize byte_size, GimablSerialParity parity,
                               GimablSerialStopBits stop_bits, GimablSerialFlowControl flowcontrol,
                               int time_out)
{
    m_serial_byte_size = static_cast<int>(byte_size);
    m_serial_parity = static_cast<int>(parity);
    m_serial_stopbits = static_cast<int>(stop_bits);
    m_serial_flowcontrol = static_cast<int>(flowcontrol);
    m_serial_timeout = static_cast<int>(time_out);
}
/**
* This function sets the network IP address for a Gimbal object in C++.
*
* @param ip The parameter "ip" is a constant reference to a string. It is used to set the value of the
* member variable "m_net_ip" in the class "Gimbal".
*/
void sv::Gimbal::setNetIp(const std::string &ip)
{
this->m_net_ip = ip;
}
/**
* This function sets the network port for a Gimbal object in C++.
*
* @param port The "port" parameter is an integer value that represents the network port number that
* the Gimbal object will use for communication. This function sets the value of the "m_net_port"
* member variable of the Gimbal object to the value passed in as the "port" parameter.
*/
void sv::Gimbal::setNetPort(const int &port)
{
this->m_net_port = port;
}
/**
 * Registers the callback invoked each time a gimbal state frame is parsed.
 *
 * @param callback state-update function pointer
 */
void sv::Gimbal::setStateCallback(sv::PStateInvoke callback)
{
    amovGimbal::gimbal *gim = static_cast<amovGimbal::gimbal *>(this->dev);
    gim->dev->setParserCallback(callback);
}
/**
 * Opens the configured communication link, instantiates the matching gimbal
 * driver and starts its protocol stack.
 *
 * @param callback invoked on every parsed state frame
 * @return true on success; false for link types not yet supported
 * @throws const char* on an unsupported link class or driver type
 */
bool sv::Gimbal::open(PStateInvoke callback)
{
    if (this->m_gimbal_link == GimbalLink::SERIAL)
    {
        this->IO = new UART(this->m_serial_port,
                            (uint32_t)this->m_serial_baud_rate,
                            serial::Timeout::simpleTimeout(this->m_serial_timeout),
                            (serial::bytesize_t)this->m_serial_byte_size,
                            (serial::parity_t)this->m_serial_parity,
                            (serial::stopbits_t)this->m_serial_stopbits,
                            (serial::flowcontrol_t)this->m_serial_flowcontrol);
    }
    // Subsequent additions
    else if (this->m_gimbal_link == sv::GimbalLink::ETHERNET_TCP)
    {
        return false;
    }
    else if (this->m_gimbal_link == sv::GimbalLink::ETHERNET_UDP)
    {
        return false;
    }
    else
    {
        throw "Error: Unsupported communication interface class!!!";
    }

    std::string driverName;
    switch (this->m_gimbal_type)
    {
    case sv::GimbalType::G1:
        driverName = "G1";
        break;
    case sv::GimbalType::Q10f:
        driverName = "Q10f";
        break;
    default:
        // Fix: release the IO stream allocated above before bailing out;
        // the original code leaked the UART object on unsupported types.
        delete (UART *)this->IO;
        this->IO = NULL;
        throw "Error: Unsupported driver!!!";
    }

    this->dev = new amovGimbal::gimbal(driverName, (amovGimbal::IOStreamBase *)this->IO);
    amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
    pdevTemp->dev->startStack();
    pdevTemp->dev->parserAuto(callback);
    return true;
}
/**
* This function sets the home position of a gimbal device and returns a boolean value indicating
* success or failure.
*
* @return A boolean value is being returned. If the function call `setGimabalHome()` returns a value
* greater than 0, then `true` is returned. Otherwise, `false` is returned.
*/
bool sv::Gimbal::setHome()
{
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
if (pdevTemp->dev->setGimabalHome() > 0)
{
return true;
}
else
{
return false;
}
}
/**
* This function sets the zoom level of a gimbal device and returns a boolean indicating success or
* failure.
*
* @param x The zoom level to set for the gimbal. It should be a positive double value.
*
* @return This function returns a boolean value. It returns true if the gimbal zoom is successfully
* set to the specified value, and false if the specified value is less than or equal to zero or if the
* setGimbalZoom function call returns a value less than or equal to zero.
*/
bool sv::Gimbal::setZoom(double x)
{
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
if (x <= 0.0)
{
return false;
}
if (pdevTemp->dev->setGimbalZoom(amovGimbal::AMOV_GIMBAL_ZOOM_STOP, x) > 0)
{
return true;
}
else
{
return false;
}
}
/**
* This function sets the auto zoom state of a gimbal device.
*
* @param state The state parameter is an integer that represents the desired state of the auto zoom
* feature. It is used to enable or disable the auto zoom feature of the gimbal. A value of 1 enables
* the auto zoom feature, while a value of 0 disables it.
*
* @return This function returns a boolean value. It returns true if the setGimbalZoom function call is
* successful and false if it fails.
*/
bool sv::Gimbal::setAutoZoom(int state)
{
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
if (pdevTemp->dev->setGimbalZoom((amovGimbal::AMOV_GIMBAL_ZOOM_T)state, 0.0f) > 0)
{
return true;
}
else
{
return false;
}
}
/**
* This function sets the autofocus state of a gimbal device and returns a boolean indicating success
* or failure.
*
* @param state The state parameter is an integer that represents the desired autofocus state. It is
* likely that a value of 1 represents autofocus enabled and a value of 0 represents autofocus
* disabled, but without more context it is impossible to say for certain.
*
* @return This function returns a boolean value. It returns true if the setGimbalFocus function call
* is successful and returns a value greater than 0, and false if the function call fails and returns a
* value less than or equal to 0.
*/
bool sv::Gimbal::setAutoFocus(int state)
{
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
if (pdevTemp->dev->setGimbalFocus((amovGimbal::AMOV_GIMBAL_ZOOM_T)state, 0.0f) > 0)
{
return true;
}
else
{
return false;
}
}
/**
* The function takes a photo using a gimbal device and returns true if successful, false otherwise.
*
* @return A boolean value is being returned. It will be true if the function call to takePic() returns
* a value greater than 0, and false otherwise.
*/
bool sv::Gimbal::takePhoto()
{
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
if (pdevTemp->dev->takePic() > 0)
{
return true;
}
else
{
return false;
}
}
/**
* The function takes a state parameter and sets the video state of a gimbal device accordingly.
*
* @param state The state parameter is an integer that determines the desired state of the video
* recording function of the Gimbal device. It can have two possible values: 0 for turning off the
* video recording and 1 for starting the video recording.
*
* @return a boolean value. It returns true if the video state was successfully set to the desired
* state (either off or take), and false if there was an error in setting the state.
*/
bool sv::Gimbal::takeVideo(int state)
{
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
amovGimbal::AMOV_GIMBAL_VIDEO_T newState;
switch (state)
{
case 0:
newState = amovGimbal::AMOV_GIMBAL_VIDEO_OFF;
break;
case 1:
newState = amovGimbal::AMOV_GIMBAL_VIDEO_TAKE;
break;
default:
newState = amovGimbal::AMOV_GIMBAL_VIDEO_OFF;
break;
}
if (pdevTemp->dev->setVideo(newState) > 0)
{
return true;
}
else
{
return false;
}
}
/**
* This function returns the current state of the video on a gimbal device.
*
* @return an integer value that represents the state of the video on the gimbal. If the video is being
* taken, it returns 1. If the video is off, it returns 0. If the state is unknown, it throws an
* exception with the message "Unknown state information!!!".
*/
int sv::Gimbal::getVideoState()
{
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
int ret;
amovGimbal::AMOV_GIMBAL_STATE_T state;
state = pdevTemp->dev->getGimabalState();
if (state.video == amovGimbal::AMOV_GIMBAL_VIDEO_TAKE)
{
ret = 1;
}
else if (state.video == amovGimbal::AMOV_GIMBAL_VIDEO_OFF)
{
ret = 0;
}
else
{
throw "Unknown state information!!!";
}
return ret;
}
/**
* The function sets the angle and rate of a gimbal using Euler angles.
*
* @param roll The desired roll angle of the gimbal in degrees.
* @param pitch The desired pitch angle of the gimbal in degrees.
* @param yaw The desired yaw angle in degrees. Yaw is the rotation around the vertical axis.
* @param roll_rate The rate at which the gimbal should rotate around the roll axis, in degrees per
* second.
* @param pitch_rate The desired pitch rotation rate in degrees per second. If it is set to 0, it will
* default to 360 degrees per second.
* @param yaw_rate The rate at which the yaw angle of the gimbal should change, in degrees per second.
*/
void sv::Gimbal::setAngleEuler(double roll, double pitch, double yaw,
double roll_rate, double pitch_rate, double yaw_rate)
{
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
amovGimbal::AMOV_GIMBAL_POS_T temp;
if (pitch_rate == 0.0f)
pitch_rate = 360;
if (roll_rate == 0.0f)
roll_rate = 360;
if (yaw_rate == 0.0f)
yaw_rate = 360;
temp.pitch = pitch_rate;
temp.roll = roll_rate;
temp.yaw = yaw_rate;
pdevTemp->dev->setGimabalFollowSpeed(temp);
temp.pitch = pitch;
temp.roll = roll;
temp.yaw = yaw;
pdevTemp->dev->setGimabalPos(temp);
}
/**
* This function sets the angle rate of a gimbal using Euler angles.
*
* @param roll_rate The rate of change of the roll angle of the gimbal, measured in degrees per second.
* @param pitch_rate The rate of change of pitch angle in degrees per second.
* @param yaw_rate The rate of change of the yaw angle of the gimbal. Yaw is the rotation of the gimbal
* around the vertical axis.
*/
void sv::Gimbal::setAngleRateEuler(double roll_rate, double pitch_rate, double yaw_rate)
{
amovGimbal::gimbal *pdevTemp = (amovGimbal::gimbal *)this->dev;
amovGimbal::AMOV_GIMBAL_POS_T temp;
temp.pitch = pitch_rate;
temp.roll = roll_rate;
temp.yaw = yaw_rate;
pdevTemp->dev->setGimabalSpeed(temp);
}
// Destructor: releases the device objects stored behind the opaque pointers.
// NOTE(review): dev (amovGimbal::gimbal) is destroyed before IO (the stream it
// communicates through); keep this order in case the driver still touches the
// stream during teardown — confirm against the gimbal SDK.
sv::Gimbal::~Gimbal()
{
delete (amovGimbal::gimbal *)this->dev;
delete (amovGimbal::IOStreamBase *)this->IO;
}

View File

@ -0,0 +1,68 @@
/*
* @Description:
* @Author: L LC @amov
* @Date: 2023-04-12 12:22:09
* @LastEditors: L LC @amov
* @LastEditTime: 2023-04-13 10:17:21
* @FilePath: /spirecv-gimbal-sdk/gimbal_ctrl/sv_gimbal_io.hpp
*/
#ifndef __SV_GIMABL_IO_H
#define __SV_GIMABL_IO_H
#include "amov_gimbal.h"
#include "serial/serial.h"
class UART : public amovGimbal::IOStreamBase
{
private:
serial::Serial *ser;
public:
virtual bool open()
{
ser->open();
return true;
}
virtual bool close()
{
ser->close();
return true;
}
virtual bool isOpen()
{
return ser->isOpen();
}
virtual bool isBusy()
{
return false;
}
virtual bool inPutByte(IN uint8_t *byte)
{
if (ser->available() > 0)
{
ser->read(byte, 1);
return true;
}
return false;
}
virtual uint32_t outPutBytes(IN uint8_t *byte, uint32_t lenght)
{
return ser->write(byte, lenght);
}
UART(const std::string &port, uint32_t baudrate, serial::Timeout timeout,
serial::bytesize_t bytesize, serial::parity_t parity, serial::stopbits_t stopbits,
serial::flowcontrol_t flowcontrol)
{
ser = new serial::Serial(port, baudrate, timeout, bytesize, parity, stopbits, flowcontrol);
}
~UART()
{
ser->close();
delete ser;
}
};
#endif

172
include/sv_algorithm_base.h Normal file
View File

@ -0,0 +1,172 @@
#ifndef __SV_ALGORITHM__
#define __SV_ALGORITHM__
#include "sv_video_base.h"
#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/tracking.hpp>
#include <string>
#include <chrono>
namespace yaed {
class EllipseDetector;
}
namespace sv {
// union JsonValue;
// class JsonAllocator;
//! Common base for camera algorithms: holds camera intrinsics and the path
//! of the algorithm-parameter file shared by the detectors/trackers below.
class CameraAlgorithm
{
public:
CameraAlgorithm();
~CameraAlgorithm();
//! Load camera parameters (matrix, distortion, image size, FOV) from a YAML file.
void loadCameraParams(std::string yaml_fn_);
//! Set the algorithm-parameter JSON file used by subclasses.
void loadAlgorithmParams(std::string json_fn_);
cv::Mat camera_matrix; //!< camera intrinsic matrix
cv::Mat distortion; //!< lens distortion coefficients
int image_width;
int image_height;
double fov_x; //!< x-axis field of view
double fov_y; //!< y-axis field of view
std::string alg_params_fn; //!< path of the algorithm-parameter JSON file
protected:
// JsonValue* _value;
// JsonAllocator* _allocator;
std::chrono::system_clock::time_point _t0; //!< internal reference time point
};
//! Detects ArUco fiducial markers in an image frame.
class ArucoDetector : public CameraAlgorithm
{
public:
ArucoDetector();
//! Detect markers in img_ and write the results into tgts_.
void detect(cv::Mat img_, TargetsInFrame& tgts_);
private:
void _load(); //!< load detector settings from the algorithm-parameter file
bool _params_loaded; //!< guards _load()
cv::Ptr<cv::aruco::DetectorParameters> _detector_params;
cv::Ptr<cv::aruco::Dictionary> _dictionary; //!< ArUco dictionary in use
int _dictionary_id;
std::vector<int> _ids_need; //!< marker IDs of interest
std::vector<double> _lengths_need; //!< marker side lengths — presumably paired with _ids_need; confirm in impl
};
//! Detects ellipses in an image frame using the YAED detector.
class EllipseDetector : public CameraAlgorithm
{
public:
EllipseDetector();
~EllipseDetector();
//! Run detection on every image in input_img_dir_, writing JSON results to output_json_dir_.
void detectAllInDirectory(std::string input_img_dir_, std::string output_json_dir_);
//! Detect ellipses in img_ and write the results into tgts_.
void detect(cv::Mat img_, TargetsInFrame& tgts_);
protected:
void _load(); //!< load detector settings from the algorithm-parameter file
bool _params_loaded;
yaed::EllipseDetector* _ed; //!< underlying YAED implementation (forward-declared above)
float _max_center_distance_ratio;
double _radius_in_meter; //!< physical radius of the target circle, in meters
};
//! Base class for landing-marker detection: candidate regions come from the
//! ellipse detector, then roiCNN() assigns a label to each candidate patch.
class LandingMarkerDetectorBase : public EllipseDetector
{
public:
LandingMarkerDetectorBase();
~LandingMarkerDetectorBase();
void detect(cv::Mat img_, TargetsInFrame& tgts_);
bool isParamsLoaded();
int getMaxCandidates();
std::vector<std::string> getLabelsNeed();
protected:
virtual bool setupImpl(); //!< backend-specific initialization hook
//! Classify each ROI patch; writes one label per input ROI.
virtual void roiCNN(std::vector<cv::Mat>& input_rois_, std::vector<int>& output_labels_);
void _loadLabels();
int _max_candidates; //!< cap on candidate ROIs considered per frame
std::vector<std::string> _labels_need; //!< marker labels of interest
};
//! Base class for single-object tracking: init() with a bounding box, then
//! call track() per frame. Backends plug in via the *Impl virtuals.
class SingleObjectTrackerBase : public CameraAlgorithm
{
public:
SingleObjectTrackerBase();
~SingleObjectTrackerBase();
void warmUp();
//! Start tracking the object inside bounding_box_ of img_.
void init(cv::Mat img_, const cv::Rect& bounding_box_);
//! Track the object in the next frame; results go into tgts_.
void track(cv::Mat img_, TargetsInFrame& tgts_);
bool isParamsLoaded();
std::string getAlgorithm();
int getBackend();
int getTarget();
protected:
virtual bool setupImpl(); //!< backend-specific initialization hook
virtual void initImpl(cv::Mat img_, const cv::Rect& bounding_box_);
//! Returns true when the object was located; output_bbox_ holds its position.
virtual bool trackImpl(cv::Mat img_, cv::Rect& output_bbox_);
void _load(); //!< load tracker settings from the algorithm-parameter file
bool _params_loaded;
std::string _algorithm; //!< tracker algorithm name
int _backend;
int _target;
};
//! Base class for general object detection (YOLO-style): boxes, labels,
//! scores and optional segmentation masks. Backends plug in via detectImpl().
class CommonObjectDetectorBase : public CameraAlgorithm
{
public:
CommonObjectDetectorBase();
~CommonObjectDetectorBase();
void warmUp();
//! Detect objects in img_; roi_ (optional) restricts detection to a sub-region.
void detect(cv::Mat img_, TargetsInFrame& tgts_, Box* roi_=nullptr, int img_w_=0, int img_h_=0);
bool isParamsLoaded();
std::string getDataset();
std::vector<std::string> getClassNames();
std::vector<double> getClassWs();
std::vector<double> getClassHs();
int getInputH();
void setInputH(int h_);
int getInputW();
void setInputW(int w_);
int getClassNum();
int getOutputSize();
double getThrsNms();
double getThrsConf();
int useWidthOrHeight();
bool withSegmentation();
protected:
virtual bool setupImpl(); //!< backend-specific initialization hook
//! Backend detection hook: fills parallel vectors, one entry per detection.
virtual void detectImpl(
cv::Mat img_,
std::vector<float>& boxes_x_,
std::vector<float>& boxes_y_,
std::vector<float>& boxes_w_,
std::vector<float>& boxes_h_,
std::vector<int>& boxes_label_,
std::vector<float>& boxes_score_,
std::vector<cv::Mat>& boxes_seg_
);
void _load(); //!< load detector settings from the algorithm-parameter file
bool _params_loaded;
std::string _dataset; //!< dataset name (e.g. class-name source)
std::vector<std::string> _class_names;
std::vector<double> _class_ws; //!< per-class physical widths — presumably meters; confirm
std::vector<double> _class_hs; //!< per-class physical heights — presumably meters; confirm
int _input_h; //!< network input height
int _input_w; //!< network input width
int _n_classes;
int _output_size;
double _thrs_nms; //!< non-maximum-suppression threshold
double _thrs_conf; //!< confidence threshold
int _use_width_or_height;
bool _with_segmentation; //!< true when the backend also outputs masks
};
}
#endif

39
include/sv_common_det.h Normal file
View File

@ -0,0 +1,39 @@
#ifndef __SV_COMMON_DET__
#define __SV_COMMON_DET__
#include "sv_core.h"
#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/tracking.hpp>
#include <string>
#include <chrono>
namespace sv {
class CommonObjectDetectorCUDAImpl;
//! Concrete object detector; delegates inference to a CUDA implementation.
class CommonObjectDetector : public CommonObjectDetectorBase
{
public:
CommonObjectDetector();
~CommonObjectDetector();
protected:
bool setupImpl();
//! Fills parallel result vectors, one entry per detection (see base class).
void detectImpl(
cv::Mat img_,
std::vector<float>& boxes_x_,
std::vector<float>& boxes_y_,
std::vector<float>& boxes_w_,
std::vector<float>& boxes_h_,
std::vector<int>& boxes_label_,
std::vector<float>& boxes_score_,
std::vector<cv::Mat>& boxes_seg_
);
CommonObjectDetectorCUDAImpl* _cuda_impl; //!< backend implementation (forward-declared)
};
}
#endif

8
include/sv_core.h Normal file
View File

@ -0,0 +1,8 @@
#ifndef __SV_CORE__
#define __SV_CORE__
#include "sv_video_base.h"
#include "sv_gimbal.h"
#include "sv_algorithm_base.h"
#endif

162
include/sv_gimbal.h Normal file
View File

@ -0,0 +1,162 @@
/*
* @Description:
* @Author: jario-jin @amov
* @Date: 2023-04-12 09:12:52
* @LastEditors: L LC @amov
* @LastEditTime: 2023-04-18 11:49:27
* @FilePath: /spirecv-gimbal-sdk/include/sv_gimbal.h
*/
#ifndef __SV_GIMBAL__
#define __SV_GIMBAL__
#include <string>
namespace sv
{
//! Gimbal state callback: receives frame/IMU Euler angles and camera FOV.
typedef void (*PStateInvoke)(double &frame_ang_r, double &frame_ang_p, double &frame_ang_y,
double &imu_ang_r, double &imu_ang_p, double &imu_ang_y,
double &fov_x, double &fov_y);
//! Supported gimbal hardware models.
enum class GimbalType
{
G1,
Q10f
};
//! Physical/transport link to the gimbal.
enum class GimbalLink
{
SERIAL,
ETHERNET_TCP,
ETHERNET_UDP
};
// NOTE(review): "Gimabl" below is a typo for "Gimbal", but the names are part
// of the public API of this header and are kept as-is.
enum class GimablSerialByteSize
{
FIVE_BYTES = 5,
SIX_BYTES = 6,
SEVEN_BYTES = 7,
EIGHT_BYTES = 8,
};
enum class GimablSerialParity
{
PARITY_NONE = 0,
PARITY_ODD = 1,
PARITY_EVEN = 2,
PARITY_MARK = 3,
PARITY_SPACE = 4,
};
enum class GimablSerialStopBits
{
STOPBITS_ONE = 1,
STOPBITS_TWO = 2,
STOPBITS_ONE_POINT_FIVE = 3,
};
enum class GimablSerialFlowControl
{
FLOWCONTROL_NONE = 0,
FLOWCONTROL_SOFTWARE = 1,
FLOWCONTROL_HARDWARE = 2,
};
//! Default no-op state callback used when the caller does not supply one.
static inline void emptyCallback(double &frameAngleRoll, double &frameAnglePitch, double &frameAngleYaw,
double &imuAngleRoll, double &imuAnglePitch, double &imuAngleYaw,
double &fovX, double &fovY)
{
}
//! A gimbal control and state reading class.
/*!
A common gimbal control class for vary type of gimbals.
e.g. AMOV G1
*/
class Gimbal
{
private:
// Device pointers (opaque to keep SDK types out of this public header):
// dev holds the amovGimbal::gimbal driver, IO its I/O stream; both are
// created by open() and deleted by the destructor.
void *dev;
void *IO;
// Generic serial interface parameters list & default parameters
std::string m_serial_port = "/dev/ttyUSB0";
int m_serial_baud_rate = 115200;
int m_serial_byte_size = (int)GimablSerialByteSize::EIGHT_BYTES;
int m_serial_parity = (int)GimablSerialParity::PARITY_NONE;
int m_serial_stopbits = (int)GimablSerialStopBits::STOPBITS_ONE;
int m_serial_flowcontrol = (int)GimablSerialFlowControl::FLOWCONTROL_NONE;
int m_serial_timeout = 500;
// Ethernet interface parameters list & default parameters
std::string m_net_ip = "192.168.2.64";
int m_net_port = 9090;
GimbalType m_gimbal_type;
GimbalLink m_gimbal_link;
public:
//! Constructor
/*!
\param serial_port: string like '/dev/ttyUSB0' in linux sys.
\param baud_rate: serial baud rate, e.g. 115200
*/
Gimbal(GimbalType gtype = GimbalType::G1, GimbalLink ltype = GimbalLink::SERIAL)
{
m_gimbal_type = gtype;
m_gimbal_link = ltype;
}
~Gimbal();
// set Generic serial interface parameters (call before open())
void setSerialPort(const std::string &port);
void setSerialPort(const int baud_rate);
void setSerialPort(GimablSerialByteSize byte_size, GimablSerialParity parity,
GimablSerialStopBits stop_bits, GimablSerialFlowControl flowcontrol,
int time_out = 500);
// set Ethernet interface parameters
void setNetIp(const std::string &ip);
void setNetPort(const int &port);
// Register the state callback / create the device instance.
void setStateCallback(PStateInvoke callback);
//! Open the device; returns true on success. The callback defaults to a no-op.
bool open(PStateInvoke callback = emptyCallback);
// Funtions
bool setHome(); //!< return the gimbal to its home position
bool setZoom(double x); //!< set zoom factor
bool setAutoZoom(int state);
bool setAutoFocus(int state);
bool takePhoto();
bool takeVideo(int state); //!< start/stop video recording
int getVideoState(); //!< 1 = recording, 0 = off (throws on unknown state)
//! Set gimbal angles
/*!
\param roll: eular roll angle (-60, 60) degree
\param pitch: eular pitch angle (-135, 135) degree
\param yaw: eular yaw angle (-150, 150) degree
\param roll_rate: roll angle rate, degree/s
\param pitch_rate: pitch angle rate, degree/s
\param yaw_rate: yaw angle rate, degree/s
*/
void setAngleEuler(
double roll,
double pitch,
double yaw,
double roll_rate = 0,
double pitch_rate = 0,
double yaw_rate = 0);
//! Set gimbal angle rates
/*!
\param roll_rate: roll angle rate, degree/s
\param pitch_rate: pitch angle rate, degree/s
\param yaw_rate: yaw angle rate, degree/s
*/
void setAngleRateEuler(
double roll_rate,
double pitch_rate,
double yaw_rate);
};
}
#endif

33
include/sv_landing_det.h Normal file
View File

@ -0,0 +1,33 @@
#ifndef __SV_LANDING_DET__
#define __SV_LANDING_DET__
#include "sv_core.h"
#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/tracking.hpp>
#include <string>
#include <chrono>
namespace sv {
class LandingMarkerDetectorCUDAImpl;
class LandingMarkerDetector : public LandingMarkerDetectorBase
{
public:
LandingMarkerDetector();
~LandingMarkerDetector();
protected:
bool setupImpl();
void roiCNN(
std::vector<cv::Mat>& input_rois_,
std::vector<int>& output_labels_
);
LandingMarkerDetectorCUDAImpl* _cuda_impl;
};
}
#endif

32
include/sv_tracking.h Normal file
View File

@ -0,0 +1,32 @@
#ifndef __SV_TRACKING__
#define __SV_TRACKING__
#include "sv_core.h"
#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/tracking.hpp>
#include <string>
#include <chrono>
namespace sv {
class SingleObjectTrackerOCV470Impl;
class SingleObjectTracker : public SingleObjectTrackerBase
{
public:
SingleObjectTracker();
~SingleObjectTracker();
protected:
bool setupImpl();
void initImpl(cv::Mat img_, const cv::Rect& bounding_box_);
bool trackImpl(cv::Mat img_, cv::Rect& output_bbox_);
SingleObjectTrackerOCV470Impl* _ocv470_impl;
};
}
#endif

399
include/sv_video_base.h Normal file
View File

@ -0,0 +1,399 @@
#ifndef __SV_VIDEOIO__
#define __SV_VIDEOIO__
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>
#include <fstream>
#include <queue>
#include <stack>
#include <thread>
#include <arpa/inet.h>
#include <netinet/in.h> // for sockaddr_in
#define SV_RAD2DEG 57.2957795
// #define X86_PLATFORM
// #define JETSON_PLATFORM
namespace sv {
//! The rectangle bounding-box of an object.
class Box
{
public:
Box();
int x1; //!< top-left x (pixel)
int y1; //!< top-left y (pixel)
int x2; //!< bottom-right x (pixel)
int y2; //!< bottom-right y (pixel)
//! Set the parameters of the bounding-box by XYXY-format.
/*!
\param x1_: The x-axis pixel coordinates of the top-left point.
\param y1_: The y-axis pixel coordinates of the top-left point.
\param x2_: The x-axis pixel coordinates of the bottom-right point.
\param y2_: The y-axis pixel coordinates of the bottom-right point.
*/
void setXYXY(int x1_, int y1_, int x2_, int y2_);
//! Set the parameters of the bounding-box by XYWH-format.
/*!
\param x1_: The x-axis pixel coordinates of the top-left point.
\param y1_: The y-axis pixel coordinates of the top-left point.
\param w_: The width of the bounding rectangle.
\param h_: The height of the bounding rectangle.
*/
void setXYWH(int x_, int y_, int w_, int h_);
};
//! Description class for a single target detection result.
/*!
Support multiple description methods,
such as bounding box, segmentation, ellipse, three-dimensional position, etc.
*/
class Target
{
public:
Target();
//! X coordinate of object center point, [0, 1], (Required)
double cx;
//! Y coordinate of object center point, [0, 1], (Required)
double cy;
//! Object-width / image-width, (0, 1]
double w;
//! Object-height / image-heigth, (0, 1]
double h;
//! Objectness, Confidence, (0, 1]
double score;
//! Category of target.
std::string category;
//! Category ID of target.
int category_id;
//! The same target in different frames shares a unique ID.
int tracked_id;
//! X coordinate of object position in Camera-Frame (unit: meter).
double px;
//! Y coordinate of object position in Camera-Frame (unit: meter).
double py;
//! Z coordinate of object position in Camera-Frame (unit: meter).
double pz;
//! Line of sight (LOS) angle on X-axis (unit: degree).
double los_ax;
//! Line of sight (LOS) angle on Y-axis (unit: degree).
double los_ay;
//! The angle of the target in the image coordinate system, (unit: degree) [-180, 180].
double yaw_a;
//! Whether the height&width of the target can be obtained.
bool has_hw;
//! Whether the category of the target can be obtained.
bool has_category;
//! Whether the tracking-ID of the target can be obtained.
bool has_tid;
//! Whether the 3D-position of the target can be obtained.
bool has_position;
//! Whether the LOS-angle of the target can be obtained.
bool has_los;
//! Whether the segmentation of the target can be obtained.
bool has_seg;
//! Whether the bounding-box of the target can be obtained.
bool has_box;
//! Whether the ellipse-parameters of the target can be obtained.
bool has_ell;
//! Whether the aruco-parameters of the target can be obtained.
bool has_aruco;
//! Whether the direction of the target can be obtained.
bool has_yaw;
// Setters: each fills the matching private field group (and, presumably,
// raises the corresponding has_* flag — confirm in the implementation).
void setCategory(std::string cate_, int cate_id_);
void setLOS(double cx_, double cy_, cv::Mat camera_matrix_, int img_w_, int img_h_);
void setTrackID(int id_);
void setPosition(double x_, double y_, double z_);
void setBox(int x1_, int y1_, int x2_, int y2_, int img_w_, int img_h_);
void setAruco(int id_, std::vector<cv::Point2f> corners_, cv::Vec3d rvecs_, cv::Vec3d tvecs_, int img_w_, int img_h_, cv::Mat camera_matrix_);
void setEllipse(double xc_, double yc_, double a_, double b_, double rad_, double score_, int img_w_, int img_h_, cv::Mat camera_matrix_, double radius_in_meter_);
void setYaw(double vec_x_, double vec_y);
void setMask(cv::Mat mask_);
// Getters: the bool-returning ones report whether the data is available.
cv::Mat getMask();
bool getBox(Box& b);
bool getAruco(int& id, std::vector<cv::Point2f> &corners);
bool getEllipse(double& xc_, double& yc_, double& a_, double& b_, double& rad_);
//! Serialize this target to a JSON string.
std::string getJsonStr();
private:
//! segmentation [[x1,y1, x2,y2, x3,y3,...],...]
/*!
SEG variables: (_s_) segmentation, segmentation_size_h, segmentation_size_w, segmentation_counts, area
*/
std::vector<std::vector<double> > _s_segmentation;
int _s_segmentation_size_h;
int _s_segmentation_size_w;
std::string _s_segmentation_counts;
cv::Mat _mask;
double _s_area;
//! bounding box [x, y, w, h]
/*!
BOX variables: (_b_) box
*/
Box _b_box; // x,y,w,h
//! ellipse x-axis center
/*!
ELL variables: (_e_) xc, yc, a, b, rad
*/
double _e_xc;
double _e_yc;
double _e_a;
double _e_b;
double _e_rad;
//! Aruco Marker ID
/*!
ARUCO variables: (_a_) id, corners, rvecs, tvecs
*/
int _a_id;
std::vector<cv::Point2f> _a_corners;
cv::Vec3d _a_rvecs;
cv::Vec3d _a_tvecs;
};
//! The mission/algorithm that produced a frame's results.
enum class MissionType {NONE, COMMON_DET, TRACKING, ARUCO_DET, LANDMARK_DET, ELLIPSE_DET};
//! This class describes all objects in a single frame image.
/*!
1. Contains multiple Target instances.
2. Describes the ID of the current frame, image width and height, current field of view, etc.
3. Describes the processed image sub-regions and supports local region detection.
*/
class TargetsInFrame
{
public:
TargetsInFrame(int frame_id_);
//! Frame number.
int frame_id;
//! Frame/image height.
int height;
//! Frame/image width.
int width;
//! Detection frame per second (FPS).
double fps;
//! The x-axis field of view (FOV) of the current camera.
double fov_x;
//! The y-axis field of view (FOV) of the current camera.
double fov_y;
//! Gimbal (pod) pitch angle.
// NOTE(review): "pod_patch" looks like a typo for pod_pitch; the name is kept
// because it is part of this public header.
double pod_patch;
//! Gimbal (pod) roll angle.
double pod_roll;
//! Gimbal (pod) yaw angle: 0 = east, positive in the ENU sense, range [-180, 180].
double pod_yaw;
//! Current longitude.
double longitude;
//! Current latitude.
double latitude;
//! Current flight altitude.
double altitude;
//! UAV velocity, x-axis, ENU (east-north-up) frame.
double uav_vx;
//! UAV velocity, y-axis, ENU (east-north-up) frame.
double uav_vy;
//! UAV velocity, z-axis, ENU (east-north-up) frame.
double uav_vz;
//! Current illumination intensity (Lux).
double illumination;
//! Whether the detection FPS can be obtained.
bool has_fps;
//! Whether the FOV can be obtained.
bool has_fov;
//! Whether the processed image sub-region can be obtained.
bool has_roi;
bool has_pod_info; //!< whether the pod_* fields are valid
bool has_uav_pos; //!< whether longitude/latitude/altitude are valid
bool has_uav_vel; //!< whether uav_vx/vy/vz are valid
bool has_ill; //!< whether illumination is valid
MissionType type; //!< which algorithm produced these results
//! The processed image sub-region, if size>0, it means no full image detection.
std::vector<Box> rois;
//! Detected Target Instances.
std::vector<Target> targets;
std::string date_captured; //!< capture timestamp, set by setTimeNow()
void setTimeNow(); //!< stamp date_captured with the current time
void setFPS(double fps_);
void setFOV(double fov_x_, double fov_y_);
void setSize(int width_, int height_);
//! Serialize this frame's results to a JSON string.
std::string getJsonStr();
};
//! Sends per-frame detection results (TargetsInFrame) to a destination
//! address over UDP.
class UDPServer {
public:
UDPServer(std::string dest_ip="127.0.0.1", int port=20166);
~UDPServer();
//! Transmit one frame's results to the configured destination.
void send(const TargetsInFrame& tgts_);
private:
struct sockaddr_in _servaddr; //!< destination address
int _sockfd; //!< UDP socket file descriptor
};
//! Asynchronous video writer: write() enqueues frames (and optionally their
//! targets) which a background thread persists. Backends plug in via *Impl.
class VideoWriterBase {
public:
VideoWriterBase();
~VideoWriterBase();
//! Configure output path, frame size, FPS and whether targets are saved too.
void setup(std::string file_path, cv::Size size, double fps=25.0, bool with_targets=false);
//! Enqueue one frame (with its targets) for writing.
void write(cv::Mat image, TargetsInFrame tgts=TargetsInFrame(0));
void release(); //!< stop writing and close the output
cv::Size getSize();
double getFps();
std::string getFilePath();
bool isRunning();
protected:
virtual bool setupImpl(std::string file_name_); //!< backend hook: open the sink
virtual bool isOpenedImpl();
virtual void writeImpl(cv::Mat img_); //!< backend hook: write one frame
virtual void releaseImpl();
void _init();
void _run(); //!< background-thread loop body
bool _is_running;
cv::Size _image_size;
double _fps;
bool _with_targets; //!< when true, targets are also written (JSON stream)
int _fid;
int _fcnt;
std::thread _tt; //!< background writer thread
// cv::VideoWriter _writer;
std::ofstream _targets_ofs; //!< output stream for target records
std::string _file_path;
std::queue<cv::Mat> _image_to_write; //!< pending frames
std::queue<TargetsInFrame> _tgts_to_write; //!< pending target records
};
//! Asynchronous video streamer: stream() hands frames to a background thread
//! that serves them at the configured port/URL. Backends plug in via *Impl.
class VideoStreamerBase {
public:
VideoStreamerBase();
~VideoStreamerBase();
//! Configure stream size, port, bitrate (Mbps) and URL path.
void setup(cv::Size size, int port=8554, int bitrate=2, std::string url="/live"); // 2M
void stream(cv::Mat image); //!< submit one frame for streaming
void release(); //!< stop streaming
cv::Size getSize();
int getPort();
std::string getUrl();
int getBitrate();
bool isRunning();
protected:
virtual bool setupImpl(); //!< backend hook: open the stream
virtual bool isOpenedImpl();
virtual void writeImpl(cv::Mat image); //!< backend hook: push one frame
virtual void releaseImpl();
void _run(); //!< background-thread loop body
bool _is_running;
cv::Size _stream_size;
int _port;
std::string _url;
int _bitrate;
std::thread _tt; //!< background streamer thread
// NOTE(review): a stack keeps the most recent frame on top — presumably so
// the streamer always serves the latest frame; confirm in _run().
std::stack<cv::Mat> _image_to_stream;
};
//! Supported camera sources.
enum class CameraType {NONE, WEBCAM, G1, Q10};
//! Camera capture base class: frames are grabbed on a background thread and
//! fetched with read(). Configure with the setters BEFORE open().
class CameraBase {
public:
CameraBase(CameraType type=CameraType::NONE, int id=0);
~CameraBase();
//! Open the camera source; starts the background capture thread.
void open(CameraType type=CameraType::WEBCAM, int id=0);
//! Fetch the latest frame; returns true when a frame was produced.
bool read(cv::Mat& image);
void release(); //!< stop capturing and free the device
int getW(); //!< frame width
int getH(); //!< frame height
int getFps();
std::string getIp();
int getPort();
double getBrightness();
double getContrast();
double getSaturation();
double getHue();
double getExposure();
bool isRunning();
// Setters for capture parameters (resolution, FPS, network address, image
// adjustments); intended to be called before open().
void setWH(int width, int height);
void setFps(int fps);
void setIp(std::string ip);
void setPort(int port);
void setBrightness(double brightness);
void setContrast(double contrast);
void setSaturation(double saturation);
void setHue(double hue);
void setExposure(double exposure);
protected:
virtual void openImpl(); //!< backend hook: open the concrete device
void _run(); //!< background capture-thread loop body
bool _is_running;
bool _is_updated; //!< true when a fresh frame is available for read()
std::thread _tt; //!< background capture thread
cv::VideoCapture _cap;
cv::Mat _frame; //!< most recently captured frame
CameraType _type;
int _camera_id;
int _width;
int _height;
int _fps;
std::string _ip; //!< source IP for network cameras
int _port;
double _brightness;
double _contrast;
double _saturation;
double _hue;
double _exposure;
};
//! Draw the targets of tgts_ onto img_ in place; the with_* flags select
//! which annotations (category, track ID, mask, box, ellipse, aruco, yaw)
//! are rendered. with_all draws everything.
void drawTargetsInFrame(
cv::Mat& img_,
const TargetsInFrame& tgts_,
bool with_all=true,
bool with_category=false,
bool with_tid=false,
bool with_seg=false,
bool with_box=false,
bool with_ell=false,
bool with_aruco=false,
bool with_yaw=false
);
//! Return the current user's home directory path.
std::string get_home();
//! Return true when file fn exists.
bool is_file_exist(std::string& fn);
//! List files under dir into files, filtered by suffixs; r presumably
//! enables recursion — confirm in the implementation.
void list_dir(std::string dir, std::vector<std::string>& files, std::string suffixs="", bool r=false);
}
#endif

27
include/sv_video_input.h Normal file
View File

@ -0,0 +1,27 @@
#ifndef __SV_VIDEO_INPUT__
#define __SV_VIDEO_INPUT__
#include "sv_core.h"
#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/tracking.hpp>
#include <string>
#include <chrono>
namespace sv {
//! Concrete camera source; provides the backend open logic for CameraBase.
class Camera : public CameraBase
{
public:
Camera();
~Camera();
protected:
void openImpl(); //!< open the concrete capture device
};
}
#endif

53
include/sv_video_output.h Normal file
View File

@ -0,0 +1,53 @@
#ifndef __SV_VIDEO_OUTPUT__
#define __SV_VIDEO_OUTPUT__
#include "sv_core.h"
#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/tracking.hpp>
#include <string>
#include <chrono>
class BsVideoSaver;
class BsPushStreamer;
namespace sv {
class VideoWriterGstreamerImpl;
class VideoStreamerGstreamerImpl;
//! Concrete video writer with GStreamer and FFmpeg backends (whichever was
//! compiled in).
class VideoWriter : public VideoWriterBase
{
public:
VideoWriter();
~VideoWriter();
protected:
bool setupImpl(std::string file_name_);
bool isOpenedImpl();
void writeImpl(cv::Mat img_);
void releaseImpl();
VideoWriterGstreamerImpl* _gstreamer_impl; //!< GStreamer backend (forward-declared)
BsVideoSaver* _ffmpeg_impl; //!< FFmpeg backend (forward-declared)
};
//! Concrete video streamer with GStreamer and FFmpeg backends (whichever was
//! compiled in).
class VideoStreamer : public VideoStreamerBase
{
public:
VideoStreamer();
~VideoStreamer();
protected:
bool setupImpl();
bool isOpenedImpl();
void writeImpl(cv::Mat img_);
void releaseImpl();
VideoStreamerGstreamerImpl* _gstreamer_impl; //!< GStreamer backend (forward-declared)
BsPushStreamer* _ffmpeg_impl; //!< FFmpeg backend (forward-declared)
};
}
#endif

12
include/sv_world.h Normal file
View File

@ -0,0 +1,12 @@
#ifndef __SV__WORLD__
#define __SV__WORLD__
#include "sv_core.h"
#include "sv_common_det.h"
#include "sv_landing_det.h"
#include "sv_tracking.h"
#include "sv_video_input.h"
#include "sv_video_output.h"
#endif

120
samples/SpireCVDet.cpp Normal file
View File

@ -0,0 +1,120 @@
#include "yolov7/cuda_utils.h"
#include "yolov7/logging.h"
#include "yolov7/utils.h"
#include "yolov7/preprocess.h"
#include "yolov7/postprocess.h"
#include "yolov7/model.h"
#include <iostream>
#include <chrono>
#include <cmath>
using namespace nvinfer1;
static Logger gLogger;
const static int kInputH = 640;
const static int kInputW = 640;
const static int kInputH_HD = 1280;
const static int kInputW_HD = 1280;
const static int kOutputSize = kMaxNumOutputBbox * sizeof(Detection) / sizeof(float) + 1;
bool parse_args(int argc, char** argv, std::string& wts, std::string& engine, bool& is_p6, float& gd, float& gw, std::string& img_dir, int& n_classes) {
if (argc < 4) return false;
if (std::string(argv[1]) == "-s" && (argc == 6 || argc == 8)) {
wts = std::string(argv[2]);
engine = std::string(argv[3]);
n_classes = atoi(argv[4]);
if (n_classes < 1)
return false;
auto net = std::string(argv[5]);
if (net[0] == 'n') {
gd = 0.33;
gw = 0.25;
} else if (net[0] == 's') {
gd = 0.33;
gw = 0.50;
} else if (net[0] == 'm') {
gd = 0.67;
gw = 0.75;
} else if (net[0] == 'l') {
gd = 1.0;
gw = 1.0;
} else if (net[0] == 'x') {
gd = 1.33;
gw = 1.25;
} else if (net[0] == 'c' && argc == 8) {
gd = atof(argv[6]);
gw = atof(argv[7]);
} else {
return false;
}
if (net.size() == 2 && net[1] == '6') {
is_p6 = true;
}
} else {
return false;
}
return true;
}
/**
 * Builds a YOLO detection TensorRT engine from the .wts weights file and
 * writes the serialized plan to engine_name. is_p6 selects the 1280x1280 P6
 * network; gd/gw are the depth/width multipliers; n_classes is the number of
 * output classes. Asserts (aborts) on build or file-open failure.
 */
void serialize_engine(unsigned int max_batchsize, bool& is_p6, float& gd, float& gw, std::string& wts_name, std::string& engine_name, int n_classes) {
// Create builder
IBuilder* builder = createInferBuilder(gLogger);
IBuilderConfig* config = builder->createBuilderConfig();
// Create model to populate the network, then set the outputs and create an engine
ICudaEngine *engine = nullptr;
if (is_p6) {
engine = build_det_p6_engine(max_batchsize, builder, config, DataType::kFLOAT, gd, gw, wts_name, kInputH_HD, kInputW_HD, n_classes);
} else {
engine = build_det_engine(max_batchsize, builder, config, DataType::kFLOAT, gd, gw, wts_name, kInputH, kInputW, n_classes);
}
assert(engine != nullptr);
// Serialize the engine
IHostMemory* serialized_engine = engine->serialize();
assert(serialized_engine != nullptr);
// Save engine to file
std::ofstream p(engine_name, std::ios::binary);
if (!p) {
std::cerr << "Could not open plan output file" << std::endl;
assert(false);
}
p.write(reinterpret_cast<const char*>(serialized_engine->data()), serialized_engine->size());
// Close everything down.
// NOTE(review): destroy() is TensorRT's legacy release API (deprecated in
// TRT 8 in favor of delete) — confirm against the TensorRT version in use.
engine->destroy();
builder->destroy();
config->destroy();
serialized_engine->destroy();
}
/**
 * SpireCVDet entry point: parses arguments and, in -s mode, serializes a
 * detection engine from .wts weights to a TensorRT plan file.
 * Returns 0 on success, -1 on bad arguments.
 */
int main(int argc, char** argv) {
cudaSetDevice(kGpuId);
std::string wts_name = "";
std::string engine_name = "";
bool is_p6 = false;
float gd = 0.0f, gw = 0.0f;
std::string img_dir;
int n_classes;
if (!parse_args(argc, argv, wts_name, engine_name, is_p6, gd, gw, img_dir, n_classes)) {
std::cerr << "arguments not right!" << std::endl;
std::cerr << "./SpireCVDet -s [.wts] [.engine] n_classes [n/s/m/l/x/n6/s6/m6/l6/x6 or c/c6 gd gw] // serialize model to plan file" << std::endl;
// std::cerr << "./SpireCVDet -d [.engine] ../images // deserialize plan file and run inference" << std::endl;
return -1;
}
std::cout << "n_classes: " << n_classes << std::endl;
// Create a model using the API directly and serialize it to a file
if (!wts_name.empty()) {
serialize_engine(kBatchSize, is_p6, gd, gw, wts_name, engine_name, n_classes);
return 0;
}
return 0;
}

120
samples/SpireCVSeg.cpp Normal file
View File

@ -0,0 +1,120 @@
#include "yolov7/config.h"
#include "yolov7/cuda_utils.h"
#include "yolov7/logging.h"
#include "yolov7/utils.h"
#include "yolov7/preprocess.h"
#include "yolov7/postprocess.h"
#include "yolov7/model.h"
#include <iostream>
#include <chrono>
#include <cmath>
using namespace nvinfer1;
static Logger gLogger;
const static int kInputH = 640;
const static int kInputW = 640;
const static int kOutputSize1 = kMaxNumOutputBbox * sizeof(Detection) / sizeof(float) + 1;
const static int kOutputSize2 = 32 * (kInputH / 4) * (kInputW / 4);
bool parse_args(int argc, char** argv, std::string& wts, std::string& engine, float& gd, float& gw, std::string& img_dir, std::string& labels_filename, int& n_classes) {
if (argc < 4) return false;
if (std::string(argv[1]) == "-s" && (argc == 6 || argc == 8)) {
wts = std::string(argv[2]);
engine = std::string(argv[3]);
n_classes = atoi(argv[4]);
if (n_classes < 1)
return false;
auto net = std::string(argv[5]);
if (net[0] == 'n') {
gd = 0.33;
gw = 0.25;
} else if (net[0] == 's') {
gd = 0.33;
gw = 0.50;
} else if (net[0] == 'm') {
gd = 0.67;
gw = 0.75;
} else if (net[0] == 'l') {
gd = 1.0;
gw = 1.0;
} else if (net[0] == 'x') {
gd = 1.33;
gw = 1.25;
} else if (net[0] == 'c' && argc == 8) {
gd = atof(argv[6]);
gw = atof(argv[7]);
} else {
return false;
}
} else if (std::string(argv[1]) == "-d" && argc == 5) {
engine = std::string(argv[2]);
img_dir = std::string(argv[3]);
labels_filename = std::string(argv[4]);
} else {
return false;
}
return true;
}
/**
 * Builds a YOLO segmentation TensorRT engine from the .wts weights file and
 * writes the serialized plan to engine_name. gd/gw are the depth/width
 * multipliers; n_classes is the number of output classes. Asserts (aborts)
 * on build or file-open failure.
 */
void serialize_engine(unsigned int max_batchsize, float& gd, float& gw, std::string& wts_name, std::string& engine_name, int n_classes) {
// Create builder
IBuilder* builder = createInferBuilder(gLogger);
IBuilderConfig* config = builder->createBuilderConfig();
// Create model to populate the network, then set the outputs and create an engine
ICudaEngine *engine = nullptr;
engine = build_seg_engine(max_batchsize, builder, config, DataType::kFLOAT, gd, gw, wts_name, kInputH, kInputW, n_classes);
assert(engine != nullptr);
// Serialize the engine
IHostMemory* serialized_engine = engine->serialize();
assert(serialized_engine != nullptr);
// Save engine to file
std::ofstream p(engine_name, std::ios::binary);
if (!p) {
std::cerr << "Could not open plan output file" << std::endl;
assert(false);
}
p.write(reinterpret_cast<const char*>(serialized_engine->data()), serialized_engine->size());
// Close everything down.
// NOTE(review): destroy() is TensorRT's legacy release API (deprecated in
// TRT 8 in favor of delete) — confirm against the TensorRT version in use.
engine->destroy();
builder->destroy();
config->destroy();
serialized_engine->destroy();
}
/**
 * SpireCVSeg entry point: parses arguments and, in -s mode, serializes a
 * segmentation engine from .wts weights to a TensorRT plan file. The -d
 * (inference) path only fills variables here; its handling is commented out.
 * Returns 0 on success, -1 on bad arguments.
 */
int main(int argc, char** argv) {
cudaSetDevice(kGpuId);
std::string wts_name = "";
std::string engine_name = "";
std::string labels_filename = "";
float gd = 0.0f, gw = 0.0f;
int n_classes;
std::string img_dir;
if (!parse_args(argc, argv, wts_name, engine_name, gd, gw, img_dir, labels_filename, n_classes)) {
std::cerr << "arguments not right!" << std::endl;
std::cerr << "./SpireCVSeg -s [.wts] [.engine] n_classes [n/s/m/l/x or c gd gw] // serialize model to plan file" << std::endl;
// std::cerr << "./SpireCVSeg -d [.engine] ../images coco.txt // deserialize plan file, read the labels file and run inference" << std::endl;
return -1;
}
std::cout << "n_classes: " << n_classes << std::endl;
// Create a model using the API directly and serialize it to a file
if (!wts_name.empty()) {
serialize_engine(kBatchSize, gd, gw, wts_name, engine_name, n_classes);
return 0;
}
return 0;
}

View File

@ -0,0 +1,48 @@
#include <opencv2/highgui.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/calib3d.hpp>
#include <ctime>
namespace {
// Loads the "camera_matrix" and "distortion_coefficients" entries from an
// OpenCV FileStorage file into the output Mats.
// Returns false when the file cannot be opened.
inline static bool readCameraParameters(std::string filename, cv::Mat &camMatrix, cv::Mat &distCoeffs) {
    cv::FileStorage storage(filename, cv::FileStorage::READ);
    if (storage.isOpened()) {
        storage["camera_matrix"] >> camMatrix;
        storage["distortion_coefficients"] >> distCoeffs;
        return true;
    }
    return false;
}
// Writes a camera calibration result (image size, flags, intrinsics,
// distortion, reprojection error and a timestamp) to an OpenCV FileStorage
// file. Returns false when the file cannot be opened for writing.
inline static bool saveCameraParams(const std::string &filename, cv::Size imageSize, float aspectRatio, int flags,
const cv::Mat &cameraMatrix, const cv::Mat &distCoeffs, double totalAvgErr) {
cv::FileStorage fs(filename, cv::FileStorage::WRITE);
if (!fs.isOpened())
return false;
// Human-readable timestamp for the "calibration_time" entry.
// NOTE(review): localtime() is not thread-safe; fine for this sample tool.
time_t tt;
time(&tt);
struct tm *t2 = localtime(&tt);
char buf[1024];
strftime(buf, sizeof(buf) - 1, "%c", t2);
fs << "calibration_time" << buf;
fs << "image_width" << imageSize.width;
fs << "image_height" << imageSize.height;
if (flags & cv::CALIB_FIX_ASPECT_RATIO) fs << "aspectRatio" << aspectRatio;
if (flags != 0) {
// NOTE(review): this formatted flags description is built into buf but
// never written to the storage (only the numeric "flags" below is) — in
// the upstream OpenCV sample it is emitted as a comment; dead code here.
sprintf(buf, "flags: %s%s%s%s",
flags & cv::CALIB_USE_INTRINSIC_GUESS ? "+use_intrinsic_guess" : "",
flags & cv::CALIB_FIX_ASPECT_RATIO ? "+fix_aspectRatio" : "",
flags & cv::CALIB_FIX_PRINCIPAL_POINT ? "+fix_principal_point" : "",
flags & cv::CALIB_ZERO_TANGENT_DIST ? "+zero_tangent_dist" : "");
}
fs << "flags" << flags;
fs << "camera_matrix" << cameraMatrix;
fs << "distortion_coefficients" << distCoeffs;
fs << "avg_reprojection_error" << totalAvgErr;
return true;
}
}

View File

@ -0,0 +1,293 @@
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#include <opencv2/highgui.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/aruco/charuco.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>
#include <iostream>
#include "aruco_samples_utility.hpp"
using namespace std;
using namespace cv;
namespace {
// Usage banner printed via CommandLineParser::about().
const char* about =
"Calibration using a ChArUco board\n"
"  To capture a frame for calibration, press 'c',\n"
"  If input comes from video, press any key for next frame\n"
"  To finish capturing, press 'ESC' key and calibration starts.\n";
// Command-line option table (OpenCV CommandLineParser syntax).
// Fix: corrected the user-facing typos "ommited" -> "omitted" and
// "doesnt" -> "doesn't" in the help text.
const char* keys =
"{w | | Number of squares in X direction }"
"{h | | Number of squares in Y direction }"
"{sl | | Square side length (in meters) }"
"{ml | | Marker side length (in meters) }"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"
"DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL = 16}"
"{cd | | Input file with custom dictionary }"
"{@outfile |<none> | Output file with calibrated camera parameters }"
"{v | | Input from video file, if omitted, input comes from camera }"
"{ci | 0 | Camera id if input doesn't come from video (-v) }"
"{dp | | File of marker detector parameters }"
"{rs | false | Apply refind strategy }"
"{zt | false | Assume zero tangential distortion }"
"{a | | Fix aspect ratio (fx/fy) to this value }"
"{pc | false | Fix the principal point at the center }"
"{sc | false | Show detected chessboard corners after calibration }";
}
// Interactive ChArUco camera calibration (OpenCV aruco sample).
// Captures frames on 'c', runs a coarse aruco-only calibration, refines it
// with interpolated ChArUco corners, and saves the result via
// saveCameraParams() (declared in aruco_samples_utility.hpp).
int main(int argc, char *argv[]) {
CommandLineParser parser(argc, argv, keys);
parser.about(about);
// Board geometry, dictionary and output file are all required -> at least 7 args
if(argc < 7) {
parser.printMessage();
return 0;
}
int squaresX = parser.get<int>("w");
int squaresY = parser.get<int>("h");
float squareLength = parser.get<float>("sl");
float markerLength = parser.get<float>("ml");
string outputFile = parser.get<string>(0);
bool showChessboardCorners = parser.get<bool>("sc");
int calibrationFlags = 0;
float aspectRatio = 1;
if(parser.has("a")) {
calibrationFlags |= CALIB_FIX_ASPECT_RATIO;
aspectRatio = parser.get<float>("a");
}
if(parser.get<bool>("zt")) calibrationFlags |= CALIB_ZERO_TANGENT_DIST;
if(parser.get<bool>("pc")) calibrationFlags |= CALIB_FIX_PRINCIPAL_POINT;
// Optional detector-parameter file overrides the defaults
Ptr<aruco::DetectorParameters> detectorParams = makePtr<aruco::DetectorParameters>();
if(parser.has("dp")) {
FileStorage fs(parser.get<string>("dp"), FileStorage::READ);
bool readOk = detectorParams->readDetectorParameters(fs.root());
if(!readOk) {
cerr << "Invalid detector parameters file" << endl;
return 0;
}
}
bool refindStrategy = parser.get<bool>("rs");
int camId = parser.get<int>("ci");
String video;
if(parser.has("v")) {
video = parser.get<String>("v");
}
if(!parser.check()) {
parser.printErrors();
return 0;
}
// Video-file input steps frame-by-frame (waitKey(0)); live camera polls ~10 ms
VideoCapture inputVideo;
int waitTime;
if(!video.empty()) {
inputVideo.open(video);
waitTime = 0;
} else {
inputVideo.open(camId);
waitTime = 10;
}
// Dictionary comes either from a predefined id (-d) or a custom file (-cd)
aruco::Dictionary dictionary = aruco::getPredefinedDictionary(0);
if (parser.has("d")) {
int dictionaryId = parser.get<int>("d");
dictionary = aruco::getPredefinedDictionary(aruco::PredefinedDictionaryType(dictionaryId));
}
else if (parser.has("cd")) {
FileStorage fs(parser.get<std::string>("cd"), FileStorage::READ);
bool readOk = dictionary.aruco::Dictionary::readDictionary(fs.root());
if(!readOk) {
cerr << "Invalid dictionary file" << endl;
return 0;
}
}
else {
cerr << "Dictionary not specified" << endl;
return 0;
}
// create charuco board object
Ptr<aruco::CharucoBoard> charucoboard = new aruco::CharucoBoard(Size(squaresX, squaresY), squareLength, markerLength, dictionary);
Ptr<aruco::Board> board = charucoboard.staticCast<aruco::Board>();
// collect data from each frame: per-frame marker corners, ids, and raw images
vector< vector< vector< Point2f > > > allCorners;
vector< vector< int > > allIds;
vector< Mat > allImgs;
Size imgSize;
while(inputVideo.grab()) {
Mat image, imageCopy;
inputVideo.retrieve(image);
vector< int > ids;
vector< vector< Point2f > > corners, rejected;
// detect markers
aruco::detectMarkers(image, makePtr<aruco::Dictionary>(dictionary), corners, ids, detectorParams, rejected);
// refind strategy to detect more markers (uses board layout to recover rejects)
if(refindStrategy) aruco::refineDetectedMarkers(image, board, corners, ids, rejected);
// interpolate charuco corners
Mat currentCharucoCorners, currentCharucoIds;
if(ids.size() > 0)
aruco::interpolateCornersCharuco(corners, ids, image, charucoboard, currentCharucoCorners,
currentCharucoIds);
// draw results
image.copyTo(imageCopy);
if(ids.size() > 0) aruco::drawDetectedMarkers(imageCopy, corners);
if(currentCharucoCorners.total() > 0)
aruco::drawDetectedCornersCharuco(imageCopy, currentCharucoCorners, currentCharucoIds);
putText(imageCopy, "Press 'c' to add current frame. 'ESC' to finish and calibrate",
Point(10, 20), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255, 0, 0), 2);
imshow("out", imageCopy);
char key = (char)waitKey(waitTime);
if(key == 27) break;
// 'c' captures the current frame for calibration (only if markers were seen)
if(key == 'c' && ids.size() > 0) {
cout << "Frame captured" << endl;
allCorners.push_back(corners);
allIds.push_back(ids);
allImgs.push_back(image);
imgSize = image.size();
}
}
if(allIds.size() < 1) {
cerr << "Not enough captures for calibration" << endl;
return 0;
}
Mat cameraMatrix, distCoeffs;
vector< Mat > rvecs, tvecs;
double repError;
// Seed fx/fy ratio when the aspect ratio is fixed by -a
if(calibrationFlags & CALIB_FIX_ASPECT_RATIO) {
cameraMatrix = Mat::eye(3, 3, CV_64F);
cameraMatrix.at< double >(0, 0) = aspectRatio;
}
// prepare data for calibration: flatten per-frame detections into single lists
vector< vector< Point2f > > allCornersConcatenated;
vector< int > allIdsConcatenated;
vector< int > markerCounterPerFrame;
markerCounterPerFrame.reserve(allCorners.size());
for(unsigned int i = 0; i < allCorners.size(); i++) {
markerCounterPerFrame.push_back((int)allCorners[i].size());
for(unsigned int j = 0; j < allCorners[i].size(); j++) {
allCornersConcatenated.push_back(allCorners[i][j]);
allIdsConcatenated.push_back(allIds[i][j]);
}
}
// calibrate camera using aruco markers (coarse estimate that seeds the
// charuco-corner interpolation below)
double arucoRepErr;
arucoRepErr = aruco::calibrateCameraAruco(allCornersConcatenated, allIdsConcatenated,
markerCounterPerFrame, board, imgSize, cameraMatrix,
distCoeffs, noArray(), noArray(), calibrationFlags);
// prepare data for charuco calibration
int nFrames = (int)allCorners.size();
vector< Mat > allCharucoCorners;
vector< Mat > allCharucoIds;
vector< Mat > filteredImages;
allCharucoCorners.reserve(nFrames);
allCharucoIds.reserve(nFrames);
for(int i = 0; i < nFrames; i++) {
// interpolate using camera parameters
Mat currentCharucoCorners, currentCharucoIds;
aruco::interpolateCornersCharuco(allCorners[i], allIds[i], allImgs[i], charucoboard,
currentCharucoCorners, currentCharucoIds, cameraMatrix,
distCoeffs);
allCharucoCorners.push_back(currentCharucoCorners);
allCharucoIds.push_back(currentCharucoIds);
filteredImages.push_back(allImgs[i]);
}
if(allCharucoCorners.size() < 4) {
cerr << "Not enough corners for calibration" << endl;
return 0;
}
// calibrate camera using charuco (refined, final estimate)
repError =
aruco::calibrateCameraCharuco(allCharucoCorners, allCharucoIds, charucoboard, imgSize,
cameraMatrix, distCoeffs, rvecs, tvecs, calibrationFlags);
bool saveOk = saveCameraParams(outputFile, imgSize, aspectRatio, calibrationFlags,
cameraMatrix, distCoeffs, repError);
if(!saveOk) {
cerr << "Cannot save output file" << endl;
return 0;
}
cout << "Rep Error: " << repError << endl;
cout << "Rep Error Aruco: " << arucoRepErr << endl;
cout << "Calibration saved to " << outputFile << endl;
// show interpolated charuco corners for debugging
if(showChessboardCorners) {
for(unsigned int frame = 0; frame < filteredImages.size(); frame++) {
Mat imageCopy = filteredImages[frame].clone();
if(allIds[frame].size() > 0) {
if(allCharucoCorners[frame].total() > 0) {
aruco::drawDetectedCornersCharuco( imageCopy, allCharucoCorners[frame],
allCharucoIds[frame]);
}
}
imshow("out", imageCopy);
char key = (char)waitKey(0);
if(key == 27) break;
}
}
return 0;
}

View File

@ -0,0 +1,74 @@
#include <iostream>
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
using namespace std;
// SpireCV demo: detect ArUco markers from webcam 0 and print per-marker info.
int main(int argc, char *argv[]) {
// Instantiate the ArUco detector class
sv::ArucoDetector ad;
// Load camera parameters manually (can be skipped for Amov G1-style gimbals/
// cameras, whose parameter files are downloaded automatically)
ad.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// Open the camera
sv::Camera cap;
// cap.setWH(640, 480);
// cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
// OpenCV Mat holding the current frame
cv::Mat img;
int frame_id = 0;
while (1)
{
// Per-frame detection-result container
sv::TargetsInFrame tgts(frame_id++);
// Read one frame into img
cap.read(img);
cv::resize(img, img, cv::Size(ad.image_width, ad.image_height));
// Run ArUco marker detection
ad.detect(img, tgts);
// Draw the detections onto img
sv::drawTargetsInFrame(img, tgts);
// NOTE(review): frame_id was already incremented above, so this prints
// tgts' frame id + 1 — confirm whether that off-by-one is intended.
printf("Frame-[%d]\n", frame_id);
// Current detection FPS
printf("  FPS = %.2f\n", tgts.fps);
// Camera field of view in degrees
printf("  FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
// Input image size in pixels
printf("  Frame Size (width, height) = (%d, %d)\n", tgts.width, tgts.height);
for (int i=0; i<tgts.targets.size(); i++)
{
printf("Frame-[%d], Aruco-[%d]\n", frame_id, i);
// Marker center: cx,cy normalized to [0, 1], plus pixel coordinates
printf("  Aruco Center (cx, cy) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
tgts.targets[i].cx, tgts.targets[i].cy,
int(tgts.targets[i].cx * tgts.width),
int(tgts.targets[i].cy * tgts.height));
// Bounding-box size: w,h normalized to (0, 1], plus pixel size
printf("  Aruco Size (w, h) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
tgts.targets[i].w, tgts.targets[i].h,
int(tgts.targets[i].w * tgts.width),
int(tgts.targets[i].h * tgts.height));
// Marker yaw angle, range [-180, 180]
printf("  Aruco Yaw-angle = %.2f\n", tgts.targets[i].yaw_a);
// Marker category string, "aruco-?"
printf("  Aruco Category = %s\n", tgts.targets[i].category.c_str());
// Marker ID
printf("  Aruco Tracked-ID = %d\n", tgts.targets[i].tracked_id);
// Line-of-sight angles, dependent on the camera field of view
printf("  Aruco Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[i].los_ax, tgts.targets[i].los_ay);
// 3-D position in the camera frame (depends on marker size & camera params)
printf("  Aruco Position = (x, y, z) = (%.3f, %.3f, %.3f)\n", tgts.targets[i].px, tgts.targets[i].py, tgts.targets[i].pz);
}
// Display the annotated frame
cv::imshow("img", img);
cv::waitKey(10);
}
return 0;
}

View File

@ -0,0 +1,27 @@
#include <iostream>
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
using namespace std;
// Minimal SpireCV camera preview: grab frames from webcam 0 and display them.
int main(int argc, char *argv[]) {
    // Open the camera; resolution/FPS setters left at their defaults.
    sv::Camera cap;
    // cap.setWH(640, 480);
    // cap.setFps(30);
    cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
    // Frame buffer reused across iterations.
    cv::Mat img;
    for (;;)
    {
        cap.read(img);          // grab the next frame
        cv::imshow("img", img); // show it
        cv::waitKey(10);        // ~10 ms UI event pump
    }
    return 0;
}

View File

@ -0,0 +1,72 @@
#include <iostream>
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
using namespace std;
// SpireCV demo: generic object detection from webcam 0, printing per-object info.
int main(int argc, char *argv[]) {
// Instantiate the common-object detector class
sv::CommonObjectDetector cod;
// Load camera parameters manually (can be skipped for Amov G1-style gimbals/
// cameras, whose parameter files are downloaded automatically)
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// Open the camera
sv::Camera cap;
// cap.setWH(640, 480);
// cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
// OpenCV Mat holding the current frame
cv::Mat img;
int frame_id = 0;
while (1)
{
// Per-frame detection-result container
sv::TargetsInFrame tgts(frame_id++);
// Read one frame into img
cap.read(img);
cv::resize(img, img, cv::Size(cod.image_width, cod.image_height));
// Run generic object detection
cod.detect(img, tgts);
// Draw the detections onto img
sv::drawTargetsInFrame(img, tgts);
// Print the detection results for this frame
printf("Frame-[%d]\n", frame_id);
// Current detection FPS
printf("  FPS = %.2f\n", tgts.fps);
// Camera field of view in degrees
printf("  FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
// Input image size in pixels
printf("  Frame Size (width, height) = (%d, %d)\n", tgts.width, tgts.height);
for (int i=0; i<tgts.targets.size(); i++)
{
printf("Frame-[%d], Object-[%d]\n", frame_id, i);
// Object center: cx,cy normalized to [0, 1], plus pixel coordinates
printf("  Object Center (cx, cy) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
tgts.targets[i].cx, tgts.targets[i].cy,
int(tgts.targets[i].cx * tgts.width),
int(tgts.targets[i].cy * tgts.height));
// Bounding-box size: w,h normalized to (0, 1], plus pixel size
printf("  Object Size (w, h) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
tgts.targets[i].w, tgts.targets[i].h,
int(tgts.targets[i].w * tgts.width),
int(tgts.targets[i].h * tgts.height));
// Detection confidence
printf("  Object Score = %.3f\n", tgts.targets[i].score);
// Category string and numeric id
printf("  Object Category = %s, Category ID = [%d]\n", tgts.targets[i].category.c_str(), tgts.targets[i].category_id);
// Line-of-sight angles, dependent on the camera field of view
printf("  Object Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[i].los_ax, tgts.targets[i].los_ay);
// 3-D position in the camera frame (depends on object size & camera params)
printf("  Object Position = (x, y, z) = (%.3f, %.3f, %.3f)\n", tgts.targets[i].px, tgts.targets[i].py, tgts.targets[i].pz);
}
// Display the annotated frame
cv::imshow("img", img);
cv::waitKey(10);
}
return 0;
}

View File

@ -0,0 +1,193 @@
#include <iostream>
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
using namespace std;
// Display window name
static const std::string RGB_WINDOW = "Image window";
// Rectangle selected by the user (ROI handed to the tracker)
cv::Rect rect_sel;
// Point where the selection / click started
cv::Point pt_origin;
// True when a new selection rectangle is ready for the tracker
bool b_renew_ROI = false;
// True while tracking is active
bool b_begin_TRACK = false;
// Mouse callback implementing the click/selection logic
void onMouse(int event, int x, int y, int, void*);
// Simple 2-D point used for the point-in-rectangle test below.
struct node {
double x,y;
};
// Rectangle corners (p1..p4, clockwise) and the query point p, filled in main().
node p1,p2,p3,p4;
node p;
// 2-D cross product of (p2 - p1) with (p - p1).
// Its sign tells on which side of the directed line p1->p2 the point p lies;
// main() uses this to test whether a click falls inside a detection box.
double getCross(node p1, node p2, node p) {
    double ux = p2.x - p1.x;
    double uy = p2.y - p1.y;
    double vx = p.x - p1.x;
    double vy = p.y - p1.y;
    return ux * vy - vx * uy;
}
// True while the mouse button is held down
bool b_clicked = false;
// True while the demo is in detection (not tracking) mode
bool detect_tracking = true;
// SpireCV demo: click-to-track. Runs generic object detection; when the user
// clicks inside a detection box, that box becomes the ROI for the single-object
// tracker. A right click returns to detection mode (see onMouse below).
int main(int argc, char *argv[]) {
// Create a window for the mouse interaction
cv::namedWindow(RGB_WINDOW);
// Install the mouse callback that implements the selection logic
cv::setMouseCallback(RGB_WINDOW, onMouse, 0);
// Instantiate the single-object tracker
sv::SingleObjectTracker sot;
// Load camera parameters manually (can be skipped for Amov G1-style gimbals/
// cameras, whose parameter files are downloaded automatically)
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
sv::CommonObjectDetector cod;
cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// Open the camera
sv::Camera cap;
// cap.setWH(640, 480);
// cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
// cv::VideoCapture cap("/home/amov/SpireCV/test/tracking_1280x720.mp4");
// OpenCV Mat holding the current frame
cv::Mat img;
int frame_id = 0;
while (1)
{
if (detect_tracking == true) {
// Detection mode: per-frame detection-result container
sv::TargetsInFrame tgts(frame_id++);
// Read one frame into img
cap.read(img);
cv::resize(img, img, cv::Size(cod.image_width, cod.image_height));
// Run generic object detection
cod.detect(img, tgts);
// Draw the detections onto img
sv::drawTargetsInFrame(img, tgts);
// Print the detection results for this frame
printf("Frame-[%d]\n", frame_id);
// Current detection FPS
printf("  FPS = %.2f\n", tgts.fps);
// Camera field of view in degrees
printf("  FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
for (int i=0; i<tgts.targets.size(); i++)
{
printf("Frame-[%d], Object-[%d]\n", frame_id, i);
// Object center: cx,cy normalized to [0, 1]
printf("  Object Center (cx, cy) = (%.3f, %.3f)\n", tgts.targets[i].cx, tgts.targets[i].cy);
// Bounding-box size: w,h normalized to (0, 1]
printf("  Object Size (w, h) = (%.3f, %.3f)\n", tgts.targets[i].w, tgts.targets[i].h);
// Detection confidence
printf("  Object Score = %.3f\n", tgts.targets[i].score);
// Category string and numeric id
printf("  Object Category = %s, Category ID = [%d]\n", tgts.targets[i].category.c_str(), tgts.targets[i].category_id);
// Line-of-sight angles, dependent on the camera field of view
printf("  Object Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[i].los_ax, tgts.targets[i].los_ay);
// 3-D position in the camera frame
printf("  Object Position = (x, y, z) = (%.3f, %.3f, %.3f)\n", tgts.targets[i].px, tgts.targets[i].py, tgts.targets[i].pz);
// Compute the four pixel-space corners of this detection box
// (p1 top-left, p2 top-right, p3 bottom-right, p4 bottom-left)
p1.x = tgts.targets[i].cx * tgts.width - tgts.targets[i].w * tgts.width / 2;
p1.y = tgts.targets[i].cy * tgts.height - tgts.targets[i].h * tgts.height / 2;
p2.x = tgts.targets[i].cx * tgts.width + tgts.targets[i].w * tgts.width / 2;
p2.y = tgts.targets[i].cy * tgts.height - tgts.targets[i].h * tgts.height / 2;
p4.x = tgts.targets[i].cx * tgts.width - tgts.targets[i].w * tgts.width / 2;
p4.y = tgts.targets[i].cy * tgts.height + tgts.targets[i].h * tgts.height / 2;
p3.x = tgts.targets[i].cx * tgts.width + tgts.targets[i].w * tgts.width / 2;
p3.y = tgts.targets[i].cy * tgts.height + tgts.targets[i].h * tgts.height / 2;
// Last click position (set by onMouse)
p.x = pt_origin.x;
p.y = pt_origin.y;
std::cout << "p.x " << p.x << "\t" << "p.y " << p.y << std::endl;
// Cross-product point-in-rectangle test: the click is inside the box when
// it lies on the same side of both pairs of opposite edges
if (getCross(p1, p2, p) * getCross(p3, p4, p) >= 0 && getCross(p2, p3, p) * getCross(p4, p1, p) >= 0) {
b_begin_TRACK = false;
detect_tracking = false;
// pt_origin = cv::Point(nor_x, nor_p_y);
// std::cout << "pt_origin " <<nor_x<<"/t"<<nor_p_y<< std::endl;
// Hand the clicked detection box to the tracker as its initial ROI
rect_sel = cv::Rect(p1.x, p1.y, tgts.targets[i].w * tgts.width, tgts.targets[i].h * tgts.height);
// std::cout << rect_sel << std::endl;
b_renew_ROI = true;
frame_id = 0;
printf("rect_sel Yes\n");
}
else {
printf("rect_sel No\n");
}
}
}
else {
// Tracking mode: per-frame detection-result container
sv::TargetsInFrame tgts(frame_id++);
// Read one frame into img
cap.read(img);
cv::resize(img, img, cv::Size(sot.image_width, sot.image_height));
// Single-object tracking logic
// Was a new target just selected?
if (b_renew_ROI)
{
// Initialize the tracker with the newly selected region
sot.init(img, rect_sel);
// std::cout << rect_sel << std::endl;
// Clear the selection flag
b_renew_ROI = false;
// Start tracking
b_begin_TRACK = true;
}
else if (b_begin_TRACK)
{
// Continue tracking from the previous frame's result
sot.track(img, tgts);
// Draw the tracking result onto img
sv::drawTargetsInFrame(img, tgts);
// Print the tracking result
printf("Frame-[%d]\n", frame_id);
// Current tracking FPS
printf("  FPS = %.2f\n", tgts.fps);
// Camera field of view in degrees
printf("  FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
if (tgts.targets.size() > 0)
{
printf("Frame-[%d]\n", frame_id);
// Tracked-target center: cx,cy normalized to [0, 1]
printf("  Tracking Center (cx, cy) = (%.3f, %.3f)\n", tgts.targets[0].cx, tgts.targets[0].cy);
// Tracked-target size: w,h normalized to (0, 1]
printf("  Tracking Size (w, h) = (%.3f, %.3f)\n", tgts.targets[0].w, tgts.targets[0].h);
// Line-of-sight angles, dependent on the camera field of view
printf("  Tracking Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[0].los_ax, tgts.targets[0].los_ay);
}
}
}//end of tracking
// Display the annotated frame
cv::imshow(RGB_WINDOW, img);
cv::waitKey(10);
}
return 0;
}
// Mouse callback for the click-to-track demo.
// Left click: record the click point and switch back to detection mode (main()
// then tests whether the click falls inside a detection box).
// Right click: abort any pending selection/tracking and return to detection.
void onMouse(int event, int x, int y, int, void*)
{
if (b_clicked)
{
// NOTE(review): this resets the click origin to (0,0) on EVERY mouse event
// while b_clicked is set (b_clicked is only ever set by a right click here
// and is never cleared) — confirm this is the intended behavior.
pt_origin.x = 0;
pt_origin.y = 0;
}
// Left button pressed: remember the click point, re-enter detection mode
if (event == cv::EVENT_LBUTTONDOWN)
{
detect_tracking = true;
pt_origin = cv::Point(x, y);
}
else if (event == cv::EVENT_RBUTTONDOWN)
{
// Right button: cancel selection and tracking, back to detection mode
detect_tracking = true;
b_renew_ROI = false;
b_begin_TRACK = false;
b_clicked = true;
}
}

View File

@ -0,0 +1,70 @@
#include <iostream>
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
using namespace std;
// SpireCV demo: ellipse detection from webcam 0, printing per-ellipse info.
int main(int argc, char *argv[]) {
// Instantiate the ellipse detector class
sv::EllipseDetector ed;
// Load camera parameters manually (can be skipped for Amov G1-style gimbals/
// cameras, whose parameter files are downloaded automatically)
ed.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// Open the camera
sv::Camera cap;
// cap.setWH(640, 480);
// cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
// OpenCV Mat holding the current frame
cv::Mat img;
int frame_id = 0;
while (1)
{
// Per-frame detection-result container
sv::TargetsInFrame tgts(frame_id++);
// Read one frame into img
cap.read(img);
cv::resize(img, img, cv::Size(ed.image_width, ed.image_height));
// Run ellipse detection
ed.detect(img, tgts);
// Draw the detections onto img
sv::drawTargetsInFrame(img, tgts);
// Print the ellipse detection results for this frame
printf("Frame-[%d]\n", frame_id);
// Current detection FPS
printf("  FPS = %.2f\n", tgts.fps);
// Camera field of view in degrees
printf("  FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
// Input image size in pixels
printf("  Frame Size (width, height) = (%d, %d)\n", tgts.width, tgts.height);
for (int i=0; i<tgts.targets.size(); i++)
{
printf("Frame-[%d], Ellipse-[%d]\n", frame_id, i);
// Ellipse center: cx,cy normalized to [0, 1], plus pixel coordinates
printf("  Ellipse Center (cx, cy) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
tgts.targets[i].cx, tgts.targets[i].cy,
int(tgts.targets[i].cx * tgts.width),
int(tgts.targets[i].cy * tgts.height));
// Bounding-box size: w,h normalized to (0, 1], plus pixel size
printf("  Ellipse Size (w, h) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
tgts.targets[i].w, tgts.targets[i].h,
int(tgts.targets[i].w * tgts.width),
int(tgts.targets[i].h * tgts.height));
// Detection confidence
printf("  Ellipse Score = %.3f\n", tgts.targets[i].score);
// Line-of-sight angles, dependent on the camera field of view
printf("  Ellipse Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[i].los_ax, tgts.targets[i].los_ay);
// 3-D position in the camera frame (depends on ellipse radius & camera params)
printf("  Ellipse Position = (x, y, z) = (%.3f, %.3f, %.3f)\n", tgts.targets[i].px, tgts.targets[i].py, tgts.targets[i].pz);
}
// Display the annotated frame
cv::imshow("img", img);
cv::waitKey(10);
}
return 0;
}

View File

@ -0,0 +1,72 @@
#include <iostream>
#include <string>
// 包含SpireCV SDK头文件
#include <sv_world.h>
using namespace std;
// SpireCV demo: circular landing-marker detection from webcam 0.
int main(int argc, char *argv[]) {
// Instantiate the landing-marker detector class
sv::LandingMarkerDetector lmd;
// Load camera parameters manually (can be skipped for Amov G1-style gimbals/
// cameras, whose parameter files are downloaded automatically)
lmd.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// Open the camera
sv::Camera cap;
// cap.setWH(640, 480);
// cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
// OpenCV Mat holding the current frame
cv::Mat img;
int frame_id = 0;
while (1)
{
// Per-frame detection-result container
sv::TargetsInFrame tgts(frame_id++);
// Read one frame into img
cap.read(img);
cv::resize(img, img, cv::Size(lmd.image_width, lmd.image_height));
// Run landing-marker detection
lmd.detect(img, tgts);
// Draw the detections onto img
sv::drawTargetsInFrame(img, tgts);
// Print the landing-marker detection results for this frame
printf("Frame-[%d]\n", frame_id);
// Current detection FPS
printf("  FPS = %.2f\n", tgts.fps);
// Camera field of view in degrees
printf("  FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
// Input image size in pixels
printf("  Frame Size (width, height) = (%d, %d)\n", tgts.width, tgts.height);
for (int i=0; i<tgts.targets.size(); i++)
{
printf("Frame-[%d], Marker-[%d]\n", frame_id, i);
// Marker center: cx,cy normalized to [0, 1], plus pixel coordinates
printf("  Marker Center (cx, cy) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
tgts.targets[i].cx, tgts.targets[i].cy,
int(tgts.targets[i].cx * tgts.width),
int(tgts.targets[i].cy * tgts.height));
// Bounding-box size: w,h normalized to (0, 1], plus pixel size
printf("  Marker Size (w, h) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
tgts.targets[i].w, tgts.targets[i].h,
int(tgts.targets[i].w * tgts.width),
int(tgts.targets[i].h * tgts.height));
// Detection confidence
printf("  Marker Score = %.3f\n", tgts.targets[i].score);
// Marker category string: "h", "x", "1", "2", "3"...
printf("  Marker Category = %s, Category ID = %d\n", tgts.targets[i].category.c_str(), tgts.targets[i].category_id);
// Line-of-sight angles, dependent on the camera field of view
printf("  Marker Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[i].los_ax, tgts.targets[i].los_ay);
// 3-D position in the camera frame (depends on marker radius & camera params)
printf("  Marker Position = (x, y, z) = (%.3f, %.3f, %.3f)\n", tgts.targets[i].px, tgts.targets[i].py, tgts.targets[i].pz);
}
// Display the annotated frame
cv::imshow("img", img);
cv::waitKey(10);
}
return 0;
}

View File

@ -0,0 +1,139 @@
#include <iostream>
#include <string>
// SpireCV SDK header
#include <sv_world.h>
using namespace std;
// Display window name
static const std::string RGB_WINDOW = "Image window";
// Rectangle selected by the user (ROI handed to the tracker)
cv::Rect rect_sel;
// Point where the drag-selection started
cv::Point pt_origin;
// True while the left mouse button is held down
bool b_clicked = false;
// True when a new selection rectangle is ready for the tracker
bool b_renew_ROI = false;
// True while tracking is active
bool b_begin_TRACK = false;
// Mouse callback implementing the drag-selection logic
void onMouse(int event, int x, int y, int, void*);
// SpireCV demo: single-object tracking. The user drags a rectangle in the
// window (see onMouse below); that ROI initializes the tracker, which then
// follows the target frame-to-frame.
int main(int argc, char *argv[]) {
// Create a window that supports drag-selection
cv::namedWindow(RGB_WINDOW);
// Install the mouse callback that implements the selection logic
cv::setMouseCallback(RGB_WINDOW, onMouse, 0);
// Instantiate the single-object tracker
sv::SingleObjectTracker sot;
// Load camera parameters manually (can be skipped for Amov G1-style gimbals/
// cameras, whose parameter files are downloaded automatically)
sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
// sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1280x720.yaml");
// sot.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_1920x1080.yaml");
// Open the camera
sv::Camera cap;
// cap.setWH(640, 480);
// cap.setFps(30);
cap.open(sv::CameraType::WEBCAM, 0); // CameraID 0
// cv::VideoCapture cap("/home/amov/SpireCV/test/tracking_1280x720.mp4");
// OpenCV Mat holding the current frame
cv::Mat img;
int frame_id = 0;
while (1)
{
// Per-frame detection-result container
sv::TargetsInFrame tgts(frame_id++);
// Read one frame into img
cap.read(img);
cv::resize(img, img, cv::Size(sot.image_width, sot.image_height));
// Single-object tracking logic
// Was a new target just selected by the user?
if (b_renew_ROI)
{
// Initialize the tracker with the newly selected region
sot.init(img, rect_sel);
// std::cout << rect_sel << std::endl;
// Clear the selection flag
b_renew_ROI = false;
// Start tracking
b_begin_TRACK = true;
}
else if (b_begin_TRACK)
{
// Continue tracking from the previous frame's result
sot.track(img, tgts);
// Draw the tracking result onto img
sv::drawTargetsInFrame(img, tgts);
// Print the tracking result
printf("Frame-[%d]\n", frame_id);
// Current tracking FPS
printf("  FPS = %.2f\n", tgts.fps);
// Camera field of view in degrees
printf("  FOV (fx, fy) = (%.2f, %.2f)\n", tgts.fov_x, tgts.fov_y);
// Input image size in pixels
printf("  Frame Size (width, height) = (%d, %d)\n", tgts.width, tgts.height);
if (tgts.targets.size() > 0)
{
printf("Frame-[%d]\n", frame_id);
// Tracked-target center: cx,cy normalized to [0, 1], plus pixel coordinates
printf("  Tracking Center (cx, cy) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
tgts.targets[0].cx, tgts.targets[0].cy,
int(tgts.targets[0].cx * tgts.width),
int(tgts.targets[0].cy * tgts.height));
// Tracked-target size: w,h normalized to (0, 1], plus pixel size
printf("  Tracking Size (w, h) = (%.3f, %.3f), in Pixels = ((%d, %d))\n",
tgts.targets[0].w, tgts.targets[0].h,
int(tgts.targets[0].w * tgts.width),
int(tgts.targets[0].h * tgts.height));
// Line-of-sight angles, dependent on the camera field of view
printf("  Tracking Line-of-sight (ax, ay) = (%.3f, %.3f)\n", tgts.targets[0].los_ax, tgts.targets[0].los_ay);
}
}
// Display the annotated frame
cv::imshow(RGB_WINDOW, img);
cv::waitKey(10);
}
return 0;
}
// Mouse callback implementing click-and-drag ROI selection.
// While the left button is held, the selection rectangle is stretched between
// the press origin and the current cursor position; on release, selections of
// at least 64 square pixels are handed to the tracker via b_renew_ROI.
void onMouse(int event, int x, int y, int, void*)
{
    // Drag in progress: keep rect_sel spanning origin..cursor.
    if (b_clicked)
    {
        int left = (pt_origin.x < x) ? pt_origin.x : x;
        int top = (pt_origin.y < y) ? pt_origin.y : y;
        rect_sel.x = left;
        rect_sel.y = top;
        rect_sel.width = abs(x - pt_origin.x);
        rect_sel.height = abs(y - pt_origin.y);
    }
    if (event == cv::EVENT_LBUTTONDOWN)
    {
        // Start a fresh selection; suspend tracking until it is confirmed.
        b_begin_TRACK = false;
        b_clicked = true;
        pt_origin = cv::Point(x, y);
        rect_sel = cv::Rect(x, y, 0, 0);
    }
    else if (event == cv::EVENT_LBUTTONUP)
    {
        // Accept only selections of at least 64 px area (~8x8 pixels).
        if (rect_sel.width * rect_sel.height >= 64)
        {
            b_clicked = false;
            b_renew_ROI = true;
        }
    }
}

View File

@ -0,0 +1,248 @@
#include <iostream>
#include <string>
#include <arpa/inet.h>
#include <string.h>
#include <stdio.h>
#define SERV_PORT 20166
typedef unsigned char byte;
using namespace std;
// UDP receiver demo: listens on SERV_PORT for SpireCV detection-result frames
// (layout: 0xFA 0xFC <u16 len> <payload> 0xFB 0xFD) and prints decoded fields.
//
// Fixes over the original:
//  * Every bitmask test was written as `(*index) & 0xNN == 0xNN`, which C++
//    parses as `(*index) & (0xNN == 0xNN)` == `(*index) & 1` because `==`
//    binds tighter than `&` — so every flag except bit 0 was tested wrongly.
//    All tests are now parenthesized as `((*index) & 0xNN) == 0xNN`.
//  * recvfrom()'s return value is checked; a negative value previously flowed
//    into memcpy as a huge size_t.
//  * Buffer sizes are `const int` so the arrays are not non-standard VLAs.
int main(int argc, char *argv[]) {
    sockaddr_in servaddr;
    int sockfd;
    sockfd = socket(AF_INET, SOCK_DGRAM, 0);
    bzero(&servaddr, sizeof(servaddr));
    servaddr.sin_family = AF_INET;
    servaddr.sin_port = htons(SERV_PORT);
    servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
    bind(sockfd, (struct sockaddr *)&servaddr, sizeof(servaddr));
    const int upd_msg_len = 1024 * 6; // max_objects = 100
    byte upd_msg[upd_msg_len];
    const int msg_queue_len = 1024 * 1024; // 1M reassembly queue
    byte msg_queue[msg_queue_len];
    int addr_len = sizeof(struct sockaddr_in);
    // msg_queue[start_index..end_index) holds bytes not yet consumed
    int start_index = 0, end_index = 0;
    while (1)
    {
        int n = recvfrom(sockfd, upd_msg, upd_msg_len, 0, (struct sockaddr *)&servaddr, reinterpret_cast<socklen_t*>(&addr_len));
        if (n <= 0) continue; // ignore errors and empty datagrams
        // Compact the queue when the new datagram would overflow it
        if (end_index + n > msg_queue_len)
        {
            int m = end_index - start_index;
            memcpy((void*) &msg_queue[0], (const void*) &msg_queue[start_index], (size_t) m);
            start_index = 0;
            end_index = m;
        }
        memcpy((void*) &msg_queue[end_index], (const void*) upd_msg, (size_t) n);
        end_index += n;
        cout << n << ", " << start_index << ", " << end_index << endl;
        // processing: scan the queue for complete frames
        while (start_index < end_index)
        {
            int i = start_index;
            if (i > 0 && msg_queue[i-1] == 0xFA && msg_queue[i] == 0xFC) // frame start
            {
                cout << "FOUND 0xFAFC" << endl;
                i++;
                if (end_index - i >= 2) // read length
                {
                    unsigned short* len = reinterpret_cast<unsigned short*>(&msg_queue[i]);
                    int ilen = (int) (*len);
                    cout << "LEN: " << ilen << endl;
                    // Complete frame: payload present and trailer bytes match
                    if (end_index - i >= ilen + 2 && msg_queue[i+ilen] == 0xFB && msg_queue[i+ilen+1] == 0xFD)
                    {
                        cout << "FOUND 0xFAFC & 0xFBFD" << endl;
                        byte* msg_type = reinterpret_cast<byte*>(&msg_queue[i+2]);
                        cout << "Type: " << (int) *msg_type << endl;
                        // Timestamp fields
                        unsigned short* year = reinterpret_cast<unsigned short*>(&msg_queue[i+7]);
                        byte* month = reinterpret_cast<byte*>(&msg_queue[i+9]);
                        byte* day = reinterpret_cast<byte*>(&msg_queue[i+10]);
                        byte* hour = reinterpret_cast<byte*>(&msg_queue[i+11]);
                        byte* minute = reinterpret_cast<byte*>(&msg_queue[i+12]);
                        byte* second = reinterpret_cast<byte*>(&msg_queue[i+13]);
                        unsigned short* millisecond = reinterpret_cast<unsigned short*>(&msg_queue[i+14]);
                        cout << "Time: " << *year << "-" << (int) *month << "-" << (int) *day << " " << (int) *hour << ":" << (int) *minute << ":" << (int) *second << " " << *millisecond << endl;
                        // Presence-flag bytes for the frame-level fields
                        byte* index_d1 = reinterpret_cast<byte*>(&msg_queue[i+16]);
                        byte* index_d2 = reinterpret_cast<byte*>(&msg_queue[i+17]);
                        byte* index_d3 = reinterpret_cast<byte*>(&msg_queue[i+18]);
                        byte* index_d4 = reinterpret_cast<byte*>(&msg_queue[i+19]);
                        int mp = i+20; // read cursor into the optional-field area
                        if (((*index_d4) & 0x01) == 0x01)
                        {
                            int* frame_id = reinterpret_cast<int*>(&msg_queue[mp]);
                            mp += 4;
                            cout << "FrameID: " << *frame_id << endl;
                        }
                        if (((*index_d4) & 0x02) == 0x02 && ((*index_d4) & 0x04) == 0x04)
                        {
                            int* width = reinterpret_cast<int*>(&msg_queue[mp]);
                            mp += 4;
                            int* height = reinterpret_cast<int*>(&msg_queue[mp]);
                            mp += 4;
                            cout << "FrameSize: (" << *width << ", " << *height << ")" << endl;
                        }
                        int n_objects = 0;
                        if (((*index_d4) & 0x08) == 0x08)
                        {
                            n_objects = *reinterpret_cast<int*>(&msg_queue[mp]);
                            mp += 4;
                            cout << "N_Objects: " << n_objects << endl;
                        }
                        if (((*index_d4) & 0x10) == 0x10)
                        {
                            float* fps = reinterpret_cast<float*>(&msg_queue[mp]);
                            mp += 4;
                            cout << "FPS: " << *fps << endl;
                        }
                        if (((*index_d4) & 0x20) == 0x20 && ((*index_d4) & 0x40) == 0x40)
                        {
                            float* fov_x = reinterpret_cast<float*>(&msg_queue[mp]);
                            mp += 4;
                            float* fov_y = reinterpret_cast<float*>(&msg_queue[mp]);
                            mp += 4;
                            cout << "FOV: (" << *fov_x << ", " << *fov_y << ")" << endl;
                        }
                        if (((*index_d4) & 0x80) == 0x80 && ((*index_d3) & 0x01) == 0x01 && ((*index_d3) & 0x02) == 0x02)
                        {
                            float* pod_patch = reinterpret_cast<float*>(&msg_queue[mp]);
                            mp += 4;
                            float* pod_roll = reinterpret_cast<float*>(&msg_queue[mp]);
                            mp += 4;
                            float* pod_yaw = reinterpret_cast<float*>(&msg_queue[mp]);
                            mp += 4;
                            cout << "POD-Angles: (" << *pod_patch << ", " << *pod_roll << ", " << *pod_yaw << ")" << endl;
                        }
                        if (((*index_d3) & 0x04) == 0x04 && ((*index_d3) & 0x08) == 0x08 && ((*index_d3) & 0x10) == 0x10)
                        {
                            float* longitude = reinterpret_cast<float*>(&msg_queue[mp]);
                            mp += 4;
                            float* latitude = reinterpret_cast<float*>(&msg_queue[mp]);
                            mp += 4;
                            float* altitude = reinterpret_cast<float*>(&msg_queue[mp]);
                            mp += 4;
                            cout << "UAV-Position: (" << *longitude << ", " << *latitude << ", " << *altitude << ")" << endl;
                        }
                        if (((*index_d3) & 0x20) == 0x20 && ((*index_d3) & 0x40) == 0x40 && ((*index_d3) & 0x80) == 0x80)
                        {
                            float* uav_vx = reinterpret_cast<float*>(&msg_queue[mp]);
                            mp += 4;
                            float* uav_vy = reinterpret_cast<float*>(&msg_queue[mp]);
                            mp += 4;
                            float* uav_vz = reinterpret_cast<float*>(&msg_queue[mp]);
                            mp += 4;
                            cout << "UAV-Speed: (" << *uav_vx << ", " << *uav_vy << ", " << *uav_vz << ")" << endl;
                        }
                        if (((*index_d2) & 0x01) == 0x01)
                        {
                            float* illumination = reinterpret_cast<float*>(&msg_queue[mp]);
                            mp += 4;
                            cout << "Illumination: " << *illumination << endl;
                        }
                        // Per-object records follow the frame-level fields
                        for (int j=0; j<n_objects; j++)
                        {
                            byte* index_f1 = reinterpret_cast<byte*>(&msg_queue[mp]);
                            mp++;
                            byte* index_f2 = reinterpret_cast<byte*>(&msg_queue[mp]);
                            mp++;
                            byte* index_f3 = reinterpret_cast<byte*>(&msg_queue[mp]);
                            mp++;
                            byte* index_f4 = reinterpret_cast<byte*>(&msg_queue[mp]);
                            mp++;
                            if (((*index_f4) & 0x01) == 0x01 && ((*index_f4) & 0x02) == 0x02)
                            {
                                float* cx = reinterpret_cast<float*>(&msg_queue[mp]);
                                mp += 4;
                                float* cy = reinterpret_cast<float*>(&msg_queue[mp]);
                                mp += 4;
                                cout << "  Object-[" << j+1 << "]-CXCY: (" << *cx << ", " << *cy << ")" << endl;
                            }
                            if (((*index_f4) & 0x04) == 0x04 && ((*index_f4) & 0x08) == 0x08)
                            {
                                float* w = reinterpret_cast<float*>(&msg_queue[mp]);
                                mp += 4;
                                float* h = reinterpret_cast<float*>(&msg_queue[mp]);
                                mp += 4;
                                cout << "  Object-[" << j+1 << "]-WH: (" << *w << ", " << *h << ")" << endl;
                            }
                            if (((*index_f4) & 0x10) == 0x10)
                            {
                                float* score = reinterpret_cast<float*>(&msg_queue[mp]);
                                mp += 4;
                                cout << "  Object-[" << j+1 << "]-Score: " << *score << endl;
                            }
                            if (((*index_f4) & 0x20) == 0x20)
                            {
                                int* category_id = reinterpret_cast<int*>(&msg_queue[mp]);
                                mp += 4;
                                cout << "  Object-[" << j+1 << "]-CateID: " << *category_id << endl;
                            }
                            if (((*index_f4) & 0x40) == 0x40)
                            {
                                int* tracked_id = reinterpret_cast<int*>(&msg_queue[mp]);
                                mp += 4;
                                cout << "  Object-[" << j+1 << "]-TrackID: " << *tracked_id << endl;
                            }
                            if (((*index_f4) & 0x80) == 0x80 && ((*index_f3) & 0x01) == 0x01 && ((*index_f3) & 0x02) == 0x02)
                            {
                                float* px = reinterpret_cast<float*>(&msg_queue[mp]);
                                mp += 4;
                                float* py = reinterpret_cast<float*>(&msg_queue[mp]);
                                mp += 4;
                                float* pz = reinterpret_cast<float*>(&msg_queue[mp]);
                                mp += 4;
                                cout << "  Object-[" << j+1 << "]-Position: (" << *px << ", " << *py << ", " << *pz << ")" << endl;
                            }
                            if (((*index_f3) & 0x04) == 0x04 && ((*index_f3) & 0x08) == 0x08)
                            {
                                float* los_ax = reinterpret_cast<float*>(&msg_queue[mp]);
                                mp += 4;
                                float* los_ay = reinterpret_cast<float*>(&msg_queue[mp]);
                                mp += 4;
                                cout << "  Object-[" << j+1 << "]-LOS: (" << *los_ax << ", " << *los_ay << ")" << endl;
                            }
                            if (((*index_f3) & 0x10) == 0x10)
                            {
                                float* yaw_a = reinterpret_cast<float*>(&msg_queue[mp]);
                                mp += 4;
                                cout << "  Object-[" << j+1 << "]-YAW: " << *yaw_a << endl;
                            }
                        }
                        // Consume the whole frame: advance past payload + trailer
                        start_index += ilen + 4;
                    }
                    else if (end_index - i < ilen + 2)
                    {
                        // Frame incomplete — wait for more data
                        break;
                    }
                    else
                    {
                        // Trailer mismatch — resync one byte forward
                        start_index++;
                    }
                }
                else
                {
                    // Length field incomplete — wait for more data
                    break;
                }
            }
            else
            {
                // Not at a frame boundary — resync one byte forward
                start_index++;
            }
        }
    }
    return 0;
}

View File

@ -0,0 +1,60 @@
#include <iostream>
#include <string>
// SpireCV SDK header
#include <sv_world.h>

using namespace std;

int main(int argc, char *argv[])
{
    // Create the ArUco marker detector.
    sv::ArucoDetector aruco_detector;
    // Load camera intrinsics manually. With an Amov gimbal/camera (e.g. G1)
    // this step can be skipped; the parameter file is fetched automatically.
    aruco_detector.loadCameraParams("/home/amov/SpireCV/calib_webcam_640x480.yaml");

    // Open the webcam (camera id 0) at 640x480 / 30 fps.
    sv::Camera camera;
    camera.setWH(640, 480);
    camera.setFps(30);
    camera.open(sv::CameraType::WEBCAM, 0);

    // UDP server used to push the per-frame detection results out.
    sv::UDPServer udp_server;

    // Holds the current frame.
    cv::Mat frame;
    int frame_id = 0;
    for (;;)
    {
        // Per-frame detection-result container.
        sv::TargetsInFrame targets(frame_id++);

        // Grab one frame and run ArUco detection on it.
        camera.read(frame);
        aruco_detector.detect(frame, targets);

        // Fill in demo gimbal / UAV state fields before sending.
        targets.has_pod_info = true;
        targets.pod_patch = 1;
        targets.pod_roll = 2;
        targets.pod_yaw = 3;
        targets.has_uav_pos = true;
        targets.longitude = 1.1234567;
        targets.latitude = 2.2345678;
        targets.altitude = 3.3456789;
        targets.has_uav_vel = true;
        targets.uav_vx = 4;
        targets.uav_vy = 5;
        targets.uav_vz = 6;
        targets.has_ill = true;
        targets.illumination = 7;

        // Send this frame's results over UDP.
        udp_server.send(targets);

        // Overlay the detections on the frame and display it.
        sv::drawTargetsInFrame(frame, targets);
        cv::imshow("img", frame);
        cv::waitKey(10);
    }
    return 0;
}

View File

@ -0,0 +1,51 @@
#include <iostream>
#include <string>
// SpireCV SDK header
#include <sv_world.h>

using namespace std;

int main(int argc, char *argv[])
{
    // Create the generic object detector.
    sv::CommonObjectDetector detector;
    // Load camera intrinsics manually. With an Amov gimbal/camera (e.g. G1)
    // this step can be skipped; the parameter file is fetched automatically.
    detector.loadCameraParams("/home/amov/SpireCV/calib_webcam_640x480.yaml");

    // Open the webcam (camera id 0); resolution/fps left at device defaults.
    sv::Camera camera;
    camera.open(sv::CameraType::WEBCAM, 0);

    // Holds the current frame.
    cv::Mat frame;
    int frame_id = 0;

    // Video writer: saves to /home/amov/Videos at 640x480, 25 Hz, and also
    // records the per-frame detection results alongside (.svj).
    sv::VideoWriter recorder;
    recorder.setup("/home/amov/Videos", cv::Size(640, 480), 25, true);

    for (;;)
    {
        // Per-frame detection-result container.
        sv::TargetsInFrame targets(frame_id++);

        // Grab a frame, normalize its size, and run detection.
        camera.read(frame);
        cv::resize(frame, frame, cv::Size(640, 480));
        detector.detect(frame, targets);

        // Persist the frame together with its detection results.
        recorder.write(frame, targets);

        // Overlay the detections on the frame and display it.
        sv::drawTargetsInFrame(frame, targets);
        cv::imshow("img", frame);
        cv::waitKey(10);
    }
    return 0;
}

View File

@ -0,0 +1,35 @@
#include <iostream>
#include <string>
// SpireCV SDK header
#include <sv_world.h>

using namespace std;

int main(int argc, char *argv[])
{
    // Open the webcam (camera id 0); resolution/fps left at device defaults.
    sv::Camera camera;
    camera.open(sv::CameraType::WEBCAM, 0);

    // RTSP streamer: 1280x720 output, port 8554, 1 Mb/s bitrate.
    // The stream is served at rtsp://<ip>:8554/live
    sv::VideoStreamer rtsp_streamer;
    rtsp_streamer.setup(cv::Size(1280, 720), 8554, 1);

    // Holds the current frame.
    cv::Mat frame;
    for (;;)
    {
        // Grab a frame, normalize its size, and push it to the RTSP stream.
        camera.read(frame);
        cv::resize(frame, frame, cv::Size(1280, 720));
        rtsp_streamer.stream(frame);

        // Local preview window.
        cv::imshow("img", frame);
        cv::waitKey(10);
    }
    return 0;
}

View File

@ -0,0 +1,28 @@
#!/bin/sh
# Download the SpireCV benchmark videos and default parameter files
# needed to run the examples. Existing files are re-downloaded with a
# numbered suffix (wget default); run in a clean tree for a fresh set.

base_url="https://download.amovlab.com/model/install"
test_dir="${HOME}/SpireCV/test"
sv_dir="${HOME}/SpireCV"

# Benchmark clips, two resolutions each -> ${HOME}/SpireCV/test
for clip in aruco car drone ellipse landing tracking; do
    for res in 1280x720 640x480; do
        wget ${base_url}/benchmark/${clip}_${res}.mp4 -P ${test_dir}
    done
done

# Camera calibration files -> ${HOME}/SpireCV
for calib in calib_webcam_1280x720.yaml calib_webcam_1920x1080.yaml; do
    wget ${base_url}/c-params/${calib} -P ${sv_dir}
done

# Algorithm parameter presets -> ${HOME}/SpireCV
for params in sv_algorithm_params_1280_wo_mask.json \
              sv_algorithm_params_640_w_mask.json \
              sv_algorithm_params_640_wo_mask.json \
              sv_algorithm_params_csrt.json \
              sv_algorithm_params_kcf.json \
              sv_algorithm_params_siamrpn.json \
              sv_algorithm_params_nano.json; do
    wget ${base_url}/a-params/${params} -P ${sv_dir}
done

View File

@ -0,0 +1,63 @@
#!/bin/sh
# Build and install FFmpeg 4.2.5 with NVIDIA hardware acceleration
# (NVENC/CUVID/NPP plus x264/x265), then build ZLMediaKit and stage its
# MediaServer binary and config under ${HOME}/SpireCV/ZLM.
# Requires: sudo, network access, and the CUDA toolkit under /usr/local/cuda.

# Toolchain and codec build dependencies.
sudo apt install -y \
    build-essential yasm cmake libtool libc6 libc6-dev unzip wget libfmt-dev \
    libnuma1 libnuma-dev libx264-dev libx265-dev libfaac-dev libssl-dev

# Everything is built inside ${HOME}/SpireCV.
root_dir=${HOME}"/SpireCV"
if [ ! -d ${root_dir} ]; then
    echo -e "\033[32m[INFO]: ${root_dir} not exist, creating it ... \033[0m"
    mkdir -p ${root_dir}
fi
cd ${root_dir}

# NVIDIA codec headers (ffnvcodec), pinned to n11.1.5.0 to match the
# FFmpeg 4.2.x NVENC/CUVID API.
git clone https://gitee.com/jario-jin/nv-codec-headers.git
cd nv-codec-headers
git checkout n11.1.5.0
sudo make install
cd ..

# Fetch and unpack the FFmpeg source.
wget https://ffmpeg.org/releases/ffmpeg-4.2.5.tar.bz2
tar -xjf ffmpeg-4.2.5.tar.bz2
cd ffmpeg-4.2.5
export PATH=$PATH:/usr/local/cuda/bin
# Bump the CUDA compute capability hardcoded in configure from _30 to _75.
# NOTE(review): the command is run twice on purpose or by accident -- without
# /g each run replaces only the first remaining `_30` per line, so the repeat
# catches a second occurrence on the same line. Confirm before simplifying.
sed -i 's#_30#_75#' configure; sed -i 's#_30#_75#' configure
./configure \
    --enable-nonfree \
    --enable-gpl \
    --enable-shared \
    --enable-ffmpeg \
    --enable-ffplay \
    --enable-ffprobe \
    --enable-libx264 \
    --enable-libx265 \
    --enable-cuda-nvcc \
    --enable-nvenc \
    --enable-cuda \
    --enable-cuvid \
    --enable-libnpp \
    --extra-libs="-lpthread -lm" \
    --extra-cflags=-I/usr/local/cuda/include \
    --extra-ldflags=-L/usr/local/cuda/lib64
make -j8
sudo make install
cd ..

# Build ZLMediaKit (RTSP/RTMP media server) from a mirrored repository.
git clone https://gitee.com/jario-jin/ZLMediaKit.git
cd ZLMediaKit
git submodule update --init
mkdir build
cd build
cmake ..
make -j4
cd ..
cd ..

# Stage the server binary and its config in ./ZLM (i.e. ${HOME}/SpireCV/ZLM).
mkdir ZLM
cd ZLM
cp ../ZLMediaKit/release/linux/Debug/MediaServer .
cp ../ZLMediaKit/release/linux/Debug/config.ini .
cd ..

View File

@ -0,0 +1,23 @@
#!/bin/sh
# Install GStreamer development packages and plugins, then build and install
# gst-rtsp-server (branch 1.8) from a mirrored repository.
# Requires sudo and network access; installs system-wide via `make install`.

# GStreamer core, plugin sets, and tooling.
sudo apt install -y libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev
sudo apt install -y libgstreamer-plugins-bad1.0-dev gstreamer1.0-plugins-base
sudo apt install -y gstreamer1.0-plugins-good gstreamer1.0-plugins-bad
sudo apt install -y gstreamer1.0-plugins-ugly gstreamer1.0-libav gstreamer1.0-doc
sudo apt install -y gstreamer1.0-tools gstreamer1.0-x gstreamer1.0-alsa
sudo apt install -y gstreamer1.0-gl gstreamer1.0-gtk3 gstreamer1.0-qt5
sudo apt install -y gstreamer1.0-pulseaudio
sudo apt install -y gtk-doc-tools
# Autotools toolchain needed by gst-rtsp-server's autogen step.
sudo apt -y install autotools-dev automake m4 perl
sudo apt -y install libtool
# NOTE(review): this runs in the CURRENT directory, before the clone below;
# there is no configure.ac here so it most likely fails (and /bin/sh without
# -e just continues). Looks like a stray line -- confirm it can be removed;
# ./autogen.sh below performs the autoreconf inside the repo.
autoreconf -ivf
# Build and install the RTSP server library.
git clone https://gitee.com/jario-jin/gst-rtsp-server-b18.git
cd gst-rtsp-server-b18
./autogen.sh
make
sudo make install
cd ..
# Remove the build tree after installation.
sudo rm -r gst-rtsp-server-b18

View File

@ -0,0 +1,19 @@
#!/bin/sh
# Install GStreamer development packages and plugins, then build and install
# gst-rtsp-server (branch 1.8) from a mirrored repository.
# Requires sudo and network access; installs system-wide via `make install`.

# GStreamer core, plugin sets, and tooling.
sudo apt install -y libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev
sudo apt install -y libgstreamer-plugins-bad1.0-dev gstreamer1.0-plugins-base
sudo apt install -y gstreamer1.0-plugins-good gstreamer1.0-plugins-bad
sudo apt install -y gstreamer1.0-plugins-ugly gstreamer1.0-libav gstreamer1.0-doc
sudo apt install -y gstreamer1.0-tools gstreamer1.0-x gstreamer1.0-alsa
sudo apt install -y gstreamer1.0-gl gstreamer1.0-gtk3 gstreamer1.0-qt5
sudo apt install -y gstreamer1.0-pulseaudio
sudo apt install -y gtk-doc-tools
# Build and install the RTSP server library.
git clone https://gitee.com/jario-jin/gst-rtsp-server-b18.git
cd gst-rtsp-server-b18
./autogen.sh
make
sudo make install
cd ..
# Remove the build tree after installation.
sudo rm -r gst-rtsp-server-b18

View File

@ -0,0 +1,41 @@
#!/bin/bash -e
# Convert the downloaded SpireCV .wts / .onnx model weights into TensorRT
# engines (a one-time step per machine; engines are GPU-specific).
# Requires: SpireCVDet / SpireCVSeg on PATH, TensorRT (trtexec), and the
# model files already present under ${HOME}/SpireCV/models.

export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH

root_dir=${HOME}"/SpireCV/models"

# COCO detection / segmentation weights (80 classes).
coco_model1="COCO-yolov5s.wts"
coco_model2="COCO-yolov5s6.wts"
coco_model3="COCO-yolov5s-seg.wts"
coco_model1_fn=${root_dir}/${coco_model1}
coco_model2_fn=${root_dir}/${coco_model2}
coco_model3_fn=${root_dir}/${coco_model3}

# Drone detection weights (1 class).
drone_model1="Drone-yolov5s-ap935-v230302.wts"
drone_model2="Drone-yolov5s6-ap949-v230302.wts"
drone_model1_fn=${root_dir}/${drone_model1}
drone_model2_fn=${root_dir}/${drone_model2}

# Person/car detection weights (8 classes).
personcar_model1="PersonCar-yolov5s-ap606-v230306.wts"
personcar_model2="PersonCar-yolov5s6-ap702-v230306.wts"
personcar_model1_fn=${root_dir}/${personcar_model1}
personcar_model2_fn=${root_dir}/${personcar_model2}

# Landing-marker classifier (ONNX, converted via trtexec below).
landing_model1="LandingMarker-resnet34-v230228.onnx"
landing_model1_fn=${root_dir}/${landing_model1}

# Build the engines: <weights> <output engine> <num classes> <model scale>.
SpireCVDet -s ${coco_model1_fn} ${root_dir}/COCO.engine 80 s
SpireCVDet -s ${coco_model2_fn} ${root_dir}/COCO_HD.engine 80 s6
SpireCVSeg -s ${coco_model3_fn} ${root_dir}/COCO_SEG.engine 80 s
SpireCVDet -s ${drone_model1_fn} ${root_dir}/Drone.engine 1 s
SpireCVDet -s ${drone_model2_fn} ${root_dir}/Drone_HD.engine 1 s6
SpireCVDet -s ${personcar_model1_fn} ${root_dir}/PersonCar.engine 8 s
SpireCVDet -s ${personcar_model2_fn} ${root_dir}/PersonCar_HD.engine 8 s6

# The landing-marker ONNX model is converted with TensorRT's trtexec.
cd /usr/src/tensorrt/bin/
./trtexec --explicitBatch --onnx=${landing_model1_fn} --saveEngine=${root_dir}/LandingMarker.engine --fp16

# Persist the library path for future shells.
# Fix: single quotes keep $LD_LIBRARY_PATH literal in ~/.bashrc; the previous
# double-quoted form expanded it at write time, baking the current
# (already-prefixed) value into the file. Also skip the append when the line
# is already present, so re-running this script does not grow ~/.bashrc.
if ! grep -qxF 'export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH' ~/.bashrc; then
    echo 'export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH' >> ~/.bashrc
fi

View File

@ -0,0 +1,107 @@
#!/bin/bash -e
# Download any missing SpireCV model weights, algorithm parameter files,
# and camera calibration files from the AmovLab download server.
# Files already present on disk are left untouched.

root_dir="${HOME}/SpireCV/models"
root_server="https://download.amovlab.com/model"

# Algorithm parameter presets and camera calibrations live under ${HOME}/SpireCV.
sv_params1="${HOME}/SpireCV/sv_algorithm_params.json"
sv_params2="${HOME}/SpireCV/sv_algorithm_params_coco_640.json"
sv_params3="${HOME}/SpireCV/sv_algorithm_params_coco_1280.json"
camera_params1="${HOME}/SpireCV/calib_webcam_640x480.yaml"
camera_params2="${HOME}/SpireCV/calib_webcam_1280x720.yaml"

# COCO detection / segmentation weights (YOLOv5).
coco_model1="COCO-yolov5s.wts"
coco_model2="COCO-yolov5s6.wts"
coco_model3="COCO-yolov5s-seg.wts"
coco_model1_fn="${root_dir}/${coco_model1}"
coco_model2_fn="${root_dir}/${coco_model2}"
coco_model3_fn="${root_dir}/${coco_model3}"

# Drone detection weights.
drone_model1="Drone-yolov5s-ap935-v230302.wts"
drone_model2="Drone-yolov5s6-ap949-v230302.wts"
drone_model1_fn="${root_dir}/${drone_model1}"
drone_model2_fn="${root_dir}/${drone_model2}"

# Person/car detection weights.
personcar_model1="PersonCar-yolov5s-ap606-v230306.wts"
personcar_model2="PersonCar-yolov5s6-ap702-v230306.wts"
personcar_model1_fn="${root_dir}/${personcar_model1}"
personcar_model2_fn="${root_dir}/${personcar_model2}"

# Single-object tracking models (DaSiamRPN + NanoTrack).
track_model1="dasiamrpn_model.onnx"
track_model2="dasiamrpn_kernel_cls1.onnx"
track_model3="dasiamrpn_kernel_r1.onnx"
track_model4="nanotrack_backbone_sim.onnx"
track_model5="nanotrack_head_sim.onnx"
track_model1_fn="${root_dir}/${track_model1}"
track_model2_fn="${root_dir}/${track_model2}"
track_model3_fn="${root_dir}/${track_model3}"
track_model4_fn="${root_dir}/${track_model4}"
track_model5_fn="${root_dir}/${track_model5}"

# Landing-marker classifier.
landing_model1="LandingMarker-resnet34-v230228.onnx"
landing_model1_fn="${root_dir}/${landing_model1}"

# True when the file at $1 is not present yet.
missing() { [ ! -f "$1" ]; }

# Print the green "downloading" banner for file $1.
announce() { echo -e "\033[32m[INFO]: $1 not exist, downloading ... \033[0m"; }

# Download URL $2 to local path $1.
fetch() { wget -O "$1" "$2"; }

if [ ! -d "${root_dir}" ]; then
    echo -e "\033[32m[INFO]: ${root_dir} not exist, creating it ... \033[0m"
    mkdir -p "${root_dir}"
fi

if missing "${sv_params1}"; then
    announce "${sv_params1}"
    fetch "${sv_params1}" "${root_server}/install/a-params/sv_algorithm_params.json"
fi
if missing "${sv_params2}"; then
    announce "${sv_params2}"
    fetch "${sv_params2}" "${root_server}/install/a-params/sv_algorithm_params_coco_640.json"
fi
if missing "${sv_params3}"; then
    announce "${sv_params3}"
    fetch "${sv_params3}" "${root_server}/install/a-params/sv_algorithm_params_coco_1280.json"
fi
if missing "${camera_params1}"; then
    announce "${camera_params1}"
    fetch "${camera_params1}" "${root_server}/install/c-params/calib_webcam_640x480.yaml"
fi
if missing "${camera_params2}"; then
    announce "${camera_params2}"
    fetch "${camera_params2}" "${root_server}/install/c-params/calib_webcam_1280x720.yaml"
fi

# Weight files are fetched in groups: the first file of each group acts as
# the sentinel; when it is absent the whole group is downloaded.
if missing "${coco_model1_fn}"; then
    announce "${coco_model1_fn}"
    fetch "${coco_model1_fn}" "${root_server}/install/${coco_model1}"
    fetch "${coco_model2_fn}" "${root_server}/install/${coco_model2}"
    fetch "${coco_model3_fn}" "${root_server}/install/${coco_model3}"
fi
if missing "${drone_model1_fn}"; then
    announce "${drone_model1_fn}"
    fetch "${drone_model1_fn}" "${root_server}/install/${drone_model1}"
    fetch "${drone_model2_fn}" "${root_server}/install/${drone_model2}"
fi
if missing "${personcar_model1_fn}"; then
    announce "${personcar_model1_fn}"
    fetch "${personcar_model1_fn}" "${root_server}/install/${personcar_model1}"
    fetch "${personcar_model2_fn}" "${root_server}/install/${personcar_model2}"
fi
if missing "${track_model1_fn}"; then
    announce "${track_model1_fn}"
    fetch "${track_model1_fn}" "${root_server}/${track_model1}"
    fetch "${track_model2_fn}" "${root_server}/${track_model2}"
    fetch "${track_model3_fn}" "${root_server}/${track_model3}"
fi
if missing "${track_model4_fn}"; then
    announce "${track_model4_fn}"
    fetch "${track_model4_fn}" "${root_server}/${track_model4}"
    fetch "${track_model5_fn}" "${root_server}/${track_model5}"
fi
if missing "${landing_model1_fn}"; then
    announce "${landing_model1_fn}"
    fetch "${landing_model1_fn}" "${root_server}/install/${landing_model1}"
fi

View File

@ -0,0 +1,56 @@
#!/bin/sh
# Build and install OpenCV 4.7.0 with the contrib (nonfree) modules from
# pre-packaged archives, using a pre-seeded .cache so the CMake configure
# step does not download third-party blobs. Installs to /usr/local.

# Fetch the source, contrib, and download-cache archives into the CWD.
wget https://download.amovlab.com/model/deps/opencv-4.7.0.zip
wget https://download.amovlab.com/model/deps/opencv_contrib-4.7.0.zip
wget https://download.amovlab.com/model/deps/opencv_cache-4.7.0.zip

# Directory the archives were downloaded into (current directory).
package_dir="."
# Fix: -p tolerates an existing directory so the script can be re-run.
mkdir -p ~/opencv_build

# Fix: was `[ ! -d ""$package_dir"" ]` -- the doubled empty quotes were
# redundant noise; a single pair quotes the expansion correctly.
if [ ! -d "$package_dir" ]; then
    # NOTE(review): these \033 escapes rely on /bin/sh being dash, whose
    # echo interprets them; bash's echo would print them literally.
    echo "\033[31m[ERROR]: $package_dir not exist!: \033[0m"
    exit 1
fi

# The xenial repositories provide libjasper, which was dropped from later
# Ubuntu releases.
sudo add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main"
sudo add-apt-repository "deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu-ports/ xenial main multiverse restricted universe"
sudo apt update

# Build dependencies.
sudo apt install -y build-essential
sudo apt install -y cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev
sudo apt install -y libjasper1 libjasper-dev
sudo apt install -y python3-dev python3-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev
sudo apt install -y libdc1394-22-dev

# Unpack everything into ~/opencv_build, then remove the archives.
echo "\033[32m[INFO]:\033[0m unzip opencv-4.7.0.zip ..."
unzip -q -o $package_dir/opencv-4.7.0.zip -d ~/opencv_build
echo "\033[32m[INFO]:\033[0m unzip opencv_contrib-4.7.0.zip ..."
unzip -q -o $package_dir/opencv_contrib-4.7.0.zip -d ~/opencv_build
echo "\033[32m[INFO]:\033[0m unzip opencv_cache-4.7.0.zip ..."
unzip -q -o $package_dir/opencv_cache-4.7.0.zip -d ~/opencv_build
sudo rm opencv-4.7.0.zip
sudo rm opencv_contrib-4.7.0.zip
sudo rm opencv_cache-4.7.0.zip

# Seed OpenCV's .cache with the pre-downloaded third-party files.
cd ~/opencv_build/opencv-4.7.0
mkdir -p .cache
cp -r ~/opencv_build/opencv_cache-4.7.0/* ~/opencv_build/opencv-4.7.0/.cache/

# Configure, build (2 jobs to limit memory use), and install.
mkdir -p build
cd build
cmake -D CMAKE_BUILD_TYPE=Release -D WITH_CUDA=OFF -D OPENCV_ENABLE_NONFREE=ON -D CMAKE_INSTALL_PREFIX=/usr/local -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-4.7.0/modules ..
make -j2
sudo make install

# Remove the build tree after installation.
cd
sudo rm -r ~/opencv_build

Some files were not shown because too many files have changed in this diff Show More