diff --git a/.gitignore b/.gitignore
index 259148f..e377bf6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,21 @@
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+eggs/
+.eggs/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+.idea/
+
# Prerequisites
*.d
@@ -30,3 +48,27 @@
*.exe
*.out
*.app
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# VSCode Editor
+.vscode/
+
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..bba4797
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,378 @@
+cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
+cmake_policy(SET CMP0054 NEW)
+
+set(PROJECT_VERSION 0.2.0)
+project(SpireCV VERSION ${PROJECT_VERSION} LANGUAGES CXX)
+
+add_definitions(-DAPI_EXPORTS)
+set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Build type (Release default; overridable via -DCMAKE_BUILD_TYPE)")
+
+
+## JETSON, X86_CUDA
+message(STATUS "System:${CMAKE_HOST_SYSTEM_PROCESSOR}")
+if(NOT DEFINED PLATFORM)
+ message(FATAL_ERROR "PLATFORM NOT SPECIFIED!")
+else()
+ message(STATUS "PLATFORM: ${PLATFORM}")
+ if(PLATFORM STREQUAL "JETSON")
+ add_definitions(-DPLATFORM_JETSON)
+ option(USE_CUDA "BUILD WITH CUDA." ON)
+ option(USE_GSTREAMER "BUILD WITH GSTREAMER." ON)
+ elseif(PLATFORM STREQUAL "X86_CUDA")
+ add_definitions(-DPLATFORM_X86_CUDA)
+ option(USE_CUDA "BUILD WITH CUDA." ON)
+ option(USE_FFMPEG "BUILD WITH FFMPEG." ON)
+ else()
+ message(FATAL_ERROR "UNSUPPORTED PLATFORM!")
+ endif()
+endif()
+
+
+if(USE_CUDA)
+ add_definitions(-DWITH_CUDA)
+ option(CUDA_USE_STATIC_CUDA_RUNTIME "Use the static CUDA runtime library" OFF)
+ find_package(CUDA REQUIRED)
+ message(STATUS "CUDA: ON")
+endif()
+
+
+if(USE_GSTREAMER)
+ add_definitions(-DWITH_GSTREAMER)
+ message(STATUS "GSTREAMER: ON")
+endif()
+
+if(USE_FFMPEG)
+ add_definitions(-DWITH_FFMPEG)
+ find_package(fmt REQUIRED)
+ set(FFMPEG_LIBS libavutil.so libavcodec.so libavformat.so libavdevice.so libavfilter.so libswscale.so)
+ message(STATUS "WITH_FFMPEG: ON")
+endif()
+
+
+add_definitions(-DWITH_OCV470)
+find_package(OpenCV 4.7 REQUIRED)
+message(STATUS "OpenCV library status:")
+message(STATUS " version: ${OpenCV_VERSION}")
+message(STATUS " libraries: ${OpenCV_LIBS}")
+message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}")
+
+
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
+include_directories(
+ ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/IOs/serial/include
+ ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/FIFO
+ ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/G1
+ ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/G2
+ ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/Q10f
+ ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src
+ ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl
+ ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/cuda
+ ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/landing_det/cuda
+ ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/tracking/ocv470
+ ${CMAKE_CURRENT_SOURCE_DIR}/video_io
+ ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/ellipse_det
+ ${CMAKE_CURRENT_SOURCE_DIR}/utils
+)
+
+if(USE_GSTREAMER)
+ include_directories(${CMAKE_CURRENT_SOURCE_DIR}/video_io/gstreamer)
+ if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
+ include_directories(
+ "/usr/include/gstreamer-1.0"
+ "/usr/local/include/gstreamer-1.0"
+ "/usr/include/glib-2.0"
+ "/usr/lib/aarch64-linux-gnu/glib-2.0/include"
+ )
+ elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64")
+ include_directories(
+ "/usr/include/gstreamer-1.0"
+ "/usr/local/include/gstreamer-1.0"
+ "/usr/include/glib-2.0"
+ "/usr/lib/x86_64-linux-gnu/glib-2.0/include"
+ )
+ endif()
+endif()
+
+if(USE_FFMPEG)
+ include_directories(${CMAKE_CURRENT_SOURCE_DIR}/video_io/ffmpeg)
+endif()
+
+
+# Public header
+set(
+ public_HEADS
+ include/sv_core.h
+ include/sv_video_base.h
+ include/sv_gimbal.h
+ include/sv_algorithm_base.h
+ include/sv_common_det.h
+ include/sv_landing_det.h
+ include/sv_tracking.h
+ include/sv_video_input.h
+ include/sv_video_output.h
+ include/sv_world.h
+)
+
+# Gimbal Sources
+set(serial_SRCS
+ gimbal_ctrl/IOs/serial/src/serial.cc
+)
+list(APPEND serial_SRCS gimbal_ctrl/IOs/serial/src/impl/unix.cc)
+list(APPEND serial_SRCS gimbal_ctrl/IOs/serial/src/impl/list_ports/list_ports_linux.cc)
+
+set(driver_SRCS
+ gimbal_ctrl/driver/src/FIFO/Ring_Fifo.cc
+)
+file(GLOB DRV_LIB_FILES ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/G1/*.cpp)
+list(APPEND driver_SRCS ${DRV_LIB_FILES})
+file(GLOB DRV_LIB_FILES ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/G2/*.cpp)
+list(APPEND driver_SRCS ${DRV_LIB_FILES})
+file(GLOB DRV_LIB_FILES ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/Q10f/*.cpp)
+list(APPEND driver_SRCS ${DRV_LIB_FILES})
+file(GLOB DRV_LIB_FILES ${CMAKE_CURRENT_SOURCE_DIR}/gimbal_ctrl/driver/src/*.cpp)
+list(APPEND driver_SRCS ${DRV_LIB_FILES})
+
+set(gimbal_SRCS
+ gimbal_ctrl/sv_gimbal.cpp
+ gimbal_ctrl/sv_gimbal_io.hpp
+)
+
+# Gimbal Lib
+add_library(sv_gimbal SHARED ${serial_SRCS} ${driver_SRCS} ${gimbal_SRCS})
+target_link_libraries(sv_gimbal rt pthread)
+
+
+set(spirecv_SRCS
+ algorithm/sv_algorithm_base.cpp
+ algorithm/ellipse_det/ellipse_detector.cpp
+ algorithm/common_det/sv_common_det.cpp
+ algorithm/landing_det/sv_landing_det.cpp
+ algorithm/tracking/sv_tracking.cpp
+)
+
+file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/tracking/ocv470/*.cpp)
+list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
+file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/*.cpp)
+list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
+file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/utils/*.cpp)
+list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
+
+if(USE_CUDA)
+ list(APPEND spirecv_SRCS algorithm/common_det/cuda/yolov7/preprocess.cu)
+ file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/cuda/*.cpp)
+ list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
+ file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/common_det/cuda/yolov7/*.cpp)
+ list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
+ file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/algorithm/landing_det/cuda/*.cpp)
+ list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
+endif()
+
+if(USE_FFMPEG)
+ file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/ffmpeg/*.cpp)
+ list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
+endif()
+
+if(USE_GSTREAMER)
+file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/video_io/gstreamer/*.cpp)
+list(APPEND spirecv_SRCS ${ALG_SRC_FILES})
+endif()
+
+
+if(USE_CUDA)
+ # CUDA
+ include_directories(/usr/local/cuda/include)
+ link_directories(/usr/local/cuda/lib64)
+ # TensorRT
+ include_directories(/usr/include/x86_64-linux-gnu)
+ link_directories(/usr/lib/x86_64-linux-gnu)
+ # Add library
+ cuda_add_library(sv_yoloplugins SHARED algorithm/common_det/cuda/yolov7/yololayer.cu)
+ target_link_libraries(sv_yoloplugins nvinfer cudart)
+
+ cuda_add_library(sv_world SHARED ${spirecv_SRCS})
+ if(USE_GSTREAMER)
+ target_link_libraries(
+ sv_world ${OpenCV_LIBS}
+ sv_yoloplugins sv_gimbal
+ nvinfer cudart
+ gstrtspserver-1.0
+ )
+ else()
+ target_link_libraries(
+ sv_world ${OpenCV_LIBS}
+ sv_yoloplugins sv_gimbal
+ nvinfer cudart
+ )
+ endif()
+
+ if(USE_FFMPEG)
+ target_link_libraries(sv_world ${FFMPEG_LIBS} fmt)
+ endif()
+
+ set(
+ YOLO_SRCS
+ algorithm/common_det/cuda/yolov7/preprocess.cu
+ algorithm/common_det/cuda/yolov7/postprocess.cpp
+ algorithm/common_det/cuda/yolov7/model.cpp
+ algorithm/common_det/cuda/yolov7/calibrator.cpp
+ )
+
+ cuda_add_executable(SpireCVDet samples/SpireCVDet.cpp ${YOLO_SRCS})
+ target_link_libraries(SpireCVDet sv_world)
+
+ cuda_add_executable(SpireCVSeg samples/SpireCVSeg.cpp ${YOLO_SRCS})
+ target_link_libraries(SpireCVSeg sv_world)
+
+elseif(PLATFORM STREQUAL "X86_CPU")
+ add_library(sv_world SHARED ${spirecv_SRCS})
+ target_link_libraries(
+ sv_world ${OpenCV_LIBS}
+ sv_gimbal
+ )
+ if(USE_GSTREAMER)
+ target_link_libraries(sv_world gstrtspserver-1.0)
+ endif()
+ if(USE_FFMPEG)
+ target_link_libraries(sv_world ${FFMPEG_LIBS} fmt)
+ endif()
+endif()
+
+
+add_executable(ArucoDetection samples/demo/aruco_detection.cpp)
+target_link_libraries(ArucoDetection sv_world)
+add_executable(CameraReading samples/demo/camera_reading.cpp)
+target_link_libraries(CameraReading sv_world)
+add_executable(CommonObjectDetection samples/demo/common_object_detection.cpp)
+target_link_libraries(CommonObjectDetection sv_world)
+add_executable(DetectionWithClickedTracking samples/demo/detection_with_clicked_tracking.cpp)
+target_link_libraries(DetectionWithClickedTracking sv_world)
+add_executable(EllipseDetection samples/demo/ellipse_detection.cpp)
+target_link_libraries(EllipseDetection sv_world)
+add_executable(LandingMarkerDetection samples/demo/landing_marker_detection.cpp)
+target_link_libraries(LandingMarkerDetection sv_world)
+add_executable(SingleObjectTracking samples/demo/single_object_tracking.cpp)
+target_link_libraries(SingleObjectTracking sv_world)
+add_executable(UdpDetectionInfoReceiver samples/demo/udp_detection_info_receiver.cpp)
+target_link_libraries(UdpDetectionInfoReceiver sv_world)
+add_executable(UdpDetectionInfoSender samples/demo/udp_detection_info_sender.cpp)
+target_link_libraries(UdpDetectionInfoSender sv_world)
+add_executable(VideoSaving samples/demo/video_saving.cpp)
+target_link_libraries(VideoSaving sv_world)
+add_executable(VideoStreaming samples/demo/video_streaming.cpp)
+target_link_libraries(VideoStreaming sv_world)
+
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/samples/calib)
+add_executable(CameraCalibration samples/calib/calibrate_camera_charuco.cpp)
+target_link_libraries(CameraCalibration ${OpenCV_LIBS})
+
+
+message(STATUS "CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}")
+if (NOT DEFINED SV_INSTALL_PREFIX)
+ set(SV_INSTALL_PREFIX ${CMAKE_INSTALL_PREFIX})
+ message(STATUS "SV_INSTALL_PREFIX: ${SV_INSTALL_PREFIX}")
+else()
+ message(STATUS "SV_INSTALL_PREFIX: ${SV_INSTALL_PREFIX}")
+endif()
+
+
+if(USE_CUDA)
+ install(TARGETS sv_gimbal sv_yoloplugins sv_world
+ LIBRARY DESTINATION lib
+ )
+ install(TARGETS SpireCVDet SpireCVSeg
+ RUNTIME DESTINATION bin
+ )
+elseif(PLATFORM STREQUAL "X86_CPU")
+ install(TARGETS sv_world
+ LIBRARY DESTINATION lib
+ )
+endif()
+
+install(FILES ${public_HEADS}
+ DESTINATION include
+)
+
+
+if(PLATFORM STREQUAL "JETSON")
+file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/build/${PROJECT_NAME}Config.cmake.in [[
+@PACKAGE_INIT@
+find_package(OpenCV 4 REQUIRED)
+link_directories(/usr/local/cuda/lib64)
+set(SV_INCLUDE_DIRS
+ @SV_INSTALL_PREFIX@/include
+ /usr/include/x86_64-linux-gnu
+ /usr/local/cuda/include
+ ${OpenCV_INCLUDE_DIRS}
+ /usr/include/gstreamer-1.0
+ /usr/local/include/gstreamer-1.0
+ /usr/include/glib-2.0
+ /usr/lib/aarch64-linux-gnu/glib-2.0/include
+)
+set(SV_LIBRARIES
+ @SV_INSTALL_PREFIX@/lib/libsv_yoloplugins.so
+ @SV_INSTALL_PREFIX@/lib/libsv_world.so
+ @SV_INSTALL_PREFIX@/lib/libsv_gimbal.so
+ ${OpenCV_LIBS}
+ nvinfer cudart rt pthread
+ gstrtspserver-1.0
+)
+]])
+elseif(PLATFORM STREQUAL "X86_CUDA")
+file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/build/${PROJECT_NAME}Config.cmake.in [[
+@PACKAGE_INIT@
+find_package(OpenCV 4 REQUIRED)
+find_package(fmt REQUIRED)
+link_directories(/usr/local/cuda/lib64)
+set(SV_INCLUDE_DIRS
+ @SV_INSTALL_PREFIX@/include
+ /usr/include/x86_64-linux-gnu
+ /usr/local/cuda/include
+ ${OpenCV_INCLUDE_DIRS}
+)
+set(SV_LIBRARIES
+ @SV_INSTALL_PREFIX@/lib/libsv_yoloplugins.so
+ @SV_INSTALL_PREFIX@/lib/libsv_world.so
+ @SV_INSTALL_PREFIX@/lib/libsv_gimbal.so
+ ${OpenCV_LIBS}
+ nvinfer cudart rt pthread
+ @FFMPEG_LIBS@ fmt
+)
+]])
+elseif(PLATFORM STREQUAL "X86_CPU")
+file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/build/${PROJECT_NAME}Config.cmake.in [[
+@PACKAGE_INIT@
+find_package(OpenCV 4 REQUIRED)
+find_package(fmt REQUIRED)
+set(SV_INCLUDE_DIRS
+ @SV_INSTALL_PREFIX@/include
+ /usr/include/x86_64-linux-gnu
+ ${OpenCV_INCLUDE_DIRS}
+)
+set(SV_LIBRARIES
+ @SV_INSTALL_PREFIX@/lib/libsv_world.so
+ @SV_INSTALL_PREFIX@/lib/libsv_gimbal.so
+ ${OpenCV_LIBS}
+ rt pthread
+ @FFMPEG_LIBS@ fmt
+)
+]])
+endif()
+
+
+include(CMakePackageConfigHelpers)
+write_basic_package_version_file(
+ ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config-version.cmake
+ VERSION ${PROJECT_VERSION}
+ COMPATIBILITY AnyNewerVersion
+)
+configure_package_config_file(${CMAKE_CURRENT_BINARY_DIR}/build/${PROJECT_NAME}Config.cmake.in
+ ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake
+ INSTALL_DESTINATION lib/cmake/${PROJECT_NAME}
+)
+install(FILES
+ ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config-version.cmake
+ DESTINATION lib/cmake/${PROJECT_NAME}
+)
+
+
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
index b173686..a7cd067 100644
--- a/README.md
+++ b/README.md
@@ -1,37 +1,67 @@
-# SpireCV
+# SpireCV 智能感知算法库
-#### 介绍
-SpireCV是一个专为智能无人系统打造的边缘实时感知SDK,主要功能包括相机/吊舱控制、视频保存与推流、目标探测识别与跟踪、边缘数据管理迭代等。旨在为移动机器人开发者提供高性能、高可靠、接口简洁、功能丰富的视觉感知能力。
+## 项目概况
-#### 软件架构
-软件架构说明
+SpireCV是一个专为**智能无人系统**打造的**边缘实时感知SDK**,主要功能包括**相机/吊舱控制**、**视频保存与推流**、**目标探测识别与跟踪**、**边缘数据管理迭代**等。旨在为移动机器人开发者提供高性能、高可靠、接口简洁、功能丰富的视觉感知能力。
+ - Github:https://github.com/amov-lab/SpireCV
+ - Gitee:https://gitee.com/amovlab/SpireCV
+ - **开源项目,维护不易,还烦请点一个star收藏,谢谢支持!**
-#### 安装教程
+## 快速入门
-1. xxxx
-2. xxxx
-3. xxxx
+ - 安装及使用:[SpireCV使用手册](https://wiki.amovlab.com/public/spirecv-wiki/)
+ - 需掌握C++语言基础、CMake编译工具基础。
+ - 需要掌握OpenCV视觉库基础,了解CUDA、OpenVINO、RKNN和CANN等计算库。
+ - 需要了解ROS基本概念及基本操作。
-#### 使用说明
+ - 答疑及交流:
+ - 答疑论坛(官方定期答疑,推荐):[阿木社区-SpireCV问答专区](https://bbs.amovlab.com/)
+ - 添加微信jiayue199506(备注消息:SpireCV)进入SpireCV智能感知算法库交流群。
+ - B站搜索并关注“阿木社区”,开发团队定期直播答疑。
-1. xxxx
-2. xxxx
-3. xxxx
+## 项目框架
-#### 参与贡献
+#### 主要框架如图所示:
-1. Fork 本仓库
-2. 新建 Feat_xxx 分支
-3. 提交代码
-4. 新建 Pull Request
+
+#### 目前支持情况:
+ - **功能层**:
+ - [x] 视频算法模块(提供接口统一、性能高效、功能多样的感知算法)
+ - [x] 视频输入、保存与推流模块(提供稳定、跨平台的视频读写能力)
+ - [x] 相机、吊舱控制模块(针对典型硬件生态打通接口,易使用)
+ - [x] 感知信息交互模块(提供UDP通信协议)
+ - [x] [ROS接口](https://gitee.com/amovlab1/spirecv-ros.git)
+ - **平台层**:
+ - [x] X86+Nvidia GPU(推荐10系、20系、30系显卡)
+ - [x] Jetson(AGX Orin/Xavier、Orin NX/Nano、Xavier NX)
+ - [ ] Intel CPU(推进中)
+ - [ ] Rockchip(推进中)
+ - [ ] HUAWEI Ascend(推进中)
-#### 特技
+## 功能展示
+ - **二维码检测**
+
+
+ - **起降标志检测**
+
+
+ - **椭圆检测**
+
+
+ - **目标框选跟踪**
+
+
+ - **通用目标检测**
+
+
+ - **低延迟推流**
+
+
-1. 使用 Readme\_XXX.md 来支持不同的语言,例如 Readme\_en.md, Readme\_zh.md
-2. Gitee 官方博客 [blog.gitee.com](https://blog.gitee.com)
-3. 你可以 [https://gitee.com/explore](https://gitee.com/explore) 这个地址来了解 Gitee 上的优秀开源项目
-4. [GVP](https://gitee.com/gvp) 全称是 Gitee 最有价值开源项目,是综合评定出的优秀开源项目
-5. Gitee 官方提供的使用手册 [https://gitee.com/help](https://gitee.com/help)
-6. Gitee 封面人物是一档用来展示 Gitee 会员风采的栏目 [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/)
+## 版权声明
+
+ - 本项目受 Apache License 2.0 协议保护。
+ - 本项目仅限个人使用,请勿用于商业用途。
+ - 如利用本项目进行营利活动,阿木实验室将追究侵权行为。
diff --git a/algorithm/common_det/cuda/common_det_cuda_impl.cpp b/algorithm/common_det/cuda/common_det_cuda_impl.cpp
new file mode 100644
index 0000000..d86f467
--- /dev/null
+++ b/algorithm/common_det/cuda/common_det_cuda_impl.cpp
@@ -0,0 +1,310 @@
+#include "common_det_cuda_impl.h"
+#include <fstream>
+#include <iostream>
+
+#define SV_MODEL_DIR "/SpireCV/models/"
+#define SV_ROOT_DIR "/SpireCV/"
+
+
+#ifdef WITH_CUDA
+#include "yolov7/cuda_utils.h"
+#include "yolov7/logging.h"
+#include "yolov7/utils.h"
+#include "yolov7/preprocess.h"
+#include "yolov7/postprocess.h"
+#include "yolov7/model.h"
+#define TRTCHECK(status) \
+ do \
+ { \
+ auto ret = (status); \
+ if (ret != 0) \
+ { \
+ std::cerr << "Cuda failure: " << ret << std::endl; \
+ abort(); \
+ } \
+ } while (0)
+
+#define DEVICE 0 // GPU id
+#define BATCH_SIZE 1
+#define MAX_IMAGE_INPUT_SIZE_THRESH 3000 * 3000 // ensure it exceeds the maximum size of the input images!
+#endif
+
+
+namespace sv {
+
+using namespace cv;
+
+
+#ifdef WITH_CUDA
+using namespace nvinfer1;
+static Logger g_nvlogger;
+const static int kOutputSize = kMaxNumOutputBbox * sizeof(Detection) / sizeof(float) + 1;
+const static int kOutputSize1 = kMaxNumOutputBbox * sizeof(Detection) / sizeof(float) + 1;
+const static int kOutputSize2 = 32 * (640 / 4) * (640 / 4);
+#endif
+
+
+CommonObjectDetectorCUDAImpl::CommonObjectDetectorCUDAImpl()
+{
+#ifdef WITH_CUDA
+ this->_gpu_buffers[0] = nullptr;
+ this->_gpu_buffers[1] = nullptr;
+ this->_gpu_buffers[2] = nullptr;
+ this->_cpu_output_buffer = nullptr;
+ this->_cpu_output_buffer1 = nullptr;
+ this->_cpu_output_buffer2 = nullptr;
+ this->_context = nullptr;
+ this->_engine = nullptr;
+ this->_runtime = nullptr;
+#endif
+}
+
+
+CommonObjectDetectorCUDAImpl::~CommonObjectDetectorCUDAImpl()
+{
+#ifdef WITH_CUDA
+ // Release stream and buffers
+ cudaStreamDestroy(_stream);
+ if (_gpu_buffers[0])
+ CUDA_CHECK(cudaFree(_gpu_buffers[0]));
+ if (_gpu_buffers[1])
+ CUDA_CHECK(cudaFree(_gpu_buffers[1]));
+ if (_gpu_buffers[2])
+ CUDA_CHECK(cudaFree(_gpu_buffers[2]));
+ if (_cpu_output_buffer)
+ delete[] _cpu_output_buffer;
+ if (_cpu_output_buffer1)
+ delete[] _cpu_output_buffer1;
+ if (_cpu_output_buffer2)
+ delete[] _cpu_output_buffer2;
+ cuda_preprocess_destroy();
+ // Destroy the engine
+ if (_context)
+ _context->destroy();
+ if (_engine)
+ _engine->destroy();
+ if (_runtime)
+ _runtime->destroy();
+#endif
+}
+
+
+#ifdef WITH_CUDA
+void infer(IExecutionContext& context, cudaStream_t& stream, void** gpu_buffers, float* output, int batchsize) {
+ context.enqueue(batchsize, gpu_buffers, stream, nullptr);
+ // context.enqueueV2(gpu_buffers, stream, nullptr);
+ CUDA_CHECK(cudaMemcpyAsync(output, gpu_buffers[1], batchsize * kOutputSize * sizeof(float), cudaMemcpyDeviceToHost, stream));
+ cudaStreamSynchronize(stream);
+}
+void infer_seg(IExecutionContext& context, cudaStream_t& stream, void **buffers, float* output1, float* output2, int batchSize) {
+ context.enqueue(batchSize, buffers, stream, nullptr);
+ // context.enqueueV2(buffers, stream, nullptr);
+ CUDA_CHECK(cudaMemcpyAsync(output1, buffers[1], batchSize * kOutputSize1 * sizeof(float), cudaMemcpyDeviceToHost, stream));
+ CUDA_CHECK(cudaMemcpyAsync(output2, buffers[2], batchSize * kOutputSize2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
+ cudaStreamSynchronize(stream);
+}
+void CommonObjectDetectorCUDAImpl::_prepare_buffers(int input_h, int input_w) {
+ assert(this->_engine->getNbBindings() == 2);
+ // In order to bind the buffers, we need to know the names of the input and output tensors.
+ // Note that indices are guaranteed to be less than IEngine::getNbBindings()
+ const int inputIndex = this->_engine->getBindingIndex(kInputTensorName);
+ const int outputIndex = this->_engine->getBindingIndex(kOutputTensorName);
+ assert(inputIndex == 0);
+ assert(outputIndex == 1);
+ // Create GPU buffers on device
+ CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[0]), kBatchSize * 3 * input_h * input_w * sizeof(float)));
+ CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[1]), kBatchSize * kOutputSize * sizeof(float)));
+
+ this->_cpu_output_buffer = new float[kBatchSize * kOutputSize];
+}
+void CommonObjectDetectorCUDAImpl::_prepare_buffers_seg(int input_h, int input_w) {
+ assert(this->_engine->getNbBindings() == 3);
+ // In order to bind the buffers, we need to know the names of the input and output tensors.
+ // Note that indices are guaranteed to be less than IEngine::getNbBindings()
+ const int inputIndex = this->_engine->getBindingIndex(kInputTensorName);
+ const int outputIndex1 = this->_engine->getBindingIndex(kOutputTensorName);
+ const int outputIndex2 = this->_engine->getBindingIndex("proto");
+ assert(inputIndex == 0);
+ assert(outputIndex1 == 1);
+ assert(outputIndex2 == 2);
+
+ // Create GPU buffers on device
+ CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[0]), kBatchSize * 3 * input_h * input_w * sizeof(float)));
+ CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[1]), kBatchSize * kOutputSize1 * sizeof(float)));
+ CUDA_CHECK(cudaMalloc((void**)&(this->_gpu_buffers[2]), kBatchSize * kOutputSize2 * sizeof(float)));
+
+ // Alloc CPU buffers
+ this->_cpu_output_buffer1 = new float[kBatchSize * kOutputSize1];
+ this->_cpu_output_buffer2 = new float[kBatchSize * kOutputSize2];
+}
+void deserialize_engine(std::string& engine_name, IRuntime** runtime, ICudaEngine** engine, IExecutionContext** context) {
+ std::ifstream file(engine_name, std::ios::binary);
+ if (!file.good()) {
+ std::cerr << "read " << engine_name << " error!" << std::endl;
+ assert(false);
+ }
+ size_t size = 0;
+ file.seekg(0, file.end);
+ size = file.tellg();
+ file.seekg(0, file.beg);
+ char* serialized_engine = new char[size];
+ assert(serialized_engine);
+ file.read(serialized_engine, size);
+ file.close();
+
+ *runtime = createInferRuntime(g_nvlogger);
+ assert(*runtime);
+ *engine = (*runtime)->deserializeCudaEngine(serialized_engine, size);
+ assert(*engine);
+ *context = (*engine)->createExecutionContext();
+ assert(*context);
+ delete[] serialized_engine;
+}
+#endif
+
+
+void CommonObjectDetectorCUDAImpl::cudaDetect(
+ CommonObjectDetectorBase* base_,
+ cv::Mat img_,
+    std::vector<int>& boxes_x_,
+    std::vector<int>& boxes_y_,
+    std::vector<int>& boxes_w_,
+    std::vector<int>& boxes_h_,
+    std::vector<int>& boxes_label_,
+    std::vector<float>& boxes_score_,
+    std::vector<cv::Mat>& boxes_seg_
+)
+{
+#ifdef WITH_CUDA
+ int input_h = base_->getInputH();
+ int input_w = base_->getInputW();
+ bool with_segmentation = base_->withSegmentation();
+ double thrs_conf = base_->getThrsConf();
+ double thrs_nms = base_->getThrsNms();
+
+    std::vector<cv::Mat> img_batch;
+ img_batch.push_back(img_);
+ // Preprocess
+ cuda_batch_preprocess(img_batch, this->_gpu_buffers[0], input_w, input_h, this->_stream);
+
+ // Run inference
+ if (with_segmentation)
+ {
+ infer_seg(*this->_context, this->_stream, (void**)this->_gpu_buffers, this->_cpu_output_buffer1, this->_cpu_output_buffer2, kBatchSize);
+ }
+ else
+ {
+ infer(*this->_context, this->_stream, (void**)this->_gpu_buffers, this->_cpu_output_buffer, kBatchSize);
+ }
+
+ // NMS
+    std::vector<std::vector<Detection>> res_batch;
+ if (with_segmentation)
+ {
+ batch_nms(res_batch, this->_cpu_output_buffer1, img_batch.size(), kOutputSize1, thrs_conf, thrs_nms);
+ }
+ else
+ {
+ batch_nms(res_batch, this->_cpu_output_buffer, img_batch.size(), kOutputSize, thrs_conf, thrs_nms);
+ }
+
+    std::vector<Detection> res = res_batch[0];
+    std::vector<cv::Mat> masks;
+ if (with_segmentation)
+ {
+ masks = process_mask(&(this->_cpu_output_buffer2[0]), kOutputSize2, res, input_h, input_w);
+ }
+
+
+
+ for (size_t j = 0; j < res.size(); j++) {
+ cv::Rect r = get_rect(img_, res[j].bbox, input_h, input_w);
+ if (r.x < 0) r.x = 0;
+ if (r.y < 0) r.y = 0;
+ if (r.x + r.width >= img_.cols) r.width = img_.cols - r.x - 1;
+ if (r.y + r.height >= img_.rows) r.height = img_.rows - r.y - 1;
+ if (r.width > 5 && r.height > 5)
+ {
+ // cv::rectangle(img_show, r, cv::Scalar(0, 0, 255), 2);
+ // cv::putText(img_show, vehiclenames[(int)res[j].class_id], cv::Point(r.x, r.y - 1), cv::FONT_HERSHEY_PLAIN, 2.2, cv::Scalar(0, 0, 255), 2);
+ boxes_x_.push_back(r.x);
+ boxes_y_.push_back(r.y);
+ boxes_w_.push_back(r.width);
+ boxes_h_.push_back(r.height);
+
+ boxes_label_.push_back((int)res[j].class_id);
+ boxes_score_.push_back(res[j].conf);
+
+ if (with_segmentation)
+ {
+ cv::Mat mask_j = masks[j].clone();
+ boxes_seg_.push_back(mask_j);
+ }
+ }
+ }
+
+#endif
+}
+
+bool CommonObjectDetectorCUDAImpl::cudaSetup(CommonObjectDetectorBase* base_)
+{
+#ifdef WITH_CUDA
+ std::string dataset = base_->getDataset();
+ int input_h = base_->getInputH();
+ int input_w = base_->getInputW();
+ bool with_segmentation = base_->withSegmentation();
+ double thrs_conf = base_->getThrsConf();
+ double thrs_nms = base_->getThrsNms();
+
+ std::string engine_fn = get_home() + SV_MODEL_DIR + dataset + ".engine";
+ if (input_w == 1280)
+ {
+ engine_fn = get_home() + SV_MODEL_DIR + dataset + "_HD.engine";
+ }
+ if (with_segmentation)
+ {
+ base_->setInputH(640);
+ base_->setInputW(640);
+ engine_fn = get_home() + SV_MODEL_DIR + dataset + "_SEG.engine";
+ }
+ std::cout << "Load: " << engine_fn << std::endl;
+ if (!is_file_exist(engine_fn))
+ {
+ throw std::runtime_error("SpireCV (104) Error loading the CommonObject TensorRT model (File Not Exist)");
+ }
+
+ deserialize_engine(engine_fn, &this->_runtime, &this->_engine, &this->_context);
+ CUDA_CHECK(cudaStreamCreate(&this->_stream));
+
+ // Init CUDA preprocessing
+ cuda_preprocess_init(kMaxInputImageSize);
+
+ if (with_segmentation)
+ {
+ // Prepare cpu and gpu buffers
+ this->_prepare_buffers_seg(input_h, input_w);
+ }
+ else
+ {
+ // Prepare cpu and gpu buffers
+ this->_prepare_buffers(input_h, input_w);
+ }
+ return true;
+#endif
+ return false;
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+}
+
diff --git a/algorithm/common_det/cuda/common_det_cuda_impl.h b/algorithm/common_det/cuda/common_det_cuda_impl.h
new file mode 100644
index 0000000..cb44faf
--- /dev/null
+++ b/algorithm/common_det/cuda/common_det_cuda_impl.h
@@ -0,0 +1,58 @@
+#ifndef __SV_COMMON_DET_CUDA__
+#define __SV_COMMON_DET_CUDA__
+
+#include "sv_core.h"
+#include
+#include
+#include
+#include
+#include
+
+
+
+#ifdef WITH_CUDA
+#include
+#include
+#endif
+
+
+
+namespace sv {
+
+
+// CUDA/TensorRT backend for the common object detector. Owns the TensorRT
+// runtime objects and the host/device I/O buffers created by cudaSetup().
+class CommonObjectDetectorCUDAImpl
+{
+public:
+    CommonObjectDetectorCUDAImpl();
+    ~CommonObjectDetectorCUDAImpl();
+
+    // Loads the engine for base_'s dataset and allocates buffers.
+    bool cudaSetup(CommonObjectDetectorBase* base_);
+    // Runs detection on img_, appending one entry per box to the output
+    // vectors (pixel x/y/w/h, class label, confidence, optional seg mask).
+    // NOTE(review): the std::vector element types below were lost in
+    // extraction and are reconstructed from the .cpp (push_back of cv::Rect
+    // fields, class_id cast to int, float conf, cv::Mat masks) — confirm.
+    void cudaDetect(
+        CommonObjectDetectorBase* base_,
+        cv::Mat img_,
+        std::vector<int>& boxes_x_,
+        std::vector<int>& boxes_y_,
+        std::vector<int>& boxes_w_,
+        std::vector<int>& boxes_h_,
+        std::vector<int>& boxes_label_,
+        std::vector<float>& boxes_score_,
+        std::vector<cv::Mat>& boxes_seg_
+    );
+
+#ifdef WITH_CUDA
+    void _prepare_buffers_seg(int input_h, int input_w);
+    void _prepare_buffers(int input_h, int input_w);
+    // TensorRT objects created by cudaSetup (deserialize_engine).
+    nvinfer1::IExecutionContext* _context;
+    nvinfer1::IRuntime* _runtime;
+    nvinfer1::ICudaEngine* _engine;
+    cudaStream_t _stream;
+    float* _gpu_buffers[3];
+    float* _cpu_output_buffer;
+    float* _cpu_output_buffer1;
+    float* _cpu_output_buffer2;
+#endif
+};
+
+
+}
+#endif
diff --git a/algorithm/common_det/cuda/yolov7/calibrator.cpp b/algorithm/common_det/cuda/yolov7/calibrator.cpp
new file mode 100644
index 0000000..ed7ce19
--- /dev/null
+++ b/algorithm/common_det/cuda/yolov7/calibrator.cpp
@@ -0,0 +1,97 @@
+#include "calibrator.h"
+#include "cuda_utils.h"
+#include "utils.h"
+
+#include
+#include
+#include
+#include
+#include
+
+// Letterbox-resize img onto an input_w x input_h canvas: scale to fit while
+// preserving aspect ratio, center the result, and pad with gray (128,128,128).
+static cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h) {
+    float scale_w = input_w / (img.cols * 1.0);
+    float scale_h = input_h / (img.rows * 1.0);
+    int new_w, new_h, off_x, off_y;
+    if (scale_h > scale_w) {
+        // Width is the limiting dimension: fill width, center vertically.
+        new_w = input_w;
+        new_h = scale_w * img.rows;
+        off_x = 0;
+        off_y = (input_h - new_h) / 2;
+    } else {
+        // Height is the limiting dimension: fill height, center horizontally.
+        new_h = input_h;
+        new_w = scale_h * img.cols;
+        off_x = (input_w - new_w) / 2;
+        off_y = 0;
+    }
+    cv::Mat resized(new_h, new_w, CV_8UC3);
+    cv::resize(img, resized, resized.size(), 0, 0, cv::INTER_LINEAR);
+    cv::Mat canvas(input_h, input_w, CV_8UC3, cv::Scalar(128, 128, 128));
+    resized.copyTo(canvas(cv::Rect(off_x, off_y, resized.cols, resized.rows)));
+    return canvas;
+}
+
+// Builds an INT8 entropy calibrator: records input geometry and table/blob
+// names, allocates a device buffer for one batch (3 * W * H * batch floats),
+// and enumerates the calibration images found in img_dir.
+Int8EntropyCalibrator2::Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache)
+ : batchsize_(batchsize),
+ input_w_(input_w),
+ input_h_(input_h),
+ img_idx_(0),
+ img_dir_(img_dir),
+ calib_table_name_(calib_table_name),
+ input_blob_name_(input_blob_name),
+ read_cache_(read_cache) {
+ input_count_ = 3 * input_w * input_h * batchsize;
+ CUDA_CHECK(cudaMalloc(&device_input_, input_count_ * sizeof(float)));
+ read_files_in_dir(img_dir, img_files_);
+}
+
+// Releases the device-side batch buffer allocated in the constructor.
+Int8EntropyCalibrator2::~Int8EntropyCalibrator2() {
+ CUDA_CHECK(cudaFree(device_input_));
+}
+
+// Returns the calibration batch size fixed at construction time.
+int Int8EntropyCalibrator2::getBatchSize() const TRT_NOEXCEPT {
+ return batchsize_;
+}
+
+// Fills bindings[0] with the next preprocessed batch of calibration images.
+// Returns false when the image list is exhausted or an image fails to load.
+bool Int8EntropyCalibrator2::getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT {
+    if (img_idx_ + batchsize_ > (int)img_files_.size()) {
+        return false;
+    }
+
+    // FIX(review): element type was lost in extraction; reconstructed as
+    // cv::Mat from the push_back of preprocess_img's result below.
+    std::vector<cv::Mat> input_imgs_;
+    for (int i = img_idx_; i < img_idx_ + batchsize_; i++) {
+        std::cout << img_files_[i] << " " << i << std::endl;
+        cv::Mat temp = cv::imread(img_dir_ + img_files_[i]);
+        if (temp.empty()) {
+            std::cerr << "Fatal error: image cannot open!" << std::endl;
+            return false;
+        }
+        cv::Mat pr_img = preprocess_img(temp, input_w_, input_h_);
+        input_imgs_.push_back(pr_img);
+    }
+    img_idx_ += batchsize_;
+    // NHWC uint8 -> NCHW float blob scaled to [0,1], BGR swapped to RGB.
+    cv::Mat blob = cv::dnn::blobFromImages(input_imgs_, 1.0 / 255.0, cv::Size(input_w_, input_h_), cv::Scalar(0, 0, 0), true, false);
+
+    // FIX(review): ptr's template argument was lost in extraction; the blob
+    // is CV_32F, so read it as float — confirm against upstream.
+    CUDA_CHECK(cudaMemcpy(device_input_, blob.ptr<float>(0), input_count_ * sizeof(float), cudaMemcpyHostToDevice));
+    assert(!strcmp(names[0], input_blob_name_));
+    bindings[0] = device_input_;
+    return true;
+}
+
+// Returns the previously written INT8 calibration table, or nullptr when the
+// cache is absent, unreadable, or caching is disabled (read_cache_ == false).
+// On return, length holds the number of bytes in the cache.
+const void* Int8EntropyCalibrator2::readCalibrationCache(size_t& length) TRT_NOEXCEPT {
+    std::cout << "reading calib cache: " << calib_table_name_ << std::endl;
+    calib_cache_.clear();
+    std::ifstream input(calib_table_name_, std::ios::binary);
+    input >> std::noskipws;  // keep whitespace bytes — this is binary data
+    if (read_cache_ && input.good()) {
+        // FIX(review): the iterator's template argument was lost in
+        // extraction; restored as char since calib_cache_ is a byte buffer.
+        std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(), std::back_inserter(calib_cache_));
+    }
+    length = calib_cache_.size();
+    return length ? calib_cache_.data() : nullptr;
+}
+
+// Persists the calibration table produced by TensorRT to calib_table_name_.
+void Int8EntropyCalibrator2::writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT {
+    std::cout << "writing calib cache: " << calib_table_name_ << " size: " << length << std::endl;
+    std::ofstream output(calib_table_name_, std::ios::binary);
+    // FIX(review): the cast's template argument was lost in extraction;
+    // ostream::write takes const char*, so that is the restored target type.
+    output.write(reinterpret_cast<const char*>(cache), length);
+}
+
diff --git a/algorithm/common_det/cuda/yolov7/calibrator.h b/algorithm/common_det/cuda/yolov7/calibrator.h
new file mode 100644
index 0000000..ed77b5f
--- /dev/null
+++ b/algorithm/common_det/cuda/yolov7/calibrator.h
@@ -0,0 +1,36 @@
+#pragma once
+
+#include "macros.h"
+#include
+#include
+
+//! \class Int8EntropyCalibrator2
+//!
+//! \brief Implements Entropy calibrator 2.
+//! CalibrationAlgoType is kENTROPY_CALIBRATION_2.
+//!
+class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2 {
+  public:
+    Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache = true);
+
+    virtual ~Int8EntropyCalibrator2();
+    int getBatchSize() const TRT_NOEXCEPT override;
+    bool getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT override;
+    const void* readCalibrationCache(size_t& length) TRT_NOEXCEPT override;
+    void writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT override;
+
+  private:
+    int batchsize_;
+    int input_w_;
+    int input_h_;
+    int img_idx_;                        // index of the next unread calibration image
+    std::string img_dir_;
+    // FIX(review): the std::vector element types were lost in extraction;
+    // restored from the .cpp usage (string filenames / byte cache).
+    std::vector<std::string> img_files_;
+    size_t input_count_;                 // floats per batch: 3 * W * H * batchsize
+    std::string calib_table_name_;
+    const char* input_blob_name_;
+    bool read_cache_;
+    void* device_input_;                 // device buffer holding one preprocessed batch
+    std::vector<char> calib_cache_;
+};
+
diff --git a/algorithm/common_det/cuda/yolov7/config.h b/algorithm/common_det/cuda/yolov7/config.h
new file mode 100644
index 0000000..a6e96f0
--- /dev/null
+++ b/algorithm/common_det/cuda/yolov7/config.h
@@ -0,0 +1,55 @@
+#pragma once
+
+/* --------------------------------------------------------
+ * These configs are related to tensorrt model, if these are changed,
+ * please re-compile and re-serialize the tensorrt model.
+ * --------------------------------------------------------*/
+
+// For INT8, you need prepare the calibration dataset, please refer to
+// https://github.com/wang-xinyu/tensorrtx/tree/master/yolov5#int8-quantization
+#define USE_FP16 // set USE_INT8 or USE_FP16 or USE_FP32
+
+// These are used to define input/output tensor names,
+// you can set them to whatever you want.
+const static char* kInputTensorName = "data";
+const static char* kOutputTensorName = "prob";
+
+// Detection model and Segmentation model' number of classes
+// NOTE(review): kNumClass/kInputH/kInputW are commented out below —
+// presumably supplied at runtime by the SpireCV detector base; confirm.
+// constexpr static int kNumClass = 80;
+
+// Classfication model's number of classes
+constexpr static int kClsNumClass = 1000;
+
+constexpr static int kBatchSize = 1;
+
+// Yolo's input width and height must by divisible by 32
+// constexpr static int kInputH = 640;
+// constexpr static int kInputW = 640;
+
+// Classfication model's input shape
+constexpr static int kClsInputH = 224;
+constexpr static int kClsInputW = 224;
+
+// Maximum number of output bounding boxes from yololayer plugin.
+// That is maximum number of output bounding boxes before NMS.
+constexpr static int kMaxNumOutputBbox = 1000;
+
+// Anchors per YOLO detection head.
+constexpr static int kNumAnchor = 3;
+
+// The bboxes whose confidence is lower than kIgnoreThresh will be ignored in yololayer plugin.
+constexpr static float kIgnoreThresh = 0.1f;
+
+/* --------------------------------------------------------
+ * These configs are NOT related to tensorrt model, if these are changed,
+ * please re-compile, but no need to re-serialize the tensorrt model.
+ * --------------------------------------------------------*/
+
+// NMS overlapping thresh and final detection confidence thresh
+const static float kNmsThresh = 0.45f;
+const static float kConfThresh = 0.5f;
+
+// CUDA device index used for inference.
+const static int kGpuId = 0;
+
+// If your image size is larger than 4096 * 3112, please increase this value
+const static int kMaxInputImageSize = 4096 * 3112;
+
diff --git a/algorithm/common_det/cuda/yolov7/cuda_utils.h b/algorithm/common_det/cuda/yolov7/cuda_utils.h
new file mode 100644
index 0000000..8fbd319
--- /dev/null
+++ b/algorithm/common_det/cuda/yolov7/cuda_utils.h
@@ -0,0 +1,18 @@
+#ifndef TRTX_CUDA_UTILS_H_
+#define TRTX_CUDA_UTILS_H_
+
+// FIX(review): the include argument was lost in extraction; cudaError_t and
+// cudaSuccess come from the CUDA runtime API header — confirm upstream.
+#include <cuda_runtime_api.h>
+
+// Wraps a CUDA runtime call, reporting any error code with its source
+// location to stderr and aborting via assert(0).
+#ifndef CUDA_CHECK
+#define CUDA_CHECK(callstr)\
+    {\
+        cudaError_t error_code = callstr;\
+        if (error_code != cudaSuccess) {\
+            std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__;\
+            assert(0);\
+        }\
+    }
+#endif  // CUDA_CHECK
+
+#endif  // TRTX_CUDA_UTILS_H_
+
diff --git a/algorithm/common_det/cuda/yolov7/logging.h b/algorithm/common_det/cuda/yolov7/logging.h
new file mode 100644
index 0000000..6b79a8b
--- /dev/null
+++ b/algorithm/common_det/cuda/yolov7/logging.h
@@ -0,0 +1,504 @@
+/*
+ * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TENSORRT_LOGGING_H
+#define TENSORRT_LOGGING_H
+
+#include "NvInferRuntimeCommon.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "macros.h"
+
+using Severity = nvinfer1::ILogger::Severity;
+
+// Stream buffer that prefixes each flushed message with a timestamp and a
+// severity tag before forwarding it to the wrapped output stream.
+class LogStreamConsumerBuffer : public std::stringbuf
+{
+public:
+    LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog)
+        : mOutput(stream)
+        , mPrefix(prefix)
+        , mShouldLog(shouldLog)
+    {
+    }
+
+    // FIX(review): the original move constructor only carried mOutput,
+    // leaving mPrefix empty and mShouldLog uninitialized (undefined behavior
+    // when later read); carry all state from the source buffer.
+    LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other)
+        : mOutput(other.mOutput)
+        , mPrefix(other.mPrefix)
+        , mShouldLog(other.mShouldLog)
+    {
+    }
+
+    ~LogStreamConsumerBuffer()
+    {
+        // std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence
+        // std::streambuf::pptr() gives a pointer to the current position of the output sequence
+        // if the pointer to the beginning is not equal to the pointer to the current position,
+        // call putOutput() to log the output to the stream
+        if (pbase() != pptr())
+        {
+            putOutput();
+        }
+    }
+
+    // synchronizes the stream buffer and returns 0 on success
+    // synchronizing the stream buffer consists of inserting the buffer contents into the stream,
+    // resetting the buffer and flushing the stream
+    virtual int sync()
+    {
+        putOutput();
+        return 0;
+    }
+
+    void putOutput()
+    {
+        if (mShouldLog)
+        {
+            // prepend timestamp
+            std::time_t timestamp = std::time(nullptr);
+            // FIX(review): "×tamp" was a mangled "&timestamp" (HTML-entity
+            // corruption of the address-of operator during extraction).
+            tm* tm_local = std::localtime(&timestamp);
+            std::cout << "[";
+            std::cout << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/";
+            std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] ";
+            // std::stringbuf::str() gets the string contents of the buffer
+            // insert the buffer contents pre-appended by the appropriate prefix into the stream
+            mOutput << mPrefix << str();
+            // set the buffer to empty
+            str("");
+            // flush the stream
+            mOutput.flush();
+        }
+    }
+
+    void setShouldLog(bool shouldLog)
+    {
+        mShouldLog = shouldLog;
+    }
+
+private:
+    std::ostream& mOutput;
+    std::string mPrefix;
+    bool mShouldLog;
+};
+
+//!
+//! \class LogStreamConsumerBase
+//! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer
+//!
+// Holds the LogStreamConsumerBuffer so it is constructed before the
+// std::ostream base of LogStreamConsumer (base-class initialization order).
+class LogStreamConsumerBase
+{
+public:
+ LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog)
+ : mBuffer(stream, prefix, shouldLog)
+ {
+ }
+
+protected:
+ LogStreamConsumerBuffer mBuffer;
+};
+
+//!
+//! \class LogStreamConsumer
+//! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages.
+//! Order of base classes is LogStreamConsumerBase and then std::ostream.
+//! This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field
+//! in LogStreamConsumer and then the address of the buffer is passed to std::ostream.
+//! This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream.
+//! Please do not change the order of the parent classes.
+//!
+class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream
+{
+public:
+ //! \brief Creates a LogStreamConsumer which logs messages with level severity.
+ //! Reportable severity determines if the messages are severe enough to be logged.
+ LogStreamConsumer(Severity reportableSeverity, Severity severity)
+ : LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity)
+ , std::ostream(&mBuffer) // links the stream buffer with the stream
+ , mShouldLog(severity <= reportableSeverity)
+ , mSeverity(severity)
+ {
+ }
+
+ // Move constructor: rebuilds the buffer from the source's severity state
+ // and re-links this ostream to the freshly constructed buffer.
+ LogStreamConsumer(LogStreamConsumer&& other)
+ : LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog)
+ , std::ostream(&mBuffer) // links the stream buffer with the stream
+ , mShouldLog(other.mShouldLog)
+ , mSeverity(other.mSeverity)
+ {
+ }
+
+ // Re-evaluates whether this consumer's severity clears the new threshold.
+ void setReportableSeverity(Severity reportableSeverity)
+ {
+ mShouldLog = mSeverity <= reportableSeverity;
+ mBuffer.setShouldLog(mShouldLog);
+ }
+
+private:
+ // Warnings and errors go to stderr; info and below to stdout.
+ static std::ostream& severityOstream(Severity severity)
+ {
+ return severity >= Severity::kINFO ? std::cout : std::cerr;
+ }
+
+ static std::string severityPrefix(Severity severity)
+ {
+ switch (severity)
+ {
+ case Severity::kINTERNAL_ERROR: return "[F] ";
+ case Severity::kERROR: return "[E] ";
+ case Severity::kWARNING: return "[W] ";
+ case Severity::kINFO: return "[I] ";
+ case Severity::kVERBOSE: return "[V] ";
+ default: assert(0); return "";
+ }
+ }
+
+ bool mShouldLog;
+ Severity mSeverity;
+};
+
+//! \class Logger
+//!
+//! \brief Class which manages logging of TensorRT tools and samples
+//!
+//! \details This class provides a common interface for TensorRT tools and samples to log information to the console,
+//! and supports logging two types of messages:
+//!
+//! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal)
+//! - Test pass/fail messages
+//!
+//! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is
+//! that the logic for controlling the verbosity and formatting of sample output is centralized in one location.
+//!
+//! In the future, this class could be extended to support dumping test results to a file in some standard format
+//! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run).
+//!
+//! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger
+//! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT
+//! library and messages coming from the sample.
+//!
+//! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the
+//! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger
+//! object.
+
+class Logger : public nvinfer1::ILogger
+{
+public:
+ Logger(Severity severity = Severity::kWARNING)
+ : mReportableSeverity(severity)
+ {
+ }
+
+ //!
+ //! \enum TestResult
+ //! \brief Represents the state of a given test
+ //!
+ enum class TestResult
+ {
+ kRUNNING, //!< The test is running
+ kPASSED, //!< The test passed
+ kFAILED, //!< The test failed
+ kWAIVED //!< The test was waived
+ };
+
+ //!
+ //! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger
+ //! \return The nvinfer1::ILogger associated with this Logger
+ //!
+ //! TODO Once all samples are updated to use this method to register the logger with TensorRT,
+ //! we can eliminate the inheritance of Logger from ILogger
+ //!
+ nvinfer1::ILogger& getTRTLogger()
+ {
+ return *this;
+ }
+
+ //!
+ //! \brief Implementation of the nvinfer1::ILogger::log() virtual method
+ //!
+ //! Note samples should not be calling this function directly; it will eventually go away once we eliminate the
+ //! inheritance from nvinfer1::ILogger
+ //!
+ void log(Severity severity, const char* msg) TRT_NOEXCEPT override
+ {
+ LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl;
+ }
+
+ //!
+ //! \brief Method for controlling the verbosity of logging output
+ //!
+ //! \param severity The logger will only emit messages that have severity of this level or higher.
+ //!
+ void setReportableSeverity(Severity severity)
+ {
+ mReportableSeverity = severity;
+ }
+
+ //!
+ //! \brief Opaque handle that holds logging information for a particular test
+ //!
+ //! This object is an opaque handle to information used by the Logger to print test results.
+ //! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used
+ //! with Logger::reportTest{Start,End}().
+ //!
+ class TestAtom
+ {
+ public:
+ TestAtom(TestAtom&&) = default;
+
+ private:
+ friend class Logger;
+
+ TestAtom(bool started, const std::string& name, const std::string& cmdline)
+ : mStarted(started)
+ , mName(name)
+ , mCmdline(cmdline)
+ {
+ }
+
+ bool mStarted;
+ std::string mName;
+ std::string mCmdline;
+ };
+
+ //!
+ //! \brief Define a test for logging
+ //!
+ //! \param[in] name The name of the test. This should be a string starting with
+ //! "TensorRT" and containing dot-separated strings containing
+ //! the characters [A-Za-z0-9_].
+ //! For example, "TensorRT.sample_googlenet"
+ //! \param[in] cmdline The command line used to reproduce the test
+ //!
+ //! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
+ //!
+ static TestAtom defineTest(const std::string& name, const std::string& cmdline)
+ {
+ return TestAtom(false, name, cmdline);
+ }
+
+ //!
+ //! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments
+ //! as input
+ //!
+ //! \param[in] name The name of the test
+ //! \param[in] argc The number of command-line arguments
+ //! \param[in] argv The array of command-line arguments (given as C strings)
+ //!
+ //! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
+ static TestAtom defineTest(const std::string& name, int argc, char const* const* argv)
+ {
+ auto cmdline = genCmdlineString(argc, argv);
+ return defineTest(name, cmdline);
+ }
+
+ //!
+ //! \brief Report that a test has started.
+ //!
+ //! \pre reportTestStart() has not been called yet for the given testAtom
+ //!
+ //! \param[in] testAtom The handle to the test that has started
+ //!
+ static void reportTestStart(TestAtom& testAtom)
+ {
+ reportTestResult(testAtom, TestResult::kRUNNING);
+ assert(!testAtom.mStarted);
+ testAtom.mStarted = true;
+ }
+
+ //!
+ //! \brief Report that a test has ended.
+ //!
+ //! \pre reportTestStart() has been called for the given testAtom
+ //!
+ //! \param[in] testAtom The handle to the test that has ended
+ //! \param[in] result The result of the test. Should be one of TestResult::kPASSED,
+ //! TestResult::kFAILED, TestResult::kWAIVED
+ //!
+ static void reportTestEnd(const TestAtom& testAtom, TestResult result)
+ {
+ assert(result != TestResult::kRUNNING);
+ assert(testAtom.mStarted);
+ reportTestResult(testAtom, result);
+ }
+
+ static int reportPass(const TestAtom& testAtom)
+ {
+ reportTestEnd(testAtom, TestResult::kPASSED);
+ return EXIT_SUCCESS;
+ }
+
+ static int reportFail(const TestAtom& testAtom)
+ {
+ reportTestEnd(testAtom, TestResult::kFAILED);
+ return EXIT_FAILURE;
+ }
+
+ static int reportWaive(const TestAtom& testAtom)
+ {
+ reportTestEnd(testAtom, TestResult::kWAIVED);
+ return EXIT_SUCCESS;
+ }
+
+ static int reportTest(const TestAtom& testAtom, bool pass)
+ {
+ return pass ? reportPass(testAtom) : reportFail(testAtom);
+ }
+
+ Severity getReportableSeverity() const
+ {
+ return mReportableSeverity;
+ }
+
+private:
+ //!
+ //! \brief returns an appropriate string for prefixing a log message with the given severity
+ //!
+ static const char* severityPrefix(Severity severity)
+ {
+ switch (severity)
+ {
+ case Severity::kINTERNAL_ERROR: return "[F] ";
+ case Severity::kERROR: return "[E] ";
+ case Severity::kWARNING: return "[W] ";
+ case Severity::kINFO: return "[I] ";
+ case Severity::kVERBOSE: return "[V] ";
+ default: assert(0); return "";
+ }
+ }
+
+ //!
+ //! \brief returns an appropriate string for prefixing a test result message with the given result
+ //!
+ static const char* testResultString(TestResult result)
+ {
+ switch (result)
+ {
+ case TestResult::kRUNNING: return "RUNNING";
+ case TestResult::kPASSED: return "PASSED";
+ case TestResult::kFAILED: return "FAILED";
+ case TestResult::kWAIVED: return "WAIVED";
+ default: assert(0); return "";
+ }
+ }
+
+ //!
+ //! \brief returns an appropriate output stream (cout or cerr) to use with the given severity
+ //!
+ static std::ostream& severityOstream(Severity severity)
+ {
+ return severity >= Severity::kINFO ? std::cout : std::cerr;
+ }
+
+ //!
+ //! \brief method that implements logging test results
+ //!
+ static void reportTestResult(const TestAtom& testAtom, TestResult result)
+ {
+ severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # "
+ << testAtom.mCmdline << std::endl;
+ }
+
+ //!
+ //! \brief generate a command line string from the given (argc, argv) values
+ //!
+ static std::string genCmdlineString(int argc, char const* const* argv)
+ {
+ std::stringstream ss;
+ for (int i = 0; i < argc; i++)
+ {
+ if (i > 0)
+ ss << " ";
+ ss << argv[i];
+ }
+ return ss.str();
+ }
+
+ // Minimum severity a message must meet to be emitted.
+ Severity mReportableSeverity;
+};
+
+namespace
+{
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE
+//!
+//! Example usage:
+//!
+//! LOG_VERBOSE(logger) << "hello world" << std::endl;
+//!
+// Builds a stream consumer that emits at kVERBOSE against the logger's threshold.
+inline LogStreamConsumer LOG_VERBOSE(const Logger& logger)
+{
+    const Severity level = Severity::kVERBOSE;
+    return LogStreamConsumer(logger.getReportableSeverity(), level);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO
+//!
+//! Example usage:
+//!
+//! LOG_INFO(logger) << "hello world" << std::endl;
+//!
+// Builds a stream consumer that emits at kINFO against the logger's threshold.
+inline LogStreamConsumer LOG_INFO(const Logger& logger)
+{
+    const Severity level = Severity::kINFO;
+    return LogStreamConsumer(logger.getReportableSeverity(), level);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING
+//!
+//! Example usage:
+//!
+//! LOG_WARN(logger) << "hello world" << std::endl;
+//!
+// Builds a stream consumer that emits at kWARNING against the logger's threshold.
+inline LogStreamConsumer LOG_WARN(const Logger& logger)
+{
+    const Severity level = Severity::kWARNING;
+    return LogStreamConsumer(logger.getReportableSeverity(), level);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR
+//!
+//! Example usage:
+//!
+//! LOG_ERROR(logger) << "hello world" << std::endl;
+//!
+// Builds a stream consumer that emits at kERROR against the logger's threshold.
+inline LogStreamConsumer LOG_ERROR(const Logger& logger)
+{
+    const Severity level = Severity::kERROR;
+    return LogStreamConsumer(logger.getReportableSeverity(), level);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR
+// ("fatal" severity)
+//!
+//! Example usage:
+//!
+//! LOG_FATAL(logger) << "hello world" << std::endl;
+//!
+// Builds a stream consumer that emits at kINTERNAL_ERROR ("fatal") against
+// the logger's threshold.
+inline LogStreamConsumer LOG_FATAL(const Logger& logger)
+{
+    const Severity level = Severity::kINTERNAL_ERROR;
+    return LogStreamConsumer(logger.getReportableSeverity(), level);
+}
+
+} // anonymous namespace
+
+#endif // TENSORRT_LOGGING_H
diff --git a/algorithm/common_det/cuda/yolov7/macros.h b/algorithm/common_det/cuda/yolov7/macros.h
new file mode 100644
index 0000000..17339a2
--- /dev/null
+++ b/algorithm/common_det/cuda/yolov7/macros.h
@@ -0,0 +1,29 @@
+#ifndef __MACROS_H
+#define __MACROS_H
+
+// FIX(review): the include argument was lost in extraction; NV_TENSORRT_MAJOR
+// used below is defined by the TensorRT headers, so <NvInfer.h> is restored —
+// confirm against upstream.
+#include <NvInfer.h>
+
+// API: symbol export/import annotation for the shared-library build
+// (dllexport/dllimport on MSVC, default visibility elsewhere).
+#ifdef API_EXPORTS
+#if defined(_MSC_VER)
+#define API __declspec(dllexport)
+#else
+#define API __attribute__((visibility("default")))
+#endif
+#else
+
+#if defined(_MSC_VER)
+#define API __declspec(dllimport)
+#else
+#define API
+#endif
+#endif  // API_EXPORTS
+
+// TensorRT 8 made several interface methods noexcept and const-qualified
+// enqueue(); these macros keep one source tree compatible with TRT 7 and 8.
+#if NV_TENSORRT_MAJOR >= 8
+#define TRT_NOEXCEPT noexcept
+#define TRT_CONST_ENQUEUE const
+#else
+#define TRT_NOEXCEPT
+#define TRT_CONST_ENQUEUE
+#endif
+
+#endif  // __MACROS_H
diff --git a/algorithm/common_det/cuda/yolov7/model.cpp b/algorithm/common_det/cuda/yolov7/model.cpp
new file mode 100644
index 0000000..467cd47
--- /dev/null
+++ b/algorithm/common_det/cuda/yolov7/model.cpp
@@ -0,0 +1,628 @@
+#include "model.h"
+#include "calibrator.h"
+#include "config.h"
+#include "yololayer.h"
+
+#include
+#include
+#include