diff --git a/CMakeLists.txt b/CMakeLists.txt
index e2d3c21..8ac6758 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -299,6 +299,8 @@ add_executable(SingleObjectTracking samples/demo/single_object_tracking.cpp)
 target_link_libraries(SingleObjectTracking sv_world)
 add_executable(MultipleObjectTracking samples/demo/multiple_object_tracking.cpp)
 target_link_libraries(MultipleObjectTracking sv_world)
+add_executable(EvalMOTMetric samples/demo/eval_MOT_metric.cpp)
+target_link_libraries(EvalMOTMetric -lstdc++fs sv_world)
 add_executable(ColorLineDetection samples/demo/color_line_detect.cpp)
 target_link_libraries(ColorLineDetection sv_world)
 add_executable(UdpDetectionInfoReceiver samples/demo/udp_detection_info_receiver.cpp)
@@ -364,14 +366,12 @@ if(PLATFORM STREQUAL "JETSON")
 file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/build/${PROJECT_NAME}Config.cmake.in [[
 @PACKAGE_INIT@
 find_package(OpenCV 4 REQUIRED)
-find_package(Eigen3 REQUIRED)
 link_directories(/usr/local/cuda/lib64)
 set(SV_INCLUDE_DIRS
   @SV_INSTALL_PREFIX@/include
   /usr/include/x86_64-linux-gnu
   /usr/local/cuda/include
   ${OpenCV_INCLUDE_DIRS}
-  ${EIGEN3_INCLUDE_DIRS}
   /usr/include/gstreamer-1.0
   /usr/local/include/gstreamer-1.0
   /usr/include/glib-2.0
@@ -390,7 +390,6 @@ elseif(PLATFORM STREQUAL "X86_CUDA")
 file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/build/${PROJECT_NAME}Config.cmake.in [[
 @PACKAGE_INIT@
 find_package(OpenCV 4 REQUIRED)
-find_package(Eigen3 REQUIRED)
 find_package(fmt REQUIRED)
 link_directories(/usr/local/cuda/lib64)
 set(SV_INCLUDE_DIRS
@@ -398,7 +397,6 @@ set(SV_INCLUDE_DIRS
   /usr/include/x86_64-linux-gnu
   /usr/local/cuda/include
   ${OpenCV_INCLUDE_DIRS}
-  ${EIGEN3_INCLUDE_DIRS}
 )
 set(SV_LIBRARIES
   @SV_INSTALL_PREFIX@/lib/libsv_yoloplugins.so
@@ -413,13 +411,11 @@ elseif(PLATFORM STREQUAL "X86_INTEL")
 file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/build/${PROJECT_NAME}Config.cmake.in [[
 @PACKAGE_INIT@
 find_package(OpenCV 4 REQUIRED)
-find_package(Eigen3 REQUIRED)
 find_package(fmt REQUIRED)
 set(SV_INCLUDE_DIRS
   @SV_INSTALL_PREFIX@/include
   /usr/include/x86_64-linux-gnu
   ${OpenCV_INCLUDE_DIRS}
-  ${EIGEN3_INCLUDE_DIRS}
 )
 set(SV_LIBRARIES
   @SV_INSTALL_PREFIX@/lib/libsv_world.so
diff --git a/algorithm/mot/sv_mot.cpp b/algorithm/mot/sv_mot.cpp
index 5d886fd..c60f376 100644
--- a/algorithm/mot/sv_mot.cpp
+++ b/algorithm/mot/sv_mot.cpp
@@ -23,8 +23,9 @@ MultipleObjectTracker::~MultipleObjectTracker()
     delete this->_sort_impl;
 }
 
-void MultipleObjectTracker::track(cv::Mat img_, TargetsInFrame& tgts_)
+sv::TargetsInFrame MultipleObjectTracker::track(cv::Mat img_, TargetsInFrame& tgts_)
 {
+    sv::TargetsInFrame person_tgts(tgts_.frame_id);
     if (!this->_params_loaded)
     {
         this->_load();
@@ -33,8 +34,16 @@ void MultipleObjectTracker::track(cv::Mat img_, TargetsInFrame& tgts_)
     if ("sort" == this->_algorithm && this->_sort_impl)
     {
         this->_detector->detect(img_, tgts_);
-        this->_sort_impl->update(tgts_);
+        for (auto target : tgts_.targets)
+        {
+            if (target.category_id == 0)
+            {
+                person_tgts.targets.push_back(target);
+            }
+        }
+        this->_sort_impl->update(person_tgts);
     }
+    return person_tgts;
 }
 
 void MultipleObjectTracker::init(CommonObjectDetector* detector_)
@@ -210,22 +219,27 @@ void SORT::update(TargetsInFrame& tgts)
             tracklet.age = 0;
             tracklet.hits = 1;
-            tracklet.misses = 0;
+            //tracklet.misses = 0;
             tracklet.frame_id = tgts.frame_id;
             tracklet.category_id = tgts.targets[i].category_id;
-            tracklet.tentative = true;
-
+            if (tgts.frame_id == 0)
+            {
+                tracklet.tentative = false;
+            }
+            else
+            {
+                tracklet.tentative = true;
+            }
             // initiate the motion
             pair<Matrix<double, 8, 1>, Matrix<double, 8, 8> > motion = kf.initiate(tracklet.bbox);
             tracklet.mean = motion.first;
             tracklet.covariance = motion.second;
             this->_tracklets.push_back(tracklet);
-        } 
+        }
     }
     else
     {
-        // cout << "frame id:" << tgts.frame_id << endl;
         vector<vector<double> > iouMatrix(this->_tracklets.size(), vector<double> (tgts.targets.size(), 0));
         for (int i=0; i<this->_tracklets.size(); i++)
@@ -278,18 +292,15 @@ void SORT::update(TargetsInFrame& tgts)
         std::vector<vector<double> > ().swap(iouMatrix);
            tracklet.id = ++ this->_next_tracklet_id;
-            tracklet.bbox << box.x1+(box.x2-box.x1)/2, (double)(box.y1+(box.y2-box.y1)/2), box.x2-box.x1, box.y2-box.y1;
+            tracklet.bbox << box.x1+(box.x2-box.x1)/2, (double)(box.y1+(box.y2-box.y1)/2), box.x2-box.x1, box.y2-box.y1; // c_x, c_y, w, h
             tracklet.age = 0;
             tracklet.hits = 1;
-            tracklet.misses = 0;
             tracklet.frame_id = tgts.frame_id;
             tracklet.category_id = tgts.targets[i].category_id;
             tracklet.tentative = true;
@@ -320,7 +331,7 @@ void SORT::update(TargetsInFrame& tgts)
         {
             tracklet.tentative = false;
         }
-        if ((tgts.frame_id-tracklet.frame_id <= _max_age) || (!tracklet.tentative && tracklet.frame_id == tgts.frame_id))
+        if ((tgts.frame_id-tracklet.frame_id <= _max_age) && !(tracklet.tentative && tracklet.frame_id != tgts.frame_id))
         {
             _new_tracklets.push_back(tracklet);
         }
@@ -456,9 +467,6 @@ vector<pair<int, int> > SORT::_hungarian(vector<vector<double> > costMatrix)
     std::vector<std::pair<int, int>> assignmentPairs;
     for (size_t row = 0; row < numRows; ++row) {
         int col = rowAssignment[row];
-        //if (col != -1) {
-        //    assignmentPairs.emplace_back(row, col);
-        //    }
         if (col != -1) {
             if (col >= numCols) {
                 col = -1;
diff --git a/include/sv_mot.h b/include/sv_mot.h
index c5c639c..07fb5b8 100644
--- a/include/sv_mot.h
+++ b/include/sv_mot.h
@@ -8,8 +8,8 @@
 #include 
 #include 
 #include 
-//#include 
-#include 
+#include 
+
 
 
 namespace sv {
@@ -24,7 +24,7 @@ public:
     ~MultipleObjectTracker();
 
     void init(CommonObjectDetector* detector_);
-    void track(cv::Mat img_, TargetsInFrame& tgts_);
+    sv::TargetsInFrame track(cv::Mat img_, TargetsInFrame& tgts_);
 
 private:
     void _load();
diff --git a/samples/demo/eval_MOT_metric.cpp b/samples/demo/eval_MOT_metric.cpp
new file mode 100644
index 0000000..2bdc5d2
--- /dev/null
+++ b/samples/demo/eval_MOT_metric.cpp
@@ -0,0 +1,76 @@
+#include <iostream>
+#include <fstream>
+#include <experimental/filesystem>
+// Include the SpireCV SDK header
+#include <sv_world.h>
+
+using namespace std;
+namespace fs = std::experimental::filesystem;
+
+int main(int argc, char *argv[]) {
+    // Instantiate
+    sv::CommonObjectDetector cod;
+    // Load camera parameters manually; when using an Amov gimbal or camera such as the G1, this step can be skipped and the camera parameter file is downloaded automatically
+    cod.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
+    cod.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
+    sv::MultipleObjectTracker mot;
+    // Load camera parameters manually; when using an Amov gimbal or camera such as the G1, this step can be skipped and the camera parameter file is downloaded automatically
+    mot.loadCameraParams(sv::get_home() + "/SpireCV/confs/calib_webcam_1280x720.yaml");
+    mot.loadAlgorithmParams(sv::get_home() + "/SpireCV/confs/sv_algorithm_params.json");
+    mot.init(&cod);
+
+    // Open the camera
+    /*
+    sv::Camera cap;
+    cap.setWH(mot.image_width, mot.image_height);
+    cap.setFps(30);
+    cap.open(sv::CameraType::V4L2CAM, 0);  // CameraID 0
+    */
+    std::string mot17_folder_path = sv::get_home()+"/SpireCV/dataset/MOT17/train/";
+    std::string pred_file_path = sv::get_home()+"/SpireCV/dataset/pred_mot17/data/";
+    for (auto & seq_path : std::experimental::filesystem::directory_iterator(mot17_folder_path))
+    {
+        // mkdir pred dirs and touch pred_files
+        string pred_file = pred_file_path + seq_path.path().filename().string() + ".txt";
+        fs::create_directories(pred_file_path);
+        std::ofstream file(pred_file);
+        // listdir sequence images
+        string seq_image_paths = mot17_folder_path + seq_path.path().filename().string() + "/img1";
+        // cout << seq_image_paths << endl;
+        std::vector<cv::String> seq_image_file_path;
+        cv::glob(seq_image_paths, seq_image_file_path);
+
+        // eval MOT algorithms
+        cv::Mat img;
+        int frame_id = 0;
+        while (frame_id < seq_image_file_path.size())
+        {
+            img = cv::imread(seq_image_file_path[frame_id]);
+            // Instantiate SpireCV's single-frame detection result interface class TargetsInFrame
+            sv::TargetsInFrame tgts(frame_id++);
+            // Read one frame into img
+            //cap.read(img);
+            //cv::resize(img, img, cv::Size(mot.image_width, mot.image_height));
+
+            // Run common object detection
+            sv::TargetsInFrame person_tgts = mot.track(img, tgts);
+            // Visualize the detection results, overlaid on img
+            sv::drawTargetsInFrame(img, person_tgts);
+            // printf(" Frame Size (width, height) = (%d, %d)\n", tgts.width, tgts.height);
+            for (auto target : person_tgts.targets)
+            {
+                int center_x = int(target.cx * tgts.width);
+                int center_y = int(target.cy * tgts.height);
+                int width = int(target.w * tgts.width);
+                int height = int(target.h * tgts.height);
+                double conf = target.score;
+                // MOTChallenge result format: frame, id, bb_left, bb_top, bb_width, bb_height, conf, x, y, z
+                file << frame_id << "," << target.tracked_id << "," << center_x - width / 2 << "," << center_y - height / 2 << "," << width << "," << height << "," << conf << "," << "-1,-1,-1" << endl;
+                // file << frame_id << "," << target.tracked_id << "," << center_x << "," << center_y << "," << width << "," << height << "," << conf << "," << "-1,-1,-1" << endl;
+            }
+            cv::imshow("img", img);
+            cv::waitKey(10);
+        }
+        file.close();
+    }
+    return 0;
+}
\ No newline at end of file
diff --git a/samples/demo/multiple_object_tracking.cpp b/samples/demo/multiple_object_tracking.cpp
index aa7a4b9..66d6f6d 100644
--- a/samples/demo/multiple_object_tracking.cpp
+++ b/samples/demo/multiple_object_tracking.cpp
@@ -34,9 +34,9 @@ int main(int argc, char *argv[]) {
         cv::resize(img, img, cv::Size(mot.image_width, mot.image_height));
 
         // Run common object detection
-        mot.track(img, tgts);
+        sv::TargetsInFrame person_tgts = mot.track(img, tgts);
         // Visualize the detection results, overlaid on img
-        sv::drawTargetsInFrame(img, tgts);
+        sv::drawTargetsInFrame(img, person_tgts);
 
         // Show the result image img
         cv::imshow("img", img);
diff --git a/samples/test/mot_metrics/eval_mot_metric_gt.sh b/samples/test/mot_metrics/eval_mot_metric_gt.sh
new file mode 100755
index 0000000..a41a12c
--- /dev/null
+++ b/samples/test/mot_metrics/eval_mot_metric_gt.sh
@@ -0,0 +1,9 @@
+python3 run_mot_challenge.py \
+--BENCHMARK MOT17 \
+--GT_FOLDER ../../../dataset/MOT17/train \
+--TRACKERS_FOLDER ../../../dataset/pred_mot17 \
+--SKIP_SPLIT_FOL True \
+--SEQMAP_FILE ../../../dataset/MOT17/val_seqmap.txt \
+--TRACKERS_TO_EVAL '' \
+--METRICS HOTA CLEAR Identity \
+#--show
diff --git a/samples/test/mot_metrics/run_mot_challenge.py b/samples/test/mot_metrics/run_mot_challenge.py
new file mode 100755
index 0000000..d6c71b4
--- /dev/null
+++ b/samples/test/mot_metrics/run_mot_challenge.py
@@ -0,0 +1,93 @@
+
+""" run_mot_challenge.py
+
+Run example:
+run_mot_challenge.py --USE_PARALLEL False --METRICS Hota --TRACKERS_TO_EVAL Lif_T
+
+Command Line Arguments: Defaults, # Comments
+    Eval arguments:
+        'USE_PARALLEL': False,
+        'NUM_PARALLEL_CORES': 8,
+        'BREAK_ON_ERROR': True,
+        'PRINT_RESULTS': True,
+        'PRINT_ONLY_COMBINED': False,
+        'PRINT_CONFIG': True,
+        'TIME_PROGRESS': True,
+        'OUTPUT_SUMMARY': True,
+        'OUTPUT_DETAILED': True,
+        'PLOT_CURVES': True,
+    Dataset arguments:
+        'GT_FOLDER': os.path.join(code_path, 'data/gt/mot_challenge/'),  # Location of GT data
+        'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/mot_challenge/'),  # Trackers location
+        'OUTPUT_FOLDER': None,  # Where to save eval results (if None, same as TRACKERS_FOLDER)
+        'TRACKERS_TO_EVAL': None,  # Filenames of trackers to eval (if None, all in folder)
+        'CLASSES_TO_EVAL': ['pedestrian'],  # Valid: ['pedestrian']
+        'BENCHMARK': 'MOT17',  # Valid: 'MOT17', 'MOT16', 'MOT20', 'MOT15'
+        'SPLIT_TO_EVAL': 'train',  # Valid: 'train', 'test', 'all'
+        'INPUT_AS_ZIP': False,  # Whether tracker input files are zipped
+        'PRINT_CONFIG': True,  # Whether to print current config
+        'DO_PREPROC': True,  # Whether to perform preprocessing (never done for 2D_MOT_2015)
+        'TRACKER_SUB_FOLDER': 'data',  # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
+        'OUTPUT_SUB_FOLDER': '',  # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
+    Metric arguments:
+        'METRICS': ['HOTA', 'CLEAR', 'Identity', 'VACE']
+"""
+
+import sys
+import os
+import argparse
+from multiprocessing import freeze_support
+
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+import trackeval  # noqa: E402
+
+if __name__ == '__main__':
+    freeze_support()
+
+    # Command line interface:
+    default_eval_config = trackeval.Evaluator.get_default_eval_config()
+    default_eval_config['DISPLAY_LESS_PROGRESS'] = False
+    default_dataset_config = trackeval.datasets.MotChallenge2DBox.get_default_dataset_config()
+    default_metrics_config = {'METRICS': ['HOTA', 'CLEAR', 'Identity'], 'THRESHOLD': 0.5}
+    config = {**default_eval_config, **default_dataset_config, **default_metrics_config}  # Merge default configs
+    parser = argparse.ArgumentParser()
+    for setting in config.keys():
+        if type(config[setting]) == list or type(config[setting]) == type(None):
+            parser.add_argument("--" + setting, nargs='+')
+        else:
+            parser.add_argument("--" + setting)
+    args = parser.parse_args().__dict__
+    args['SEQMAP_FILE'] = args['SEQMAP_FILE'][0]
+    # args['SEQ_INFO'] = {'MOT17-02-DPM': 600}
+    for setting in args.keys():
+        if args[setting] is not None:
+            if type(config[setting]) == type(True):
+                if args[setting] == 'True':
+                    x = True
+                elif args[setting] == 'False':
+                    x = False
+                else:
+                    raise Exception('Command line parameter ' + setting + ' must be True or False')
+            elif type(config[setting]) == type(1):
+                x = int(args[setting])
+            elif type(args[setting]) == type(None):
+                x = None
+            elif setting == 'SEQ_INFO':
+                x = dict(zip(args[setting], [None]*len(args[setting])))
+            else:
+                x = args[setting]
+            config[setting] = x
+    eval_config = {k: v for k, v in config.items() if k in default_eval_config.keys()}
+    dataset_config = {k: v for k, v in config.items() if k in default_dataset_config.keys()}
+    metrics_config = {k: v for k, v in config.items() if k in default_metrics_config.keys()}
+
+    # Run code
+    evaluator = trackeval.Evaluator(eval_config)
+    dataset_list = [trackeval.datasets.MotChallenge2DBox(dataset_config)]
+    metrics_list = []
+    for metric in [trackeval.metrics.HOTA, trackeval.metrics.CLEAR, trackeval.metrics.Identity, trackeval.metrics.VACE]:
+        if metric.get_name() in metrics_config['METRICS']:
+            metrics_list.append(metric(metrics_config))
+    if len(metrics_list) == 0:
+        raise Exception('No metrics selected for evaluation')
+    evaluator.evaluate(dataset_list, metrics_list)
diff --git a/samples/test/mot_metrics/write_val_seqmap.py b/samples/test/mot_metrics/write_val_seqmap.py
new file mode 100755
index 0000000..087909b
--- /dev/null
+++ b/samples/test/mot_metrics/write_val_seqmap.py
@@ -0,0 +1,23 @@
+import os
+import os.path as osp
+
+
+
+def makedirs(path):
+    if not osp.exists(path):
+        os.makedirs(path)
+    else:
+        print('this file already exists.')
+
+
+if __name__=="__main__":
+    dst_path = '/home/bitwrj/SpireCV/dataset/MOT17'
+    mode = 'train'
+    dst_file_name = 'val_seqmap.txt'
+    dst_file = osp.join(dst_path, dst_file_name)
+    test_files = os.listdir(osp.join(dst_path, mode))
+    test_files.sort()
+    with open(dst_file, 'w+') as f:
+        f.writelines('name\n')
+        for test_file in test_files:
+            f.writelines(test_file+'\n')