Pre Merge pull request !14 from Daniel/lxm

This commit is contained in:
Daniel 2023-08-15 01:21:54 +00:00 committed by Gitee
commit 3fb771cab2
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
4 changed files with 190 additions and 0 deletions


@@ -275,6 +275,9 @@ target_link_libraries(GimbalUdpDetectionInfoSender sv_world)
add_executable(EvalFpsOnVideo samples/test/eval_fps_on_video.cpp)
target_link_libraries(EvalFpsOnVideo sv_world)
add_executable(EvalModelOnCocoVal samples/test/eval_mAP_on_coco_val/eval_mAP_on_coco_val.cpp)
target_link_libraries(EvalModelOnCocoVal sv_world)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/samples/calib)
add_executable(CameraCalibrarion samples/calib/calibrate_camera_charuco.cpp)
target_link_libraries(CameraCalibrarion ${OpenCV_LIBS})


@@ -0,0 +1,25 @@
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import os

if __name__ == '__main__':
    path = os.path.abspath(os.path.join(os.getcwd(), "../../.."))
    pred_json = 'pd_coco.json'
    anno_json = path + '/val2017/instances_val2017.json'
    # use the COCO API to load the ground-truth annotations and the predictions
    cocoGt = COCO(anno_json)
    cocoDt = cocoGt.loadRes(pred_json)
    # create a COCO eval object for bounding-box evaluation
    cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
    # run the evaluation
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    # save the results
    with open('coco_eval.txt', 'w') as f:
        f.write(str(cocoEval.stats))
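    # for reference, cocoEval.stats holds the 12 standard COCO metrics in order:
    # [AP@.5:.95, AP@.5, AP@.75, AP_small, AP_medium, AP_large,
    #  AR@1, AR@10, AR@100, AR_small, AR_medium, AR_large]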


@@ -0,0 +1,100 @@
#include <iostream>
#include <string>
#include <vector>
#include <fstream>
#include <iomanip>
#include <cstdio>
#include <cstdlib>
#include <opencv2/opencv.hpp>
// include the SpireCV SDK header
#include <sv_world.h>

using namespace std;
using namespace cv;

// extract the image file name (without directory or extension) from a path
std::string GetImageFileName(const std::string& imagePath) {
  size_t lastSlash = imagePath.find_last_of("/\\");
  if (lastSlash == std::string::npos) {
    return imagePath;
  } else {
    std::string fileName = imagePath.substr(lastSlash + 1);
    size_t lastDot = fileName.find_last_of(".");
    if (lastDot != std::string::npos) {
      return fileName.substr(0, lastDot);
    }
    return fileName;
  }
}
int main(int argc, char *argv[])
{
  // instantiate the common object detector class
  sv::CommonObjectDetector cod;
  // manually load the camera parameters; when using an Amov gimbal or camera such as the G1,
  // this step can be skipped and the camera parameter file is downloaded automatically
  cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
  // load the val2017 image list
  string val_path = sv::get_home() + "/SpireCV/val2017/val2017";
  vector<string> val_image;
  glob(val_path, val_image, false);
  if (val_image.size() == 0)
  {
    printf("val_image error!!!\n");
    exit(1);
  }
  // recreate the preds output folder
  std::string folder = sv::get_home() + "/SpireCV/val2017/preds";
  // the shell test returns 0 only if the folder already exists
  int checkStatus = std::system(("[ -d \"" + folder + "\" ]").c_str());
  if (checkStatus == 0)
  {
    int removeStatus = std::system(("rm -rf \"" + folder + "\"").c_str());
    if (removeStatus != 0)
    {
      printf("remove older preds folder error!!!\n");
      exit(1);
    }
  }
  int status = std::system(("mkdir \"" + folder + "\"").c_str());
  if (status != 0)
  {
    printf("create preds folder error!!!\n");
    exit(1);
  }
  for (int i = 0; i < val_image.size(); i++) {
    // create the per-image prediction file
    std::string val_image_name = GetImageFileName(val_image[i]);
    std::string filename = folder + "/" + val_image_name + ".txt";
    std::ofstream file(filename);
    if (!file.is_open())
    {
      printf("create pred file error!!!\n");
      exit(1);
    }
    file << std::fixed << std::setprecision(6);
    // instantiate SpireCV's single-frame detection result class TargetsInFrame
    sv::TargetsInFrame tgts(i);
    cv::Mat img = imread(val_image[i]);
    int rows = img.rows;
    int cols = img.cols;
    // run common object detection
    cod.detect(img, tgts);
    // visualize the detection results by drawing them onto img
    sv::drawTargetsInFrame(img, tgts);
    // write one line per detection: "class_id x_center y_center width height score",
    // with box coordinates normalized by the image size
    for (int j = 0; j < tgts.targets.size(); j++)
    {
      sv::Box b;
      tgts.targets[j].getBox(b);
      file << tgts.targets[j].category_id << " "
           << (float)(b.x1 + b.x2) / (2 * cols) << " "
           << (float)(b.y1 + b.y2) / (2 * rows) << " "
           << (float)(b.x2 - b.x1) / cols << " "
           << (float)(b.y2 - b.y1) / rows << " "
           << (float)tgts.targets[j].score << "\n";
    }
    file.close();
    cv::imshow("image", img);
    cv::waitKey(500);
    img.release();
    cv::destroyAllWindows();
  }
  return 0;
}


@@ -0,0 +1,62 @@
import datetime
import json
import os
import cv2
# convert the per-image prediction txt files into a COCO-format detection json
path = os.path.abspath(os.path.join(os.getcwd(), "../../.."))
# dataset images, prediction txt files, and output json path
images_path = path + '/val2017/val2017'
preds_path = path + '/val2017/preds'
coco_json_save = 'pd_coco.json'
# list of COCO-format detection records
coco_json = []
# map COCO category ids (1-90, with gaps) to contiguous detector class indices (0-79)
id_map = {1: 0, 2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7, 9: 8, 10: 9, 11: 10, 13: 11, 14: 12, 15: 13, 16: 14, 17: 15, 18: 16, 19: 17, 20: 18, 21: 19, 22: 20, 23: 21, 24: 22, 25: 23, 27: 24, 28: 25, 31: 26, 32: 27, 33: 28, 34: 29, 35: 30, 36: 31, 37: 32, 38: 33, 39: 34, 40: 35, 41: 36, 42: 37, 43: 38, 44: 39, 46: 40, 47: 41, 48: 42, 49: 43, 50: 44, 51: 45, 52: 46, 53: 47, 54: 48, 55: 49, 56: 50, 57: 51, 58: 52, 59: 53, 60: 54, 61: 55, 62: 56, 63: 57, 64: 58, 65: 59, 67: 60, 70: 61, 72: 62, 73: 63, 74: 64, 75: 65, 76: 66, 77: 67, 78: 68, 79: 69, 80: 70, 81: 71, 82: 72, 84: 73, 85: 74, 86: 75, 87: 76, 88: 77, 89: 78, 90: 79}
# inverse mapping: detector class index -> COCO category id
reid_mp = {value: key for key, value in id_map.items()}
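# e.g. id_map[1] == 0 and reid_mp[0] == 1: COCO category id 1 ("person")
# corresponds to detector class index 0, and id 13 ("stop sign") to index 11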
# list the validation image files
images = os.listdir(images_path)
for image in images:
    print(image)
    # get the image name without extension
    image_name, image_suffix = os.path.splitext(image)
    # get the image width and height
    image_path = images_path + '/' + image
    img = cv2.imread(image_path)
    height, width, _ = img.shape
    # read the corresponding prediction txt file (skip images without predictions)
    pred_path = preds_path + '/' + image_name + '.txt'
    if not os.path.exists(pred_path):
        continue
    with open(pred_path, 'r') as f:
        preds = f.readlines()
    preds = [l.strip() for l in preds]
    for pred in preds:
        # each line holds "class_id x_center y_center width height score", normalized to [0, 1]
        pred = pred.split(' ')
        category_id = int(pred[0])
        x = float(pred[1]) * width
        y = float(pred[2]) * height
        w = float(pred[3]) * width
        h = float(pred[4]) * height
        # COCO bbox format is [x_min, y_min, width, height] in pixels
        xmin = x - w / 2
        ymin = y - h / 2
        coco_json.append({
            'image_id': int(image_name),
            'category_id': int(reid_mp[category_id]),
            'bbox': [xmin, ymin, w, h],
            'score': float(pred[5]),
            'area': w * h})
# save the COCO-format predictions
with open(coco_json_save, 'w') as f:
    json.dump(coco_json, f, indent=2)
print(len(coco_json), 'Done!')
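Taken together, the four changed files form a small mAP evaluation pipeline: the EvalModelOnCocoVal binary runs the detector over val2017 and writes per-image prediction .txt files into ~/SpireCV/val2017/preds, the conversion script above turns those files into pd_coco.json, and the pycocotools script scores pd_coco.json against instances_val2017.json. A minimal driver sketch under those assumptions follows; the two Python script filenames are placeholders, since the diff view does not show them.

# hypothetical pipeline driver; convert_preds_to_coco.py and coco_eval.py are
# placeholder names for the two scripts added in this pull request
import subprocess
# 1. run the detector over val2017 and write per-image .txt predictions
subprocess.run(["./EvalModelOnCocoVal"], check=True)
# 2. convert the .txt predictions into COCO-format pd_coco.json
subprocess.run(["python3", "convert_preds_to_coco.py"], check=True)
# 3. score pd_coco.json against instances_val2017.json with pycocotools
subprocess.run(["python3", "coco_eval.py"], check=True)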