Pre Merge pull request !13 from Daniel/lxm

This commit is contained in:
Daniel 2023-08-11 11:31:52 +00:00 committed by Gitee
commit c8020bf903
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
5 changed files with 309 additions and 0 deletions

View File

@@ -269,6 +269,9 @@ target_link_libraries(GimbalUdpDetectionInfoSender sv_world)
add_executable(EvalFpsOnVideo samples/test/eval_fps_on_video.cpp)
target_link_libraries(EvalFpsOnVideo sv_world)
add_executable(EvalModelOnCocoVal samples/test/eval_mAP_on_coco_val/eval_mAP_on_coco_val.cpp)
target_link_libraries(EvalModelOnCocoVal sv_world)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/samples/calib)
add_executable(CameraCalibrarion samples/calib/calibrate_camera_charuco.cpp)
target_link_libraries(CameraCalibrarion ${OpenCV_LIBS})

View File

@@ -0,0 +1,25 @@
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import os
if __name__ == '__main__':
    path = os.path.abspath(os.path.join(os.getcwd(), "../../.."))
    pred_json = 'pd_coco.json'
    anno_json = path + '/val2017/gt_coco.json'
    # use the COCO API to load the prediction results and the ground-truth annotations
    cocoGt = COCO(anno_json)
    cocoDt = cocoGt.loadRes(pred_json)
    # create the COCO eval object for bounding-box evaluation
    cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
    # run the evaluation
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    # save the results
    with open('coco_eval.txt', 'w') as f:
        f.write(str(cocoEval.stats))
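The file written above contains only the raw 12-number array from pycocotools. As a hedged aside (a minimal sketch, not part of this change set), those entries follow the standard COCOeval ordering, so a labelled dump can make coco_eval.txt easier to read:

# minimal sketch (assumes the standard 12-entry cocoEval.stats layout from pycocotools)
STAT_NAMES = [
    'AP@[.50:.95]', 'AP@.50', 'AP@.75', 'AP_small', 'AP_medium', 'AP_large',
    'AR@1', 'AR@10', 'AR@100', 'AR_small', 'AR_medium', 'AR_large'
]
with open('coco_eval.txt', 'w') as f:
    for name, value in zip(STAT_NAMES, cocoEval.stats):
        f.write(f'{name}: {value:.4f}\n')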

View File

@@ -0,0 +1,100 @@
#include <iostream>
#include <string>
#include <vector>
#include <fstream>
#include <iomanip>
#include <cstdio>
// include the SpireCV SDK header
#include <sv_world.h>
using namespace std;
using namespace cv;

// extract the file name (without extension) from an image path
std::string GetImageFileName(const std::string& imagePath) {
  size_t lastSlash = imagePath.find_last_of("/\\");
  if (lastSlash == std::string::npos) {
    return imagePath;
  } else {
    std::string fileName = imagePath.substr(lastSlash + 1);
    size_t lastDot = fileName.find_last_of(".");
    if (lastDot != std::string::npos) {
      return fileName.substr(0, lastDot);
    }
    return fileName;
  }
}
int main(int argc, char *argv[])
{
  // instantiate the common object detector class
  sv::CommonObjectDetector cod;
  // manually load camera parameters; if an Amov gimbal/camera such as the G1 is used,
  // this step can be skipped and the camera parameter file is downloaded automatically
  cod.loadCameraParams(sv::get_home() + "/SpireCV/calib_webcam_640x480.yaml");
  // load the validation images
  string val_path = sv::get_home() + "/SpireCV/val2017/val2017";
  vector<string> val_image;
  glob(val_path, val_image, false);
  if (val_image.size() == 0)
  {
    printf("val_image error!!!\n");
    exit(1);
  }
  // prepare the preds folder: remove any stale folder, then create a fresh one
  std::string folder = sv::get_home() + "/SpireCV/val2017/preds";
  int checkStatus = std::system(("[ -d \"" + folder + "\" ]").c_str()); // 0 only if the folder exists
  if (checkStatus == 0)
  {
    int removeStatus = std::system(("rm -rf \"" + folder + "\"").c_str());
    if (removeStatus != 0)
    {
      printf("remove older preds folder error!!!\n");
      exit(1);
    }
  }
  int status = std::system(("mkdir \"" + folder + "\"").c_str());
  if (status != 0)
  {
    printf("create preds folder error!!!\n");
    exit(1);
  }
  for (int i = 0; i < val_image.size(); i++) {
    // create the prediction file for this image
    std::string val_image_name = GetImageFileName(val_image[i]);
    std::string filename = folder + "/" + val_image_name + ".txt";
    std::ofstream file(filename);
    if (!file.is_open())
    {
      printf("open pred file error: %s\n", filename.c_str());
      exit(1);
    }
    file << std::fixed << std::setprecision(6);
    // instantiate SpireCV's single-frame detection result class TargetsInFrame
    sv::TargetsInFrame tgts(i);
    cv::Mat img = imread(val_image[i]);
    int rows = img.rows;
    int cols = img.cols;
    // run common object detection
    cod.detect(img, tgts);
    // draw the detection results onto img for visualization
    sv::drawTargetsInFrame(img, tgts);
    // write results: category, normalized center x/y, normalized width/height, score
    for (int j = 0; j < tgts.targets.size(); j++)
    {
      sv::Box b;
      tgts.targets[j].getBox(b);
      file << tgts.targets[j].category_id << " "
           << (float)(b.x1 + b.x2) / (2 * cols) << " "
           << (float)(b.y1 + b.y2) / (2 * rows) << " "
           << (float)(b.x2 - b.x1) / cols << " "
           << (float)(b.y2 - b.y1) / rows << " "
           << (float)tgts.targets[j].score << "\n";
    }
    file.close();
    cv::imshow("image", img);
    cv::waitKey(500);
    img.release();
    cv::destroyAllWindows();
  }
  return 0;
}

View File

@@ -0,0 +1,123 @@
# -*- coding: UTF-8 -*-
"""
@Author: lxm
@version V1.0
"""
import datetime
import json
import os
import cv2
# convert a YOLO-format dataset into a COCO-format dataset
# resolve the dataset directories relative to the working directory
path = os.path.abspath(os.path.join(os.getcwd(), "../../.."))
images_path = path + '/val2017/val2017'
labels_path = path + '/val2017/labels'
coco_json_save = path + '/val2017/gt_coco.json'
# create the COCO-format json structure
coco_json = {
    'info': {
        "description": "COCOVal Dataset",
        "url": "www.amov.com",
        "version": "1.0",
        "year": 2023,
        "contributor": "lxm",
        "date_created": datetime.datetime.utcnow().isoformat(' ')
    },
    "licenses": [
        {
            "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/",
            "id": 1,
            "name": "Attribution-NonCommercial-ShareAlike License"
        }
    ],
    'images': [],
    'annotations': [],
    'categories': []
}
# COCO classes
classes = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck",
"boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench",
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra",
"giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
"skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove",
"skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork",
"knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli",
"carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant",
"bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard",
"cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book",
"clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
# add the 80 COCO categories (ids start from 1)
for i, c in enumerate(classes):
    coco_json['categories'].append({'id': i + 1, 'name': c, 'supercategory': c})
# iterate over all image files in the images folder
images = os.listdir(images_path)
for image in images:
    # split the image name and extension
    image_name, image_suffix = os.path.splitext(image)
    # read the image to get its width and height
    image_path = images_path + '/' + image
    img = cv2.imread(image_path)
    height, width, _ = img.shape
    # add the image entry
    coco_json['images'].append({
        'id': int(image_name),
        'file_name': image,
        'width': width,
        'height': height,
        'date_captured': datetime.datetime.utcnow().isoformat(' '),
        'license': 1
    })
    # read the label file that corresponds to this image
    label_path = labels_path + '/' + image_name + '.txt'
    if not os.path.exists(label_path):
        continue
    with open(label_path, 'r') as f:
        labels = f.readlines()
    labels = [l.strip() for l in labels]
    for j, label in enumerate(labels):
        label = label.split(' ')
        # class id
        category_id = int(label[0])
        # convert the YOLO-format box (normalized center/size) to COCO format
        x = float(label[1]) * width
        y = float(label[2]) * height
        w = float(label[3]) * width
        h = float(label[4]) * height
        xmin = x - w / 2
        ymin = y - h / 2
        xmax = x + w / 2
        ymax = y + h / 2
        # add the annotation entry
        coco_json['annotations'].append({
            'image_id': int(image_name),
            'category_id': category_id + 1,
            'bbox': [xmin, ymin, w, h],
            'id': len(coco_json['annotations']),
            'area': w * h,
            'iscrowd': 0,
            'segmentation': [],
            'attributes': ""
        })
# save the json file
with open(coco_json_save, 'w') as f:
    json.dump(coco_json, f, indent=2)
print(len(coco_json['images']), len(coco_json['annotations']), len(coco_json['categories']), 'Done!')
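As a quick sanity check (a minimal sketch, not part of this change set), the generated gt_coco.json can be loaded back with the COCO API to confirm the counts printed above:

from pycocotools.coco import COCO

gt = COCO(coco_json_save)  # re-load the ground-truth file written above
print(len(gt.getImgIds()), 'images,', len(gt.getAnnIds()), 'annotations,',
      len(gt.getCatIds()), 'categories loaded back')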

View File

@@ -0,0 +1,58 @@
import datetime
import json
import os
import cv2
# convert the prediction results to a COCO-format results json
path = os.path.abspath(os.path.join(os.getcwd(), "../../.."))
# dataset directories
images_path = path + '/val2017/val2017'
preds_path = path + '/val2017/preds'
coco_json_save = 'pd_coco.json'
# list of COCO-format detection results
coco_json = []
# iterate over all image files in the images folder
images = os.listdir(images_path)
for image in images:
    print(image)
    # split the image name and extension
    image_name, image_suffix = os.path.splitext(image)
    # read the image to get its width and height
    image_path = images_path + '/' + image
    img = cv2.imread(image_path)
    height, width, _ = img.shape
    # read the prediction txt for this image
    pred_path = preds_path + '/' + image_name + '.txt'
    if not os.path.exists(pred_path):
        continue
    with open(pred_path, 'r') as f:
        preds = f.readlines()
    preds = [l.strip() for l in preds]
    for j, pred in enumerate(preds):
        pred = pred.split(' ')
        category_id = int(pred[0])
        # convert the normalized YOLO-format box back to absolute COCO format
        x = float(pred[1]) * width
        y = float(pred[2]) * height
        w = float(pred[3]) * width
        h = float(pred[4]) * height
        xmin = x - w / 2
        ymin = y - h / 2
        xmax = x + w / 2
        ymax = y + h / 2
        coco_json.append({
            'image_id': int(image_name),
            'category_id': category_id + 1,
            'bbox': [xmin, ymin, w, h],
            'score': float(pred[5]),
            'area': w * h})
# save the json file
with open(coco_json_save, 'w') as f:
    json.dump(coco_json, f, indent=2)
print(len(coco_json), 'Done!')