YOLOv5 ONNX inference in Python

 

Install the dependencies needed for ONNX export and simplification:

pip install onnx coremltools onnx-simplifier
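If the weights have not been exported to ONNX yet, the export is done with the export script that ships with the YOLOv5 repository (export.py in recent releases, models/export.py in older ones). The exact flags depend on the version you are using; a typical invocation for a recent release looks like this (best.pt stands for your trained weights):

python export.py --weights best.pt --include onnx --img 640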

 

3. Simplify the model with onnx-simplifier

python -m onnxsim best.onnx best-sim.onnx
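After simplification it is worth checking that the graph is still valid and that it produces the same outputs as the original export. Below is a minimal sketch, assuming the file names from the command above and a fixed 1x3x640x640 input (adjust the shape if your model was exported differently):

import numpy as np
import onnx
import onnxruntime

# Structural check: raises if the simplified graph is malformed
onnx.checker.check_model(onnx.load("best-sim.onnx"))

# Compare outputs of the original and simplified models on a random input
sess_orig = onnxruntime.InferenceSession("best.onnx")
sess_sim = onnxruntime.InferenceSession("best-sim.onnx")
input_name = sess_orig.get_inputs()[0].name
dummy = np.random.rand(1, 3, 640, 640).astype(np.float32)  # assumed input shape
out_orig = sess_orig.run(None, {input_name: dummy})[0]
out_sim = sess_sim.run(None, {sess_sim.get_inputs()[0].name: dummy})[0]
print("max abs diff:", np.abs(out_orig - out_sim).max())

The complete inference script follows: it loads the ONNX model with onnxruntime, letterboxes the input image to 640x640, runs the forward pass, applies non-maximum suppression, maps the boxes back to the original image, and draws the detections.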

 

# coding=utf-8
import cv2
import numpy as np
import onnxruntime
import torch
import torchvision
import time
import random
from utils.general import non_max_suppression  # from the YOLOv5 repository (must be on PYTHONPATH)
class YOLOV5_ONNX(object):
    def __init__(self, onnx_path):
        '''Initialize the ONNX Runtime session'''
        self.onnx_session = onnxruntime.InferenceSession(onnx_path)
        print(onnxruntime.get_device())
        self.input_name = self.get_input_name()
        self.output_name = self.get_output_name()
        # COCO class names (80 classes)
        self.classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat',
                        'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat',
                        'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
                        'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
                        'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
                        'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                        'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
                        'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse',
                        'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
                        'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
                        'toothbrush']

    def get_input_name(self):
        '''Get the input node names'''
        input_name = []
        for node in self.onnx_session.get_inputs():
            input_name.append(node.name)
        return input_name

    def get_output_name(self):
        '''Get the output node names'''
        output_name = []
        for node in self.onnx_session.get_outputs():
            output_name.append(node.name)
        return output_name

    def get_input_feed(self, image_tensor):
        '''Build the input feed dict for onnxruntime'''
        input_feed = {}
        for name in self.input_name:
            input_feed[name] = image_tensor
        return input_feed

    def letterbox(self, img, new_shape=(640, 640), color=(114, 114, 114), auto=False, scaleFill=False,
                  scaleup=True, stride=32):
        '''Resize and pad the image while meeting stride-multiple constraints (letterbox)'''
        shape = img.shape[:2]  # current shape [height, width]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)

        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        if not scaleup:  # only scale down, do not scale up (for better test mAP)
            r = min(r, 1.0)

        # Compute padding
        ratio = r, r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
        if auto:  # minimum rectangle
            dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
        elif scaleFill:  # stretch
            dw, dh = 0.0, 0.0
            new_unpad = (new_shape[1], new_shape[0])
            ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

        dw /= 2  # divide padding into 2 sides
        dh /= 2

        if shape[::-1] != new_unpad:  # resize
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
        return img, ratio, (dw, dh)

    def xywh2xyxy(self, x):
        # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
        y = np.copy(x)
        y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
        y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
        y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
        y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
        return y

    def nms(self, prediction, conf_thres=0.1, iou_thres=0.6, agnostic=False):
        # Standalone NMS implementation; note that infer() below uses non_max_suppression
        # from the YOLOv5 repo instead of this method
        if prediction.dtype is torch.float16:
            prediction = prediction.float()  # to FP32
        xc = prediction[..., 4] > conf_thres  # candidates
        min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
        max_det = 300  # maximum number of detections per image
        output = [None] * prediction.shape[0]
        for xi, x in enumerate(prediction):  # image index, image inference
            x = x[xc[xi]]  # filter by objectness confidence
            if not x.shape[0]:
                continue
            x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf
            box = self.xywh2xyxy(x[:, :4])
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((torch.tensor(box), conf, j.float()), 1)[conf.view(-1) > conf_thres]
            n = x.shape[0]  # number of boxes
            if not n:
                continue
            c = x[:, 5:6] * (0 if agnostic else max_wh)  # class offsets
            boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
            i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
            if i.shape[0] > max_det:  # limit detections
                i = i[:max_det]
            output[xi] = x[i]
        return output

    def clip_coords(self, boxes, img_shape):
        '''Clip xyxy bounding boxes to the image shape (height, width)'''
        boxes[:, 0].clamp_(0, img_shape[1])  # x1
        boxes[:, 1].clamp_(0, img_shape[0])  # y1
        boxes[:, 2].clamp_(0, img_shape[1])  # x2
        boxes[:, 3].clamp_(0, img_shape[0])  # y2

    def scale_coords(self, img1_shape, coords, img0_shape, ratio_pad=None):
        '''
        Rescale coords (xyxy) from img1_shape back to img0_shape:
        subtract the padding, then divide by the scale ratio.
        :param img1_shape: network input shape
        :param coords: box coordinates
        :param img0_shape: original image shape
        :param ratio_pad: optional precomputed (ratio, pad)
        :return: rescaled coordinates
        '''
        if ratio_pad is None:  # calculate from img0_shape
            gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
            pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
        else:
            gain = ratio_pad[0][0]
            pad = ratio_pad[1]
        coords[:, [0, 2]] -= pad[0]  # subtract x padding
        coords[:, [1, 3]] -= pad[1]  # subtract y padding
        coords[:, :4] /= gain  # map boxes back to the original image scale
        self.clip_coords(coords, img0_shape)  # boundary check
        return coords

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def infer(self, img_path):
        '''Run the forward pass and post-process the predictions'''
        # Hyper-parameters
        img_size = (640, 640)  # network input size

        # Read the image
        src_img = cv2.imread(img_path)
        start = time.time()
        src_size = src_img.shape[:2]

        # Letterbox resize and pad
        img = self.letterbox(src_img, img_size, stride=32)[0]

        # HWC BGR -> CHW RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)

        # Normalize to [0, 1]
        img = img.astype(dtype=np.float32)
        img /= 255.0

        # Add the batch dimension
        img = np.expand_dims(img, axis=0)
        print('preprocess time: ', time.time() - start)

        # Forward pass
        input_feed = self.get_input_feed(img)
        pred = torch.tensor(self.onnx_session.run(None, input_feed)[0])
        results = non_max_suppression(pred, 0.5, 0.5)
        print('inference time: ', time.time() - start)

        # Map boxes back to the original image
        img_shape = img.shape[2:]
        for det in results:  # detections per image
            if det is not None and len(det):
                det[:, :4] = self.scale_coords(img_shape, det[:, :4], src_size).round()
        print(time.time() - start)

        if det is not None and len(det):
            self.draw(src_img, det)

    def plot_one_box(self, x, img, color=None, label=None, line_thickness=None):
        # Plot one bounding box on image img
        tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
        color = color or [random.randint(0, 255) for _ in range(3)]
        c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
        cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
        if label:
            tf = max(tl - 1, 1)  # font thickness
            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
            c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
            cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled label background
            cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)

    def draw(self, img, boxinfo):
        colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(self.classes))]
        for *xyxy, conf, cls in boxinfo:
            label = '%s %.2f' % (self.classes[int(cls)], conf)
            self.plot_one_box(xyxy, img, label=label, color=colors[int(cls)], line_thickness=1)
        cv2.namedWindow("dst", 0)
        cv2.imshow("dst", img)
        cv2.imwrite("res1.jpg", img)
        cv2.waitKey(0)
        return 0


if __name__ == "__main__":
    model = YOLOV5_ONNX(onnx_path='./yolov5s6.onnx')
    model.infer(img_path="./data/images/bus.jpg")
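If a GPU build of ONNX Runtime (onnxruntime-gpu) is installed, the session can be pinned to CUDA explicitly instead of relying on the default device reported by onnxruntime.get_device(). A minimal sketch, assuming onnxruntime-gpu is available and the same model path as above:

import onnxruntime

# Prefer the CUDA execution provider, falling back to CPU if it is unavailable
providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
session = onnxruntime.InferenceSession("./yolov5s6.onnx", providers=providers)
print(session.get_providers())  # lists the providers that were actually loaded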

 

 

Result: the annotated detections are shown in an OpenCV window and saved to res1.jpg (result image omitted here).

 
