Background:
I need to add automatic annotation to CVAT using a model I trained myself, deployed as a nuclio serverless function.
The model was trained with YOLOv8.
Everything I could find online is written for YOLOv5. While following those guides to write model_handler.py I noticed that YOLOv5 and YOLOv8 differ, so I had an AI convert the YOLOv5 code to YOLOv8. I am not sure whether that conversion is the cause, but right now the function fails with an error when I deploy it with nuclio deploy.
Both versions of the code are pasted below.
YOLOv5 version (the reference I found online):
import numpy as np
import torch

# these imports assume the standard ultralytics/yolov5 repo layout
from models.common import DetectMultiBackend
from utils.augmentations import letterbox
from utils.general import check_img_size, non_max_suppression, scale_boxes
from utils.torch_utils import select_device


class ModelHandler:
    def __init__(self, weights='/opt/nuclio/yolov5s_bunker.pt', device='cpu', dnn=False):
        self.device = select_device(device)
        # pass the torch device object, not the raw string
        self.model = DetectMultiBackend(weights, device=self.device, dnn=dnn)

    def infer(self, image):
        imgsz = (640, 640)
        stride, names = self.model.stride, self.model.names
        imgsz = check_img_size(imgsz, s=stride)

        # Padded resize
        img = letterbox(image, imgsz, stride=stride, auto=True)[0]

        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        im, im0s = img, image

        results = []
        im = torch.from_numpy(im).to(self.device)
        im = im.half() if self.model.fp16 else im.float()  # uint8 to fp16/32
        im /= 255  # 0 - 255 to 0.0 - 1.0
        if len(im.shape) == 3:
            im = im[None]  # expand for batch dim

        pred = self.model(im, augment=False, visualize=False)

        # NMS
        pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, max_det=50)

        im0 = im0s.copy()
        if pred[0].size()[0] > 0:
            for dett in pred:
                # NOTE: only the first detection of each image is kept here
                dett = torch.unsqueeze(dett[0, :], 0)
                box = scale_boxes(im.shape[2:], dett[:, :4], im0.shape).round().cpu()
                x1 = int(box[0][0])
                y1 = int(box[0][1])
                x2 = int(box[0][2])
                y2 = int(box[0][3])
                conf = dett[0][4]
                cls = dett[0][5]
                results.append({
                    "confidence": str(float(conf)),
                    "label": names[int(cls)],
                    "points": [x1, y1, x2, y2],
                    "type": "rectangle",
                })
        return results
YOLOv8 version (converted by the AI, currently failing):
import torch
import cv2
import numpy as np


class ModelHandler:
    def __init__(self, weights='/opt/nuclio/yolov8_bubble.pt', device='0', dnn=False):
        # load the model
        self.device = torch.device(device)
        self.model = torch.load(weights, map_location=self.device)
        self.model.to(self.device)
        self.model.eval()

    def infer(self, image):
        # resize image
        img = cv2.resize(image, (640, 640))
        # convert image to tensor
        img = torch.tensor(img).permute(2, 0, 1).unsqueeze(0).float()
        img = img.to(self.device)
        # forward pass
        outputs = self.model(img)
        # parse outputs
        results = []
        for output in outputs:
            for detection in output:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > 0.25:
                    x, y, w, h = detection[:4] * np.array([640, 640, 640, 640])
                    x1, y1, x2, y2 = int(x - w / 2), int(y - h / 2), int(x + w / 2), int(y + h / 2)
                    results.append({
                        "confidence": str(float(confidence)),
                        "label": str(class_id),
                        "points": [x1, y1, x2, y2],
                        "type": "rectangle",
                    })
        return results
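For comparison, here is a minimal sketch of how the handler could look if it went through the ultralytics package's own API instead of torch.load (which on a YOLOv8 checkpoint returns a dict rather than a callable model). This assumes the ultralytics pip package is installed in the base image; the weight path and the 0.25 confidence threshold are copied from the code above. I have not verified this version either:

from ultralytics import YOLO


class ModelHandler:
    def __init__(self, weights='/opt/nuclio/yolov8_bubble.pt', device='cpu'):
        # YOLO() restores the full model from an ultralytics checkpoint
        self.model = YOLO(weights)
        self.device = device

    def infer(self, image):
        results = []
        # ultralytics handles resizing, normalization and NMS internally
        prediction = self.model(image, conf=0.25, device=self.device)[0]
        for box in prediction.boxes:
            x1, y1, x2, y2 = box.xyxy[0].tolist()
            results.append({
                "confidence": str(float(box.conf[0])),
                "label": prediction.names[int(box.cls[0])],
                "points": [int(x1), int(y1), int(x2), int(y2)],
                "type": "rectangle",
            })
        return results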
And here is the function.yaml file:
metadata:
  name: pth-ultralytics-yolov8_bublle
  namespace: cvat
  annotations:
    name: yolov8_bublle
    type: detector
    framework: pytorch
    spec: |
      [
        { "id": 1, "name": "crack"},
        { "id": 2, "name": "bubble"}
      ]

spec:
  description: yolov8_bublle from ultralytics
  runtime: 'python:3.8'
  handler: main:handler
  eventTimeout: 30s

  build:
    image: cvat/pth.ultralytics.yolov8_bublle:v1
    baseImage: yolov8_bubble_test:v1

    directives:
      preCopy:
        - kind: WORKDIR
          value: /opt/nuclio

  triggers:
    myHttpTrigger:
      maxWorkers: 1
      kind: 'http'
      workerAvailabilityTimeoutMilliseconds: 10000
      attributes:
        maxRequestBodySize: 33554432 # 32MB

  platform:
    attributes:
      restartPolicy:
        name: always
        maximumRetryCount: 3
      mountMode: volume
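For completeness, this is a minimal sketch of the main.py entry point that function.yaml references via handler: main:handler, modeled on CVAT's stock serverless detector examples. The base64-encoded "image" field in the request body is an assumption taken from those examples, not from my own file:

import base64
import io
import json

import numpy as np
from PIL import Image

from model_handler import ModelHandler


def init_context(context):
    context.logger.info("Init context...  0%")
    # load the model once per worker and keep it in user_data
    context.user_data.model = ModelHandler()
    context.logger.info("Init context...100%")


def handler(context, event):
    context.logger.info("Run yolov8 model")
    data = event.body
    buf = io.BytesIO(base64.b64decode(data["image"]))
    # the handlers above expect a BGR numpy array, so convert accordingly
    image = np.array(Image.open(buf).convert("RGB"))[:, :, ::-1]
    results = context.user_data.model.infer(image)
    return context.Response(body=json.dumps(results), headers={},
                            content_type='application/json', status_code=200)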
Can anyone point me in the right direction?