原理是利用openmv的usb模仿串口,然后用Python代码打开串口接收
能替代 OpenMV IDE,以约 48 FPS(帧/秒)的速度接收并显示图像
Python 端需要的依赖如下:
from ultralytics import YOLO
import cv2
import numpy as np
from serial import Serial
import time
from collections import deque
pyserial 3.5
numpy 2.0.2
ultralytics 8.3.67
opencv-contrib-python 4.11.0.86
opencv-python 4.11.0.86
Python端代码:
from ultralytics import YOLO
import cv2
import numpy as np
from serial import Serial
import time
from collections import deque

# --- Configuration ---
SERIAL_PORT = 'COM25'            # Serial port name (Windows style)
BAUD_RATE = 115200               # Nominal baud rate; USB CDC (OpenMV VCP) ignores it,
                                 # but 115200 is accepted by every driver (the original
                                 # value of 1 is rejected by some backends)
HEADER = b'\xAA\x55\xAA\x55'     # Frame-start marker sent by the OpenMV side
HEADER_LEN = len(HEADER)         # Header length in bytes (4)
FPS_WINDOW_SIZE = 30             # Number of timestamps kept for the moving-average FPS
MAX_FRAME_SIZE = 10_000_000      # Sanity bound on the announced JPEG size (desync guard)

# Load the YOLO model once at startup (make sure the weight file path is correct).
model = YOLO('yolov8n.pt')

# --- Performance statistics ---
fps_queue = deque(maxlen=FPS_WINDOW_SIZE)  # Frame-start timestamps for FPS calculation
serial_read_time = 0.0                     # Last serial-read duration, ms
inference_time = 0.0                       # Last inference duration, ms


def find_header(ser):
    """Consume bytes from *ser* until the 4-byte frame header is seen.

    Keeps a small sliding buffer and clears it once it exceeds 100 bytes so a
    noisy stream cannot grow it without bound.
    """
    buffer = bytearray()
    while True:
        byte = ser.read(1)
        if not byte:
            continue  # read timed out; keep waiting for data
        buffer += byte
        if len(buffer) >= HEADER_LEN and buffer[-HEADER_LEN:] == HEADER:
            return
        elif len(buffer) > 100:
            buffer.clear()


def draw_stats(img, fps, srt, inf_t):
    """Overlay FPS, serial-read and inference timings onto *img*; return it."""
    cv2.putText(img, f"FPS: {fps:.1f}", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
    cv2.putText(img, f"Serial: {srt:.1f}ms", (10, 60),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 1)
    cv2.putText(img, f"Inference: {inf_t:.1f}ms", (10, 90),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 1)
    return img


ser = None  # defined up-front so the finally block is safe if Serial() raises
try:
    ser = Serial(SERIAL_PORT, BAUD_RATE, timeout=0.1)
    print(f"Connected to {SERIAL_PORT}")
    while True:
        frame_start = time.perf_counter()

        # Step 1: resynchronise on the frame header.
        find_header(ser)

        # Step 2: read the 4-byte little-endian JPEG size.
        size_bytes = ser.read(4)
        if len(size_bytes) != 4:
            continue
        size = int.from_bytes(size_bytes, 'little')
        if not 0 < size <= MAX_FRAME_SIZE:
            continue  # implausible size -> stream desync; resync on next header

        # Step 3: read the JPEG payload.
        img_bytes = bytearray()
        read_start = time.perf_counter()
        while len(img_bytes) < size:
            chunk = ser.read(size - len(img_bytes))
            if not chunk:
                break  # timeout: drop this frame instead of spinning forever
            img_bytes += chunk
        if len(img_bytes) < size:
            continue  # incomplete frame
        serial_read_time = (time.perf_counter() - read_start) * 1000  # ms

        # Decode the JPEG into a BGR image.
        img = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), cv2.IMREAD_COLOR)
        if img is None:
            continue  # corrupted frame

        # YOLO inference (deliberately disabled for transfer benchmarking;
        # uncomment the two `results` lines to enable detection).
        inference_start = time.perf_counter()
        # results = model(img, show=False, verbose=False)
        inference_time = (time.perf_counter() - inference_start) * 1000  # ms

        # Moving-average FPS: N timestamps span N-1 frame intervals, so the
        # numerator is len-1 (the original len/span overestimated the rate).
        fps_queue.append(frame_start)
        if len(fps_queue) > 1:
            fps = (len(fps_queue) - 1) / (fps_queue[-1] - fps_queue[0])
        else:
            fps = 0

        # annotated_img = results[0].plot()
        annotated_img = draw_stats(img, fps, serial_read_time, inference_time)

        # Display and allow quitting with 'q'.
        cv2.imshow('YOLO Detection', annotated_img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
except KeyboardInterrupt:
    print("User interrupted.")
except Exception as e:
    print(f"Error: {e}")
finally:
    if ser is not None:  # guard: Serial() may have failed before assignment
        ser.close()
    cv2.destroyAllWindows()
    print("Resources released.")
把 OpenMV 端代码作为 main.py 保存到 OpenMV 的存储中,然后断开并重新连接 USB;同时务必保证 OpenMV IDE 没有连接到 OpenMV(否则虚拟串口会被 IDE 占用,Python 端将无法打开该串口)。
openmv端代码:
import sensor, image, time, pyb

# --- Camera setup ---
sensor.reset()
sensor.set_pixformat(sensor.JPEG)    # hardware JPEG compression
sensor.set_framesize(sensor.QVGA)    # 320x240 (adjust as needed)
sensor.set_quality(80)               # JPEG quality, 0-100 (higher = sharper)
sensor.skip_frames(time=2000)        # let the sensor settle before streaming

usb = pyb.USB_VCP()                  # USB virtual COM port object
HEADER = b'\xAA\x55\xAA\x55'         # 4-byte frame-start marker

# Stream frames forever; stop on the first error.
while True:
    try:
        frame = sensor.snapshot()
        payload = frame.bytearray()                  # raw JPEG byte stream
        size_le = len(payload).to_bytes(4, 'little') # payload size, little-endian
        # Packet layout: [header (4B)] [payload size (4B, LE)] [payload]
        usb.write(HEADER)
        usb.write(size_le)
        usb.write(payload)
        time.sleep_ms(1)  # brief yield between frames; tune for pacing
    except Exception as e:
        print("OpenMV Error:", e)
        break