small improvements

Signed-off-by: Nickiel12 <35903114+Nickiel12@users.noreply.github.com>
Nickiel12 2024-07-16 18:00:57 -07:00
parent d7d5887fb1
commit c3b3bb6ded
4 changed files with 20 additions and 9 deletions


@@ -2,7 +2,6 @@ import websockets
 import asyncio
 import numpy as np
 from ultralytics import YOLO
-import time
 import cv2

 classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
@@ -17,18 +16,21 @@ classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "trai
               "teddy bear", "hair drier", "toothbrush"
               ]

-model = YOLO('yolov8s.pt')  # Load an official Detect model
+model = YOLO('yolov8m.pt')  # Load an official Detect model

 async def handle_connection(websocket, path):
     print(f"Connection from: {path}")
     try:
         while True:
             raw_data = await websocket.recv()
+            print(f"downloaded bytes: {len(raw_data)}")
+
-            nparr = np.frombuffer(raw_data, np.uint8).reshape((480, 640, 3))
-            # frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
-            frame = cv2.cvtColor(nparr, cv2.COLOR_BGR2RGB)
+            # nparr = np.frombuffer(decoded_bytes, np.uint8).reshape((480, 640, 3))
+            nparr = np.frombuffer(raw_data, dtype=np.uint8)
+            frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+            # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

             cv2.imshow("from_remote", frame)

             # Perform object detection
@@ -75,7 +77,7 @@ async def handle_connection(websocket, path):
             ret = lines.encode('utf-8')

-            cv2.waitKey(80)
+            cv2.waitKey(1)
             await websocket.send(ret)
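
Note on the decode change above: the server now runs cv2.imdecode on the received bytes instead of reshaping a raw 480x640x3 buffer, which implies the sender transmits a compressed image (e.g. JPEG) rather than raw pixels. A minimal client-side sketch that would match this, assuming JPEG encoding and a placeholder ws://localhost:8765 endpoint (neither is stated in the diff):

    import asyncio
    import cv2
    import websockets

    async def send_frames(uri="ws://localhost:8765"):  # hypothetical endpoint
        cap = cv2.VideoCapture(0)
        async with websockets.connect(uri) as ws:
            while cap.isOpened():
                ok, frame = cap.read()
                if not ok:
                    break
                # Compress the frame so the server can recover it with cv2.imdecode()
                ok, buf = cv2.imencode(".jpg", frame)
                if not ok:
                    continue
                await ws.send(buf.tobytes())
                reply = await ws.recv()  # server replies with UTF-8 encoded detection lines
                print(reply.decode("utf-8"))

    asyncio.run(send_frames())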


@@ -5,12 +5,13 @@ import cv2
 import math

 # model
-model = YOLO("yolov8x.pt")
+# model = YOLO("yolov8x.pt")
+model = YOLO('yolov8n_full_integer_quant_edgetpu.tflite', task="detect")  # Load an official Detect model

 # start webcam
 cap = cv2.VideoCapture(0)
 cap.set(3, 640)
-cap.set(4, 480)
+cap.set(4, 640)

 # object classes
 classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
@@ -28,6 +29,8 @@ classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "trai
 while True:
     success, img = cap.read()
+    # img = cv2.resize(img, (512, 512), cv2.INTER_AREA)
+    print(f"Image dimensions: {img.shape}")

     results = model.track(img, persist=True, stream=True)

     # coordinates
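
Note on the model swap in this file: the webcam script now loads yolov8n_full_integer_quant_edgetpu.tflite with task="detect". That filename matches what the Ultralytics Edge TPU export normally produces; a sketch of generating and reusing such a file, assuming default Ultralytics export paths (not taken from this repo):

    from ultralytics import YOLO

    # INT8 Edge TPU export; Ultralytics typically writes the result under
    # yolov8n_saved_model/yolov8n_full_integer_quant_edgetpu.tflite (path may vary).
    YOLO("yolov8n.pt").export(format="edgetpu")

    # Load the exported file back for tracking, mirroring the change above.
    model = YOLO("yolov8n_full_integer_quant_edgetpu.tflite", task="detect")
    for r in model.track(source=0, persist=True, stream=True):  # 0 = default webcam
        print(r.boxes)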


@@ -0,0 +1,6 @@
+from ultralytics import YOLO
+
+model = YOLO("yolov8n.pt")
+
+# model.export(format="onnx", simplify=True, int8=True)
+model.export(format="engine")
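
Note on the new export script: format="engine" is the Ultralytics TensorRT export, which requires a CUDA device and the tensorrt package. Assuming the default output name yolov8n.engine (not confirmed by the diff), the result loads back through the same YOLO interface:

    from ultralytics import YOLO

    # Load the exported TensorRT engine like any other model file.
    model = YOLO("yolov8n.engine")
    for r in model.predict(source=0, stream=True):  # e.g. webcam inference
        print(r.boxes)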

@@ -1 +1 @@
-Subproject commit a3d8a6fd064c0a194dc0815e7f19c900937921d9
+Subproject commit 0a47c7a9583148c101652e3772cf5ecad95d3092