update files

This commit is contained in:
Nickiel12 2024-05-18 13:24:51 -07:00
parent fc73f8e0a3
commit 36123f4ed8
7 changed files with 87 additions and 169 deletions

View file

@@ -0,0 +1,86 @@
import websockets
import asyncio
import numpy as np
from ultralytics import YOLO
import cv2
# COCO class labels in the order YOLO reports class indices; index 0 is
# "person", which is the only class the connection handler forwards.
classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
"traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat",
"dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
"handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat",
"baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
"fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli",
"carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed",
"diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone",
"microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
"teddy bear", "hair drier", "toothbrush"
]
# Load the pretrained YOLOv8-small detection model (weights downloaded on
# first use by ultralytics).
model = YOLO('yolov8s.pt')  # Load an official Detect model
async def handle_connection(websocket, path):
    """Serve one websocket client streaming raw video frames.

    Each message from the client is expected to be exactly 480*640*3 bytes
    of 8-bit BGR pixel data. For every frame, runs YOLO tracking and sends
    back one text line per tracked person:

        "<track ids> <x1>:<y1> <x2>:<y2>"

    where the corner coordinates are normalized box corners scaled to
    integers in 0..1000 (4 significant figures, no floats on the wire).
    Runs until the client disconnects.
    """
    print(f"Connection from: {path}")
    try:
        while True:
            raw_data = await websocket.recv()
            # Reinterpret the raw bytes as a 480x640 BGR image; raises if
            # the client sends a message of the wrong size.
            nparr = np.frombuffer(raw_data, np.uint8).reshape((480, 640, 3))
            frame = cv2.cvtColor(nparr, cv2.COLOR_BGR2RGB)

            # persist=True keeps track ids stable across successive frames.
            results = model.track(frame, persist=True)

            lines = ""
            for r in results:
                for box in r.boxes:
                    # cls 0 is "person"; boxes with id None were not
                    # assigned a track on this frame.
                    if box.cls[0].item() == 0 and box.id is not None:
                        track_id = box.id.int().cpu().tolist()
                        # Normalized corners (xyxyn in [0, 1]) scaled to
                        # integers in 0..1000.
                        x1, y1, x2, y2 = box.xyxyn[0]
                        x1, y1, x2, y2 = int(x1 * 1000), int(y1 * 1000), int(x2 * 1000), int(y2 * 1000)
                        lines += f"{track_id} {x1}:{y1} {x2}:{y2}\n"
            await websocket.send(lines.encode('utf-8'))
    except websockets.exceptions.ConnectionClosed:
        print(f"Client disconnected: {path}")
if __name__ == "__main__":
    async def main():
        # websockets.serve as an async context manager keeps accepting
        # connections for as long as the body is suspended.
        async with websockets.serve(handle_connection, "0.0.0.0", 6543):
            await asyncio.Future()  # run forever

    # asyncio.run replaces the deprecated
    # get_event_loop()/run_until_complete()/run_forever() pattern.
    asyncio.run(main())

View file

@@ -1,7 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "face_tracking_demo"
version = "0.1.0"

View file

@@ -1,6 +0,0 @@
[package]
name = "face_tracking_demo"
version = "0.1.0"
edition = "2021"
[dependencies]

View file

@@ -1,61 +0,0 @@
use opencv::{
core, highgui, prelude::*, video::{self, TrackerDaSiamRPN_Params}, videoio
};
fn main() -> opencv::Result<()> {
let window = "window";
highgui::named_window(window, highgui::WINDOW_AUTOSIZE)?;
// let mut cam = opencv::videoio::VideoCapture::new(0, opencv::videoio::CAP_ANY)?; // 0 is the default camera
let mut cap = videoio::VideoCapture::from_file(r"C:\Users\nicho\Desktop\tmp\test-vid.mp4", videoio::CAP_ANY)?;
let opened = opencv::videoio::VideoCapture::is_opened(&cap)?;
if !opened {
panic!("Unable to open default camera!");
}
let mut frame = core::Mat::default();
// let mut tracker = opencv::video::TrackerMIL::create(opencv::video::TrackerMIL_Params::default().unwrap())?;
let mut tracker = opencv::video::TrackerDaSiamRPN::create(&opencv::video::TrackerDaSiamRPN_Params::default().unwrap())?;
// let mut tracker = opencv::video::TrackerGOTURN::create(&opencv::video::TrackerGOTURN_Params::default().unwrap())?;
// let mut tracker = opencv::video::TrackerNano::create(&opencv::video::TrackerNano_Params::default().unwrap())?;
// let mut tracker = opencv::tracking::TrackerKCF::create()
// let mut tracker = opencv::alphamat
let mut bounding_box = core::Rect::new(0, 0, 0, 0);
let mut update_tracker = false;
let mut frame_counter: u64 = 0;
loop {
// cam.read(&mut frame)?;
cap.read(&mut frame)?;
if !update_tracker {
bounding_box = highgui::select_roi(window, &frame, false, false, true)?;
let _ = tracker.init(&frame, bounding_box);
update_tracker = true;
}
let key = highgui::wait_key(10)?;
if key > 0 {
if key == 27 {
break; // 'ESC' to quit
} else if key == 32 {
// 'SPACE' to select an object to track
}
}
if update_tracker && frame_counter % 3 == 0 {
let _ = tracker.update(&frame, &mut bounding_box);
}
frame_counter += 1;
println!("box is: {}, {}", bounding_box.x, bounding_box.y);
opencv::imgproc::rectangle(&mut frame, bounding_box, core::Scalar::new(255.0, 0.0, 0.0, 0.0), 2, 8, 0)?;
if frame.size()?.width > 0 {
highgui::imshow(window, &mut frame)?;
}
}
Ok(())
}

View file

@@ -1,94 +0,0 @@
import cv2
import sys
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

if __name__ == '__main__':
    # Available single-object tracker backends; the index selects which
    # one this run uses (2 == KCF).
    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
    tracker_type = tracker_types[2]

    if int(major_ver) == 3 and int(minor_ver) < 3:
        # OpenCV < 3.3 had a single generic factory keyed by name.
        # (The original check ignored major_ver and wrongly took this
        # branch on e.g. OpenCV 4.2, where Tracker_create no longer exists.)
        tracker = cv2.Tracker_create(tracker_type)
    else:
        # OpenCV >= 3.3 exposes one factory per tracker type. Resolved
        # lazily with getattr so only the selected backend has to exist
        # in this build.
        factory_names = {
            'BOOSTING': 'TrackerBoosting_create',
            'MIL': 'TrackerMIL_create',
            'KCF': 'TrackerKCF_create',
            'TLD': 'TrackerTLD_create',
            'MEDIANFLOW': 'TrackerMedianFlow_create',
            'GOTURN': 'TrackerGOTURN_create',
            'MOSSE': 'TrackerMOSSE_create',
            'CSRT': 'TrackerCSRT_create',
        }
        tracker = getattr(cv2, factory_names[tracker_type])()

    # Read video
    video = cv2.VideoCapture("videos/chaplin.mp4")
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()

    # Let the user draw the initial bounding box on the first frame.
    # (A hard-coded default box here was dead code — selectROI always
    # overwrote it — so it has been removed.)
    bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)

    while True:
        # Read a new frame; stop at end of video.
        ok, frame = video.read()
        if not ok:
            break

        # Time a single tracker update to report FPS.
        timer = cv2.getTickCount()
        ok, bbox = tracker.update(frame)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        if ok:
            # Tracking success: draw the current bounding box.
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Overlay tracker type and measured FPS.
        cv2.putText(frame, tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display result
        cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break

@@ -1 +1 @@
Subproject commit 3cb560093c6de5794cb654a46fb6dd12c8377042
Subproject commit e59025f645b13236cfd88e5f5e77b6692a3fc690