From 139acca29e5e1f9c58d9cbcc4f422f825e08e09a Mon Sep 17 00:00:00 2001
From: Nickiel12 <35903114+Nickiel12@users.noreply.github.com>
Date: Tue, 16 Apr 2024 17:26:17 -0700
Subject: [PATCH] added handshake code

---
 Client/yolo_client.py               | 34 +++++++++++++++++++----------
 YOLO python/generate-predictions.py |  2 +-
 joystick-controller-client          |  2 +-
 3 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/Client/yolo_client.py b/Client/yolo_client.py
index b4463e3..a83805f 100644
--- a/Client/yolo_client.py
+++ b/Client/yolo_client.py
@@ -20,10 +20,20 @@ classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "trai
 
 def main(websocket_addr):
     # Connect to websocket server
-    # ws = create_connection(websocket_addr)
+    ws = create_connection(websocket_addr)
+
+    handshake = ws.recv()
+
+    if handshake == "Type?":
+        ws.send("Type: Automated")
+    else:
+        ws.close()
+        return
+
+    print("Handshake complete")
 
     # Load YOLO model
-    model = YOLO('yolov8s.pt') # Load an official Detect model
+    model = YOLO('yolov8x.pt') # Load an official Detect model
 
     # Open webcam
     cap = cv2.VideoCapture(0)
@@ -32,19 +42,20 @@ def main(websocket_addr):
 
     while cap.isOpened():
         ret, frame = cap.read()
+        frame = cv2.resize(frame, (640, 480))
 
         if not ret:
             break
 
         # Perform object detection
-        results = model.predict(frame)
-
-        max_x1, max_y1, max_x2, max_y2 = 0
+        results = model.track(frame, persist=True)
 
         for r in results:
+            lines = ""
             boxes = r.boxes
             for box in boxes:
-                if box.cls[0].item() == 0:
+                if box.cls[0].item() == 0 and not box.id is None:
                     # bounding box
+                    id = box.id.int().cpu().tolist()
                     x1, y1, x2, y2 = box.xyxy[0]
                     x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) # convert to int values
@@ -56,20 +67,19 @@ def main(websocket_addr):
 
                     # object details
                     org = [x1, y1]
+                    org2 = [x1, y1+50]
                     font = cv2.FONT_HERSHEY_SIMPLEX
                     fontScale = 1
                     color = (255, 0, 0)
+                    color_w = (255, 255, 255)
                     thickness = 2
 
                     cv2.putText(frame, classNames[cls], org, font, fontScale, color, thickness)
+                    cv2.putText(frame, str(id), org2, font, fontScale, color_w, thickness)
 
-                    if (x2-x1) * (y2 - y1):
-                        max_x1 = x1
-                        max_x2 = x2
-                        max_y1 = y1
-                        max_y2 = y2
+                    lines += f"{id} {x1}:{y1} {x2}:{y2}\n"
 
-        print(f"{max_x1}:{max_y1}, {max_x2}:{max_y2}")
+        ws.send(lines)
 
         cv2.imshow('Webcam', frame)
         if cv2.waitKey(1) == ord('q'):
diff --git a/YOLO python/generate-predictions.py b/YOLO python/generate-predictions.py
index 5a91270..18158b7 100644
--- a/YOLO python/generate-predictions.py
+++ b/YOLO python/generate-predictions.py
@@ -10,5 +10,5 @@ model = YOLO('yolov8s.pt') # Load an official Detect model
 
 
 # Run inference on 'bus.jpg' with arguments
-for i in model.predict('D:\Projects\FaceTrackingCamerav3\YOLO python\kevin-center-camera-position.mp4', show=True, save=True, save_frames=True, save_txt=True, conf=0.5, stream=True):
+for i in model.predict("D:\Projects\FaceTrackingCamerav3\Raymond.mp4", save=True, save_frames=True, save_txt=True, conf=0.5, stream=True):
     continue
\ No newline at end of file
diff --git a/joystick-controller-client b/joystick-controller-client
index 2278b4c..ef96409 160000
--- a/joystick-controller-client
+++ b/joystick-controller-client
@@ -1 +1 @@
-Subproject commit 2278b4cd79c9ec0267198eba9f92b4e4ad4aeb78
+Subproject commit ef96409082241db6ff012dc2d898894e6efde3a3
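
For reference, a minimal sketch of the receiving end of this "Type?" handshake and of the per-frame detection lines the client now sends, assuming the Python `websockets` library (v10+); the handler name, host, and port below are illustrative, and the real server (in the joystick-controller-client submodule) may differ:

import asyncio

import websockets  # third-party: pip install websockets


def parse_detection(line):
    # Parse "[3] 100:200 300:400" -> (3, (100, 200, 300, 400)).
    # Format inferred from the f-string in yolo_client.py, where `id` is a
    # one-element list produced by box.id.int().cpu().tolist().
    id_part, top_left, bottom_right = line.split(" ")
    track_id = int(id_part.strip("[]"))
    x1, y1 = (int(v) for v in top_left.split(":"))
    x2, y2 = (int(v) for v in bottom_right.split(":"))
    return track_id, (x1, y1, x2, y2)


async def handle_client(websocket):
    # Mirror of the client-side handshake: ask for a type, expect "Type: Automated".
    await websocket.send("Type?")
    reply = await websocket.recv()
    if reply != "Type: Automated":
        await websocket.close()
        return

    # Each subsequent message is a batch of newline-separated detections.
    async for message in websocket:
        for line in message.splitlines():
            if line:
                print(parse_detection(line))


async def main():
    # Host and port are placeholders; use whatever yolo_client.py connects to.
    async with websockets.serve(handle_client, "0.0.0.0", 8765):
        await asyncio.Future()  # run until cancelled


if __name__ == "__main__":
    asyncio.run(main())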