import cv2
import numpy as np

# Fraction of the measured offset applied each frame (a simple proportional gain).
multiplication_factor = 0.05


def get_adjustment_amount(imgSize, currentX, currentY, currentW, currentH):
    # imgSize is [width, height] of the frame; (currentX, currentY, currentW, currentH)
    # is the face box as returned by detectMultiScale.
    current_top_left = [currentX, currentY]
    current_bottom_right = [currentX + currentW, currentY + currentH]
    current_top_right = [currentX + currentW, currentY]

    # Find the difference between the left gap and the right gap (and between the
    # top gap and the bottom gap), divide it by two, and multiply it by the speed scale.
    horizontal_adjustment = multiplication_factor * (currentX - (imgSize[0] - current_top_right[0])) / 2
    vertical_adjustment = multiplication_factor * (currentY - (imgSize[1] - current_bottom_right[1])) / 2
    return [horizontal_adjustment, vertical_adjustment]


cap = cv2.VideoCapture(0)  # 0 selects the default camera
faceCascade = cv2.CascadeClassifier(r"./lbpcascade_frontalface.xml")  # CHECK THIS PATH FIRST WHEN TROUBLESHOOTING

# Grab one frame up front to learn the capture resolution.
tmp, frm = cap.read()
height, width, channels = frm.shape
print(f"{width}x{height}")
del tmp, frm

while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces in the image
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
    )

    # Draw a rectangle around each face and report how far the camera should move
    for (x, y, w, h) in faces:
        adjustment_required = get_adjustment_amount([width, height], x, y, w, h)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))
        print(f"Adjust right: {adjustment_required[0]}")
        print(f"Adjust up   : {adjustment_required[1]}")

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
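
# A quick, hedged sanity check of get_adjustment_amount (the numbers below are
# made up for illustration and are not part of the original script): for a
# 640x480 frame and a 100x100 face whose top-left corner is at (200, 150), the
# left gap is 200 px and the right gap is 640 - 300 = 340 px, so the horizontal
# term is 0.05 * (200 - 340) / 2 = -3.5; the vertical term works out the same
# way to 0.05 * (150 - 230) / 2 = -2.0. This helper is never called by the
# script above; it exists only as a reference for the expected output.
def _demo_adjustment():
    horizontal, vertical = get_adjustment_amount([640, 480], 200, 150, 100, 100)
    print(f"horizontal: {horizontal}, vertical: {vertical}")  # expected: -3.5, -2.0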