WGU-Capstone/Face_Detect with borders.py

import cv2
import numpy as np
multiplication_factor = 0.05  # speed scale applied to each adjustment
def get_adjustment_amount(imgSize, currentX, currentY, currentW, currentH):
    # imgSize is [width, height]; the detected face box is (currentX, currentY, currentW, currentH)
    current_top_left = [currentX, currentY]
    current_bottom_right = [currentX + currentW, currentY + currentH]
    current_top_right = [currentX + currentW, currentY]
    # Find the difference between the left gap and the right gap (and between the top gap
    # and the bottom gap), divide it by two, and multiply it by the speed scale.
    horizontal_adjustment = multiplication_factor * (currentX - (imgSize[0] - current_top_right[0])) / 2
    vertical_adjustment = multiplication_factor * (currentY - (imgSize[1] - current_bottom_right[1])) / 2
    return [horizontal_adjustment, vertical_adjustment]
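
# Illustrative check of the math above (hypothetical numbers, not part of the capstone run):
# for a 640x480 frame with a face box at x=100, y=50, w=200, h=150, the right gap is
# 640 - 300 = 340, so the horizontal term is 0.05 * (100 - 340) / 2 = -6.0 (face sits left
# of center); the bottom gap is 480 - 200 = 280, so the vertical term is
# 0.05 * (50 - 280) / 2 = -5.75 (face sits above center).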
cap = cv2.VideoCapture(0)  # device index 0; VideoCapture's optional second argument selects a capture backend (e.g. cv2.CAP_ANY), not a color mode
faceCascade = cv2.CascadeClassifier(r"./lbpcascade_frontalface.xml")  # CHECK THIS PATH FIRST WHEN TROUBLESHOOTING
# Grab one frame up front to learn the capture resolution
tmp, frm = cap.read()
height, width, channels = frm.shape
print(f"{height*.25}, {width}")
del tmp, frm
# (cv2.imread color flags: 1 = color, 0 = grayscale, -1 = unchanged; not used here, since frames are converted to grayscale with cvtColor below)
while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect faces in the grayscale image
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,   # step between successive image-pyramid scales
        minNeighbors=5,    # higher values give fewer, more confident detections
        minSize=(30, 30)   # ignore boxes smaller than 30x30 pixels
    )
    # Draw a rectangle around each face and report how far the camera should move
    for (x, y, w, h) in faces:
        adjustment_required = get_adjustment_amount([width, height], x, y, w, h)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))
        print(f"Adjust right: {adjustment_required[0]}")
        print(f"Adjust up   : {adjustment_required[1]}")
    cv2.imshow('frame', frame)
    # Quit when the 'q' key is pressed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()