Compare commits

..

2 commits

Author SHA1 Message Date
0c46b9a190 added dashboard and cli flags 2023-09-28 18:18:13 -07:00
24c4c31656 added more cascades for better detection rates 2023-09-28 18:18:02 -07:00
9 changed files with 211486 additions and 8 deletions

View file

@@ -1,6 +1,28 @@
import cv2 import cv2
import numpy as np import numpy as np
import argparse
import sys
import datetime
def init_argparse() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
prog="FaceDetection",
usage="%(prog)s [OPTION]",
description="Run face localization"
)
parser.add_argument(
"-v", "--version", action="version", version=f"{parser.prog} version 1.0.1"
)
parser.add_argument(
"-d", "--dashboard", action='store_true', help="Flag to enable live dashboard with statistics - requires terminal width of 90 columns or greater"
)
parser.add_argument(
"-o", "--output", action='store_true', help="show the resultant directions"
)
parser.add_argument(
"-f", "--file", nargs="?", help="File to scan instead of using the camera. Useful for generating training data"
)
return parser
multiplication_factor = 0.05 multiplication_factor = 0.05
@@ -17,17 +39,47 @@ def get_adjustment_amount(imgSize, currentX, currentY, currentW, currentH):
return [horizontal_adjustment, vertical_adjustment] return [horizontal_adjustment, vertical_adjustment]
frames_searched = 1
faces_found = 0
start_time = datetime.datetime.now()
def draw_dashboard(keep_stat_line = False):
global frames_searched, faces_found, start_time
elapsed_time = datetime.datetime.now() - start_time
hours, remainder = divmod(elapsed_time.total_seconds(), 3600)
minutes, seconds = divmod(remainder, 60)
f_found = f"{faces_found} Faces found".ljust(16, ' ')
f_searched = f"{frames_searched} Frames searched".ljust(21, ' ')
success_rate = f"{round((faces_found / frames_searched) * 100, 1)}% Success rate".ljust(16, ' ')
if keep_stat_line:
print(f"{f_found} | {f_searched} | {success_rate} | {round(hours)}h {round(minutes)}m {round(seconds)}s elapsed", flush=True)
else:
print(f"{f_found} | {f_searched} | {success_rate} | {round(hours)}h {round(minutes)}m {round(seconds)}s elapsed", end="\r", flush=True)
parser = init_argparse()
args = parser.parse_args()
cap = cv2.VideoCapture(0, cv2.IMREAD_GRAYSCALE) # instead of grayscale you can also use -1, 0, or 1. cap = cv2.VideoCapture(0, cv2.IMREAD_GRAYSCALE) # instead of grayscale you can also use -1, 0, or 1.
faceCascade = cv2.CascadeClassifier(r"./lbpcascade_frontalface.xml") # CHECK THIS FIRST TROUBLE SHOOTING faceCascade = cv2.CascadeClassifier(r"./lbpcascade_frontalface.xml") # CHECK THIS FIRST TROUBLE SHOOTING
faceCascade_default = cv2.CascadeClassifier(r"./haarcascade_frontalface_default.xml")
faceCascade_alt = cv2.CascadeClassifier(r"./haarcascade_frontalface_alt.xml")
faceCascade_alt2 = cv2.CascadeClassifier(r"./haarcascade_frontalface_alt2.xml")
faceCascade_alttree = cv2.CascadeClassifier(r"./haarcascade_frontalface_alt_tree.xml")
profileFaceCascade = cv2.CascadeClassifier(r"./haarcascade_profileface.xml")
tmp, frm = cap.read() tmp, frm = cap.read()
height, width, channels = frm.shape height, width, channels = frm.shape
print(f"{height*.25}, {width}") # print(f"{height*.25}, {width}")
del tmp, frm del tmp, frm
#Color is 1, grayscale is 0, and the unchanged is -1 #Color is 1, grayscale is 0, and the unchanged is -1
while(True): while(True):
ret, frame = cap.read() ret, frame = cap.read()
frames_searched += 1
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Detect faces in the image # Detect faces in the image
@@ -38,16 +90,60 @@ while(True):
minSize=(30, 30) minSize=(30, 30)
) )
if len(faces) == 0:
faces = faceCascade_default.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30,30)
)
if len(faces) == 0:
faces = profileFaceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30,30)
)
if len(faces) == 0:
faces = faceCascade_alt.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30,30)
)
if len(faces) == 0:
faces = faceCascade_alt2.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30,30)
)
if len(faces) == 0:
faces = faceCascade_alttree.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30,30)
)
# Draw a rectangle around the faces # Draw a rectangle around the faces
for (x, y, w, h) in faces: for (x, y, w, h) in faces:
faces_found += 1
adjustment_required = get_adjustment_amount([width, height], x, y, w, h) adjustment_required = get_adjustment_amount([width, height], x, y, w, h)
cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 255)) cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 255))
print(f"Adjust right: {adjustment_required[0]}") if args.output:
print(f"Adjust up : {adjustment_required[1]}") print(f"Adjust right: {adjustment_required[0]}".ljust(90, ' '), flush=True)
print(f"Adjust up : {adjustment_required[1]}", flush=True)
cv2.imshow('frame', frame) cv2.imshow('frame', frame)
if args.dashboard:
draw_dashboard()
if cv2.waitKey(1) & 0xFF == ord('q'): if cv2.waitKey(1) & 0xFF == ord('q'):
break break
draw_dashboard(keep_stat_line=True)
cap.release() cap.release()

99
NN_dataset_generator.py Normal file
View file

@@ -0,0 +1,99 @@
import cv2
import numpy as np
# Speed scale applied to every adjustment step (5% of the gap per frame).
multiplication_factor = 0.05

def get_adjustment_amount(imgSize, currentX, currentY, currentW, currentH):
    """Return [horizontal, vertical] camera adjustments that re-center a face.

    Each component is half the difference between the opposing edge gaps
    (left vs. right, top vs. bottom), scaled by ``multiplication_factor``.
    A negative horizontal value means the face sits left of center.

    Args:
        imgSize: ``[width, height]`` of the frame in pixels.
        currentX, currentY: top-left corner of the detected face box.
        currentW, currentH: width and height of the detected face box.

    Returns:
        ``[horizontal_adjustment, vertical_adjustment]`` as floats.
    """
    current_bottom_right = [currentX + currentW, currentY + currentH]
    current_top_right = [currentX + currentW, currentY]

    # Gap difference, halved, then scaled by the speed factor.
    horizontal_adjustment = multiplication_factor * (currentX - (imgSize[0] - current_top_right[0])) / 2
    # BUGFIX: the bottom gap must be measured against the frame HEIGHT
    # (imgSize[1]); the original used imgSize[0] (width), so vertical
    # centering was computed against the wrong dimension.
    vertical_adjustment = multiplication_factor * (currentY - (imgSize[1] - current_bottom_right[1])) / 2

    return [horizontal_adjustment, vertical_adjustment]
# Open the default camera. BUGFIX: the original passed cv2.IMREAD_GRAYSCALE as
# the second argument, but VideoCapture's second parameter is apiPreference,
# not an imread flag — it only worked because IMREAD_GRAYSCALE == 0 == CAP_ANY.
cap = cv2.VideoCapture(0)

# Primary (fast LBP) cascade plus Haar fallbacks, tried in order of speed.
faceCascade = cv2.CascadeClassifier(r"./lbpcascade_frontalface.xml")  # CHECK THIS FIRST TROUBLE SHOOTING
faceCascade_default = cv2.CascadeClassifier(r"./haarcascade_frontalface_default.xml")
faceCascade_alt = cv2.CascadeClassifier(r"./haarcascade_frontalface_alt.xml")
faceCascade_alt2 = cv2.CascadeClassifier(r"./haarcascade_frontalface_alt2.xml")
faceCascade_alttree = cv2.CascadeClassifier(r"./haarcascade_frontalface_alt_tree.xml")
profileFaceCascade = cv2.CascadeClassifier(r"./haarcascade_profileface.xml")

# Detection order: cheapest/most likely first, most restrictive last.
cascade_chain = [
    faceCascade,
    faceCascade_default,
    profileFaceCascade,
    faceCascade_alt,
    faceCascade_alt2,
    faceCascade_alttree,
]

# Probe one frame to learn the capture resolution.
# BUGFIX: bail out cleanly if the camera could not be opened/read; the
# original unpacked frm.shape unconditionally and crashed with a confusing
# AttributeError when no camera was present.
ok, frm = cap.read()
if not ok:
    cap.release()
    raise SystemExit("Could not read from camera 0 - is a camera connected?")
height, width, channels = frm.shape
print(f"{height*.25}, {width}")
del ok, frm

while True:
    ret, frame = cap.read()
    if not ret:
        # Camera unplugged or stream ended: stop instead of crashing in cvtColor.
        break

    # Cascades operate on single-channel images.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Try each cascade until one reports at least one face.
    faces = ()
    for cascade in cascade_chain:
        faces = cascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
        )
        if len(faces) > 0:
            break

    # Draw a rectangle around each face and report the camera adjustment
    # needed to center it.
    for (x, y, w, h) in faces:
        adjustment_required = get_adjustment_amount([width, height], x, y, w, h)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))
        print(f"Adjust right: {adjustment_required[0]}")
        print(f"Adjust up   : {adjustment_required[1]}")

    cv2.imshow('frame', frame)

    # 'q' quits the preview loop.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
# BUGFIX: close the preview window on exit; the original leaked the HighGUI window.
cv2.destroyAllWindows()

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

29690
haarcascade_profileface.xml Normal file

File diff suppressed because it is too large Load diff

6729
haarcascade_smile.xml Normal file

File diff suppressed because it is too large Load diff

View file

@@ -1,9 +1,6 @@
{ pkgs ? import <nixpkgs> {} }: { pkgs ? import <nixpkgs> {} }:
let let
my-python-packages = ps: with ps; [
numpy
# other python packages
];
in in
pkgs.mkShell { pkgs.mkShell {
buildInputs = with pkgs.python311Packages; [ buildInputs = with pkgs.python311Packages; [