Compare commits
No commits in common. "0c46b9a190977783573bbe80e1b4ad4745cf3fc9" and "b1eba5ea2d0402b4a498b3ec8aa4c1d7da0df70a" have entirely different histories.
0c46b9a190...b1eba5ea2d
9 changed files with 8 additions and 211486 deletions
@@ -1,28 +1,6 @@
import cv2
import numpy as np
import argparse
import sys
import datetime


def init_argparse() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(
        prog="FaceDetection",
        usage="%(prog)s [OPTION]",
        description="Run face localization"
    )
    parser.add_argument(
        "-v", "--version", action="version", version=f"{parser.prog} version 1.0.1"
    )
    parser.add_argument(
        "-d", "--dashboard", action='store_true',
        help="Flag to enable live dashboard with statistics - requires terminal width of 90 columns or greater"
    )
    parser.add_argument(
        "-o", "--output", action='store_true', help="show the resultant directions"
    )
    parser.add_argument(
        "-f", "--file", nargs="?",
        help="File to scan instead of using the camera. Useful for generating training data"
    )
    return parser


multiplication_factor = 0.05
@@ -39,47 +17,17 @@ def get_adjustment_amount(imgSize, currentX, currentY, currentW, currentH):

    return [horizontal_adjustment, vertical_adjustment]


frames_searched = 1  # start at 1 so the success-rate division below can never divide by zero
faces_found = 0
start_time = datetime.datetime.now()


def draw_dashboard(keep_stat_line=False):
    global frames_searched, faces_found, start_time

    elapsed_time = datetime.datetime.now() - start_time

    hours, remainder = divmod(elapsed_time.total_seconds(), 3600)
    minutes, seconds = divmod(remainder, 60)

    f_found = f"{faces_found} Faces found".ljust(16, ' ')
    f_searched = f"{frames_searched} Frames searched".ljust(21, ' ')
    success_rate = f"{round((faces_found / frames_searched) * 100, 1)}% Success rate".ljust(16, ' ')

    if keep_stat_line:
        print(f"{f_found} | {f_searched} | {success_rate} | {round(hours)}h {round(minutes)}m {round(seconds)}s elapsed", flush=True)
    else:
        print(f"{f_found} | {f_searched} | {success_rate} | {round(hours)}h {round(minutes)}m {round(seconds)}s elapsed", end="\r", flush=True)


parser = init_argparse()
args = parser.parse_args()

cap = cv2.VideoCapture(0)  # cv2.IMREAD_GRAYSCALE is an imread() flag, not a VideoCapture API preference, so it is omitted here
faceCascade = cv2.CascadeClassifier(r"./lbpcascade_frontalface.xml")  # CHECK THIS FIRST WHEN TROUBLESHOOTING
faceCascade_default = cv2.CascadeClassifier(r"./haarcascade_frontalface_default.xml")
faceCascade_alt = cv2.CascadeClassifier(r"./haarcascade_frontalface_alt.xml")
faceCascade_alt2 = cv2.CascadeClassifier(r"./haarcascade_frontalface_alt2.xml")
faceCascade_alttree = cv2.CascadeClassifier(r"./haarcascade_frontalface_alt_tree.xml")
profileFaceCascade = cv2.CascadeClassifier(r"./haarcascade_profileface.xml")

# Read one throwaway frame to learn the capture dimensions
tmp, frm = cap.read()
height, width, channels = frm.shape
# print(f"{height*.25}, {width}")
del tmp, frm

while True:
    ret, frame = cap.read()
    frames_searched += 1
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces in the image
@@ -90,60 +38,16 @@ while(True):
        minSize=(30, 30)
    )

    # Fall back through the remaining classifiers until one reports a face
    if len(faces) == 0:
        faces = faceCascade_default.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30)
        )

    if len(faces) == 0:
        faces = profileFaceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30)
        )

    if len(faces) == 0:
        faces = faceCascade_alt.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30)
        )

    if len(faces) == 0:
        faces = faceCascade_alt2.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30)
        )

    if len(faces) == 0:
        faces = faceCascade_alttree.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30)
        )

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        faces_found += 1
        adjustment_required = get_adjustment_amount([width, height], x, y, w, h)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 255))
        if args.output:
            print(f"Adjust right: {adjustment_required[0]}".ljust(90, ' '), flush=True)
            print(f"Adjust up   : {adjustment_required[1]}", flush=True)
        cv2.imshow('frame', frame)

    if args.dashboard:
        draw_dashboard()

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

draw_dashboard(keep_stat_line=True)
cap.release()
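The six-step classifier fallback above repeats the same detectMultiScale call once per cascade. A possible refactor, sketched here rather than taken from either commit, keeps the cascades in an ordered list and returns at the first hit; the loop body would then shrink to faces = detect_faces(gray):

import cv2

# Same XML files as in the diff, ordered from preferred (LBP, fastest) to last resort
CASCADE_PATHS = [
    "./lbpcascade_frontalface.xml",
    "./haarcascade_frontalface_default.xml",
    "./haarcascade_profileface.xml",
    "./haarcascade_frontalface_alt.xml",
    "./haarcascade_frontalface_alt2.xml",
    "./haarcascade_frontalface_alt_tree.xml",
]
cascades = [cv2.CascadeClassifier(p) for p in CASCADE_PATHS]

def detect_faces(gray):
    # Try each classifier in turn and return the first non-empty result
    for cascade in cascades:
        faces = cascade.detectMultiScale(
            gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
        )
        if len(faces) > 0:
            return faces
    return []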
@@ -1,99 +0,0 @@
import cv2
import numpy as np


multiplication_factor = 0.05

def get_adjustment_amount(imgSize, currentX, currentY, currentW, currentH):

    current_top_left = [currentX, currentY]
    current_bottom_right = [currentX + currentW, currentY + currentH]

    current_top_right = [currentX + currentW, currentY]

    # find the difference between the left gap and the right gap, divide it by two, and multiply it by the speed scale
    horizontal_adjustment = multiplication_factor * (currentX - (imgSize[0] - current_top_right[0])) / 2
    # likely bug: imgSize[0] is the width; the vertical gap should use imgSize[1]
    vertical_adjustment = multiplication_factor * (currentY - (imgSize[0] - current_bottom_right[1])) / 2

    return [horizontal_adjustment, vertical_adjustment]


cap = cv2.VideoCapture(0, cv2.IMREAD_GRAYSCALE)  # buggy: cv2.IMREAD_GRAYSCALE is an imread() flag, not a VideoCapture API preference
faceCascade = cv2.CascadeClassifier(r"./lbpcascade_frontalface.xml")  # CHECK THIS FIRST WHEN TROUBLESHOOTING
faceCascade_default = cv2.CascadeClassifier(r"./haarcascade_frontalface_default.xml")
faceCascade_alt = cv2.CascadeClassifier(r"./haarcascade_frontalface_alt.xml")
faceCascade_alt2 = cv2.CascadeClassifier(r"./haarcascade_frontalface_alt2.xml")
faceCascade_alttree = cv2.CascadeClassifier(r"./haarcascade_frontalface_alt_tree.xml")
profileFaceCascade = cv2.CascadeClassifier(r"./haarcascade_profileface.xml")


tmp, frm = cap.read()
height, width, channels = frm.shape
print(f"{height*.25}, {width}")
del tmp, frm

# Color is 1, grayscale is 0, and unchanged is -1
while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces in the image
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
    )

    if len(faces) == 0:
        faces = faceCascade_default.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30)
        )

    if len(faces) == 0:
        faces = profileFaceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30)
        )

    if len(faces) == 0:
        faces = faceCascade_alt.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30)
        )

    if len(faces) == 0:
        faces = faceCascade_alt2.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30)
        )

    if len(faces) == 0:
        faces = faceCascade_alttree.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30)
        )

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        adjustment_required = get_adjustment_amount([width, height], x, y, w, h)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 255))
        print(f"Adjust right: {adjustment_required[0]}")
        print(f"Adjust up   : {adjustment_required[1]}")
        cv2.imshow('frame', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
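The gap arithmetic in get_adjustment_amount is easier to see with numbers. A quick check with hypothetical values (a 640x480 frame and a made-up detection, not data from the repository):

multiplication_factor = 0.05

# Hypothetical detection, for illustration only
img_size = [640, 480]            # [width, height]
x, y, w, h = 100, 50, 100, 100   # face bounding box

left_gap = x                          # 100 px to the left edge
right_gap = img_size[0] - (x + w)     # 640 - 200 = 440 px to the right edge
horizontal = multiplication_factor * (left_gap - right_gap) / 2
print(horizontal)                     # -8.5: the face sits left of centre, so pan left

The sign encodes the direction and multiplication_factor scales the correction, so the camera eases toward centre instead of jumping.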
6 file diffs suppressed because they are too large.
@@ -1,6 +1,9 @@
{ pkgs ? import <nixpkgs> {} }:
let

  my-python-packages = ps: with ps; [
    numpy
    # other python packages
  ];
in
pkgs.mkShell {
  buildInputs = with pkgs.python311Packages; [
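The shell.nix hunk is cut off before the buildInputs list closes. For reference, a minimal complete file along the same lines, assuming the environment pulls numpy and opencv4 from python311Packages (the actual package list is not visible here):

{ pkgs ? import <nixpkgs> {} }:

pkgs.mkShell {
  buildInputs = with pkgs.python311Packages; [
    numpy
    opencv4   # assumption: provides the cv2 module imported by the script
  ];
}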