From 542abea6c76c3c0dca392fcbf8ec62a332596953 Mon Sep 17 00:00:00 2001
From: Nickiel12 <35903114+Nickiel12@users.noreply.github.com>
Date: Sat, 30 Sep 2023 10:56:10 -0700
Subject: [PATCH] removed old file, renamed main file

---
 Face_Detect with borders.py => Main.py | 372 ++++++++++++-------------
 NN_dataset_generator.py                |  99 -------
 2 files changed, 186 insertions(+), 285 deletions(-)
 rename Face_Detect with borders.py => Main.py (97%)
 delete mode 100644 NN_dataset_generator.py

diff --git a/Face_Detect with borders.py b/Main.py
similarity index 97%
rename from Face_Detect with borders.py
rename to Main.py
index 7cc2148..0733ebd 100644
--- a/Face_Detect with borders.py
+++ b/Main.py
@@ -1,186 +1,186 @@
-import cv2
-import numpy as np
-import argparse
-import sys
-import time
-import os
-import datetime
-
-def dir_path(string):
-    if os.path.exists(string):
-        return string
-    else:
-        raise NotADirectoryError(string)
-
-def init_argparse() -> argparse.ArgumentParser:
-    parser = argparse.ArgumentParser(
-        prog="FaceDetection",
-        usage="%(prog)s [OPTION]",
-        description="Run face localization"
-    )
-    parser.add_argument(
-        "-v", "--version", action="version", version=f"{parser.prog} version 1.0.1"
-    )
-    parser.add_argument(
-        "-d", "--dashboard", action='store_true', help="Flag to enable live dashboard with statistics - requires terminal width of 90 columns or greater"
-    )
-    parser.add_argument(
-        "-o", "--output", action='store_true', help="show the resultant directions"
-    )
-    parser.add_argument(
-        "-f", "--file", type=dir_path, nargs="?", help="File to scan instead of using the camera. Useful for generating training data"
-    )
-    parser.add_argument(
-        "-s", "--no-screen", action='store_true', help="Do not show the successful frames"
-    )
-    parser.add_argument(
-        "-t", "--training-data", action='store_true', help="When set, saves successful face-location images and coordinates to use for future training data"
-    )
-    return parser
-
-multiplication_factor = 0.05
-
-def get_adjustment_amount(imgSize, currentX, currentY, currentW, currentH):
-
-    current_top_left = [currentX, currentY]
-    current_bottom_right = [currentX + currentW, currentY + currentH]
-
-    current_top_right = [currentX + currentW, currentY]
-
-    # find the difference between the left gap and the right gap, divide it by two, and multiply it by the speed scale
-    horizontal_adjustment = multiplication_factor * (currentX - (imgSize[0] - current_top_right[0])) / 2
-    vertical_adjustment = multiplication_factor * (currentY - (imgSize[0] - current_bottom_right[1])) / 2
-
-    return [horizontal_adjustment, vertical_adjustment]
-
-frames_searched = 1
-faces_found = 0
-start_time = datetime.datetime.now()
-
-def draw_dashboard(keep_stat_line = False):
-    global frames_searched, faces_found, start_time
-
-    elapsed_time = datetime.datetime.now() - start_time
-
-    hours, remainder = divmod(elapsed_time.total_seconds(), 3600)
-    minutes, seconds = divmod(remainder, 60)
-
-    f_found = f"{faces_found} Faces found".ljust(16, ' ')
-    f_searched = f"{frames_searched} Frames searched".ljust(21, ' ')
-    success_rate = f"{round((faces_found / frames_searched) * 100, 1)}% Success rate".ljust(16, ' ')
-
-    if keep_stat_line:
-        print(f"{f_found} | {f_searched} | {success_rate} | {round(hours)}h {round(minutes)}m {round(seconds)}s elapsed", flush=True)
-    else:
-        print(f"{f_found} | {f_searched} | {success_rate} | {round(hours)}h {round(minutes)}m {round(seconds)}s elapsed", end="\r", flush=True)
-
-
-parser = init_argparse()
-args = parser.parse_args()
-
-if args.file:
-    cap = cv2.VideoCapture(args.file)
-else:
-    cap = cv2.VideoCapture(0, cv2.IMREAD_GRAYSCALE) # instead of grayscale you can also use -1, 0, or 1.
-faceCascade = cv2.CascadeClassifier(r"./cascades/lbpcascade_frontalface.xml") # CHECK THIS FIRST TROUBLE SHOOTING
-faceCascade_default = cv2.CascadeClassifier(r"./cascades/haarcascade_frontalface_default.xml")
-faceCascade_alt = cv2.CascadeClassifier(r"./cascades/haarcascade_frontalface_alt.xml")
-faceCascade_alt2 = cv2.CascadeClassifier(r"./cascades/haarcascade_frontalface_alt2.xml")
-faceCascade_alttree = cv2.CascadeClassifier(r"./cascades/haarcascade_frontalface_alt_tree.xml")
-profileFaceCascade = cv2.CascadeClassifier(r"./cascades/haarcascade_profileface.xml")
-
-datestamp = "{:%Y_%m_%d %H_%M_%S}".format(datetime.datetime.now())
-output_dir = r"./output/" + datestamp + r"/"
-
-
-if args.training_data:
-    if not os.path.exists(output_dir):
-        os.makedirs(output_dir)
-    with open(output_dir + r"found_faces.csv", 'a') as fd:
-        fd.write(f"frame_name, x, y, width, height\n")
-
-tmp, frm = cap.read()
-height, width, channels = frm.shape
-# print(f"{height*.25}, {width}")
-del tmp, frm
-#Color is 1, grayscale is 0, and the unchanged is -1
-while(True):
-    ret, frame = cap.read()
-    frames_searched += 1
-    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-
-    # Detect faces in the image
-    faces = faceCascade.detectMultiScale(
-        gray,
-        scaleFactor=1.1,
-        minNeighbors=5,
-        minSize=(30, 30)
-    )
-
-    if len(faces) == 0:
-        faces = faceCascade_default.detectMultiScale(
-            gray,
-            scaleFactor=1.1,
-            minNeighbors=5,
-            minSize=(30,30)
-        )
-
-    if len(faces) == 0:
-        faces = profileFaceCascade.detectMultiScale(
-            gray,
-            scaleFactor=1.1,
-            minNeighbors=5,
-            minSize=(30,30)
-        )
-
-    if len(faces) == 0:
-        faces = faceCascade_alt.detectMultiScale(
-            gray,
-            scaleFactor=1.1,
-            minNeighbors=5,
-            minSize=(30,30)
-        )
-
-    if len(faces) == 0:
-        faces = faceCascade_alt2.detectMultiScale(
-            gray,
-            scaleFactor=1.1,
-            minNeighbors=5,
-            minSize=(30,30)
-        )
-
-    if len(faces) == 0:
-        faces = faceCascade_alttree.detectMultiScale(
-            gray,
-            scaleFactor=1.1,
-            minNeighbors=5,
-            minSize=(30,30)
-        )
-
-    # Draw a rectangle around the faces
-    for (x, y, w, h) in faces:
-        if args.training_data:
-            frame_name = frames_searched
-            with open(output_dir + r"found_faces.csv", 'a') as fd:
-                fd.write(f"frame_{frame_name}.jpg, {x}, {y}, {w}, {h}\n")
-            cv2.imwrite(output_dir + f"frame_{frame_name}.jpg", frame)
-
-        faces_found += 1
-        adjustment_required = get_adjustment_amount([width, height], x, y, w, h)
-        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 255))
-
-        if args.output:
-            print(f"Adjust right: {adjustment_required[0]}".ljust(90, ' '), flush=True)
-            print(f"Adjust up   : {adjustment_required[1]}", flush=True)
-
-    if not args.no_screen:
-        cv2.imshow('frame', frame)
-
-    if args.dashboard:
-        draw_dashboard()
-
-    if cv2.waitKey(1) & 0xFF == ord('q'):
-        break
-
-draw_dashboard(keep_stat_line=True)
-cap.release()
+import cv2
+import numpy as np
+import argparse
+import sys
+import time
+import os
+import datetime
+
+def dir_path(string):
+    if os.path.exists(string):
+        return string
+    else:
+        raise NotADirectoryError(string)
+
+def init_argparse() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(
+        prog="FaceDetection",
+        usage="%(prog)s [OPTION]",
+        description="Run face localization"
+    )
+    parser.add_argument(
+        "-v", "--version", action="version", version=f"{parser.prog} version 1.0.1"
+    )
+    parser.add_argument(
+        "-d", "--dashboard", action='store_true', help="Flag to enable live dashboard with statistics - requires terminal width of 90 columns or greater"
+    )
+    parser.add_argument(
+        "-o", "--output", action='store_true', help="show the resultant directions"
+    )
+    parser.add_argument(
+        "-f", "--file", type=dir_path, nargs="?", help="File to scan instead of using the camera. Useful for generating training data"
+    )
+    parser.add_argument(
+        "-s", "--no-screen", action='store_true', help="Do not show the successful frames"
+    )
+    parser.add_argument(
+        "-t", "--training-data", action='store_true', help="When set, saves successful face-location images and coordinates to use for future training data"
+    )
+    return parser
+
+multiplication_factor = 0.05
+
+def get_adjustment_amount(imgSize, currentX, currentY, currentW, currentH):
+
+    current_top_left = [currentX, currentY]
+    current_bottom_right = [currentX + currentW, currentY + currentH]
+
+    current_top_right = [currentX + currentW, currentY]
+
+    # find the difference between the left gap and the right gap, divide it by two, and multiply it by the speed scale
+    horizontal_adjustment = multiplication_factor * (currentX - (imgSize[0] - current_top_right[0])) / 2
+    vertical_adjustment = multiplication_factor * (currentY - (imgSize[0] - current_bottom_right[1])) / 2
+
+    return [horizontal_adjustment, vertical_adjustment]
+
+frames_searched = 1
+faces_found = 0
+start_time = datetime.datetime.now()
+
+def draw_dashboard(keep_stat_line = False):
+    global frames_searched, faces_found, start_time
+
+    elapsed_time = datetime.datetime.now() - start_time
+
+    hours, remainder = divmod(elapsed_time.total_seconds(), 3600)
+    minutes, seconds = divmod(remainder, 60)
+
+    f_found = f"{faces_found} Faces found".ljust(16, ' ')
+    f_searched = f"{frames_searched} Frames searched".ljust(21, ' ')
+    success_rate = f"{round((faces_found / frames_searched) * 100, 1)}% Success rate".ljust(16, ' ')
+
+    if keep_stat_line:
+        print(f"{f_found} | {f_searched} | {success_rate} | {round(hours)}h {round(minutes)}m {round(seconds)}s elapsed", flush=True)
+    else:
+        print(f"{f_found} | {f_searched} | {success_rate} | {round(hours)}h {round(minutes)}m {round(seconds)}s elapsed", end="\r", flush=True)
+
+
+parser = init_argparse()
+args = parser.parse_args()
+
+if args.file:
+    cap = cv2.VideoCapture(args.file)
+else:
+    cap = cv2.VideoCapture(0, cv2.IMREAD_GRAYSCALE) # instead of grayscale you can also use -1, 0, or 1.
+faceCascade = cv2.CascadeClassifier(r"./cascades/lbpcascade_frontalface.xml") # CHECK THIS FIRST TROUBLE SHOOTING
+faceCascade_default = cv2.CascadeClassifier(r"./cascades/haarcascade_frontalface_default.xml")
+faceCascade_alt = cv2.CascadeClassifier(r"./cascades/haarcascade_frontalface_alt.xml")
+faceCascade_alt2 = cv2.CascadeClassifier(r"./cascades/haarcascade_frontalface_alt2.xml")
+faceCascade_alttree = cv2.CascadeClassifier(r"./cascades/haarcascade_frontalface_alt_tree.xml")
+profileFaceCascade = cv2.CascadeClassifier(r"./cascades/haarcascade_profileface.xml")
+
+datestamp = "{:%Y_%m_%d %H_%M_%S}".format(datetime.datetime.now())
+output_dir = r"./output/" + datestamp + r"/"
+
+
+if args.training_data:
+    if not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+    with open(output_dir + r"found_faces.csv", 'a') as fd:
+        fd.write(f"frame_name, x, y, width, height\n")
+
+tmp, frm = cap.read()
+height, width, channels = frm.shape
+# print(f"{height*.25}, {width}")
+del tmp, frm
+#Color is 1, grayscale is 0, and the unchanged is -1
+while(True):
+    ret, frame = cap.read()
+    frames_searched += 1
+    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+    # Detect faces in the image
+    faces = faceCascade.detectMultiScale(
+        gray,
+        scaleFactor=1.1,
+        minNeighbors=5,
+        minSize=(30, 30)
+    )
+
+    if len(faces) == 0:
+        faces = faceCascade_default.detectMultiScale(
+            gray,
+            scaleFactor=1.1,
+            minNeighbors=5,
+            minSize=(30,30)
+        )
+
+    if len(faces) == 0:
+        faces = profileFaceCascade.detectMultiScale(
+            gray,
+            scaleFactor=1.1,
+            minNeighbors=5,
+            minSize=(30,30)
+        )
+
+    if len(faces) == 0:
+        faces = faceCascade_alt.detectMultiScale(
+            gray,
+            scaleFactor=1.1,
+            minNeighbors=5,
+            minSize=(30,30)
+        )
+
+    if len(faces) == 0:
+        faces = faceCascade_alt2.detectMultiScale(
+            gray,
+            scaleFactor=1.1,
+            minNeighbors=5,
+            minSize=(30,30)
+        )
+
+    if len(faces) == 0:
+        faces = faceCascade_alttree.detectMultiScale(
+            gray,
+            scaleFactor=1.1,
+            minNeighbors=5,
+            minSize=(30,30)
+        )
+
+    # Draw a rectangle around the faces
+    for (x, y, w, h) in faces:
+        if args.training_data:
+            frame_name = frames_searched
+            with open(output_dir + r"found_faces.csv", 'a') as fd:
+                fd.write(f"frame_{frame_name}.jpg, {x}, {y}, {w}, {h}\n")
+            cv2.imwrite(output_dir + f"frame_{frame_name}.jpg", frame)
+
+        faces_found += 1
+        adjustment_required = get_adjustment_amount([width, height], x, y, w, h)
+        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 255))
+
+        if args.output:
+            print(f"Adjust right: {adjustment_required[0]}".ljust(90, ' '), flush=True)
+            print(f"Adjust up   : {adjustment_required[1]}", flush=True)
+
+    if not args.no_screen:
+        cv2.imshow('frame', frame)
+
+    if args.dashboard:
+        draw_dashboard()
+
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+draw_dashboard(keep_stat_line=True)
+cap.release()
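
Note on the renamed Main.py: two details look worth a follow-up commit. First, `get_adjustment_amount` measures the vertical gap against `imgSize[0]` (the width) where `imgSize[1]` (the height) appears intended, since both call sites pass `[width, height]`. Second, the second argument of `cv2.VideoCapture` selects a capture backend (an `apiPreference` constant such as `cv2.CAP_ANY`), not a color mode; `cv2.IMREAD_GRAYSCALE` is an `imread` flag, and grayscale conversion already happens per frame via `cv2.cvtColor`. A minimal sketch of both fixes, assuming `imgSize` is `[width, height]` as in the call sites:

    import cv2

    multiplication_factor = 0.05

    def get_adjustment_amount(imgSize, currentX, currentY, currentW, currentH):
        # Gap between the face box and the right / bottom edges of the frame.
        right_gap = imgSize[0] - (currentX + currentW)
        bottom_gap = imgSize[1] - (currentY + currentH)

        # Half the imbalance between opposing gaps, scaled by the speed factor.
        horizontal_adjustment = multiplication_factor * (currentX - right_gap) / 2
        vertical_adjustment = multiplication_factor * (currentY - bottom_gap) / 2

        return [horizontal_adjustment, vertical_adjustment]

    cap = cv2.VideoCapture(0, cv2.CAP_ANY)  # second argument picks a backend, not a color mode

With the height used for the vertical term, "Adjust up" tracks the face's vertical position instead of re-measuring the horizontal imbalance.
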
diff --git a/NN_dataset_generator.py b/NN_dataset_generator.py
deleted file mode 100644
index 25dcc79..0000000
--- a/NN_dataset_generator.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import cv2
-import numpy as np
-
-
-multiplication_factor = 0.05
-
-def get_adjustment_amount(imgSize, currentX, currentY, currentW, currentH):
-
-    current_top_left = [currentX, currentY]
-    current_bottom_right = [currentX + currentW, currentY + currentH]
-
-    current_top_right = [currentX + currentW, currentY]
-
-    # find the difference between the left gap and the right gap, divide it by two, and multiply it by the speed scale
-    horizontal_adjustment = multiplication_factor * (currentX - (imgSize[0] - current_top_right[0])) / 2
-    vertical_adjustment = multiplication_factor * (currentY - (imgSize[0] - current_bottom_right[1])) / 2
-
-    return [horizontal_adjustment, vertical_adjustment]
-
-cap = cv2.VideoCapture(0, cv2.IMREAD_GRAYSCALE) # instead of grayscale you can also use -1, 0, or 1.
-faceCascade = cv2.CascadeClassifier(r"./lbpcascade_frontalface.xml") # CHECK THIS FIRST TROUBLE SHOOTING
-faceCascade_default = cv2.CascadeClassifier(r"./haarcascade_frontalface_default.xml")
-faceCascade_alt = cv2.CascadeClassifier(r"./haarcascade_frontalface_alt.xml")
-faceCascade_alt2 = cv2.CascadeClassifier(r"./haarcascade_frontalface_alt2.xml")
-faceCascade_alttree = cv2.CascadeClassifier(r"./haarcascade_frontalface_alt_tree.xml")
-profileFaceCascade = cv2.CascadeClassifier(r"./haarcascade_profileface.xml")
-
-
-tmp, frm = cap.read()
-height, width, channels = frm.shape
-print(f"{height*.25}, {width}")
-del tmp, frm
-
-#Color is 1, grayscale is 0, and the unchanged is -1
-while(True):
-    ret, frame = cap.read()
-    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-
-    # Detect faces in the image
-    faces = faceCascade.detectMultiScale(
-        gray,
-        scaleFactor=1.1,
-        minNeighbors=5,
-        minSize=(30, 30)
-    )
-
-    if len(faces) == 0:
-        faces = faceCascade_default.detectMultiScale(
-            gray,
-            scaleFactor=1.1,
-            minNeighbors=5,
-            minSize=(30,30)
-        )
-
-    if len(faces) == 0:
-        faces = profileFaceCascade.detectMultiScale(
-            gray,
-            scaleFactor=1.1,
-            minNeighbors=5,
-            minSize=(30,30)
-        )
-
-    if len(faces) == 0:
-        faces = faceCascade_alt.detectMultiScale(
-            gray,
-            scaleFactor=1.1,
-            minNeighbors=5,
-            minSize=(30,30)
-        )
-
-    if len(faces) == 0:
-        faces = faceCascade_alt2.detectMultiScale(
-            gray,
-            scaleFactor=1.1,
-            minNeighbors=5,
-            minSize=(30,30)
-        )
-
-    if len(faces) == 0:
-        faces = faceCascade_alttree.detectMultiScale(
-            gray,
-            scaleFactor=1.1,
-            minNeighbors=5,
-            minSize=(30,30)
-        )
-
-
-    # Draw a rectangle around the faces
-    for (x, y, w, h) in faces:
-        adjustment_required = get_adjustment_amount([width, height], x, y, w, h)
-        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 255))
-        print(f"Adjust right: {adjustment_required[0]}")
-        print(f"Adjust up   : {adjustment_required[1]}")
-    cv2.imshow('frame', frame)
-
-    if cv2.waitKey(1) & 0xFF == ord('q'):
-        break
-
-cap.release()
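
One more hardening note on the read loop kept in Main.py: `cap.read()` is consumed without checking `ret`, so once a `--file` input is exhausted (or a camera read fails) `frame` comes back as `None` and `cv2.cvtColor` raises instead of exiting cleanly. A minimal sketch of the guarded loop, with the cascade detection elided:

    import cv2

    def scan(cap):
        """Mirror of Main.py's read loop with the missing ret check added."""
        frames_searched = 0
        while True:
            ret, frame = cap.read()
            if not ret:      # end of a --file input, or a dropped camera frame
                break        # a live-camera loop might `continue` instead
            frames_searched += 1
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # ... cascade detection on `gray` would go here ...
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        return frames_searched
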