diff --git a/.gitignore b/.gitignore
index b6e4761..15c61c8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
+data
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
diff --git a/.gitmodules b/.gitmodules
index 65e6169..e69de29 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +0,0 @@
-[submodule "Face-Mask-Detection"]
-	path = Face-Mask-Detection
-	url = https://github.com/chandrikadeb7/Face-Mask-Detection.git
diff --git a/Face-Mask-Detection b/Face-Mask-Detection
deleted file mode 160000
index 7e50074..0000000
--- a/Face-Mask-Detection
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 7e500749401bad4bb338790fbdb89b58e41ef2d9
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..d606398
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1 @@
+recursive-include misinformation/model *.model
diff --git a/misinformation/faces.py b/misinformation/faces.py
index 6c073f7..7e43e32 100644
--- a/misinformation/faces.py
+++ b/misinformation/faces.py
@@ -1,10 +1,20 @@
+import cv2
 import ipywidgets
+import numpy as np
+import os
+from tensorflow.keras.models import load_model
+from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
+from tensorflow.keras.preprocessing.image import img_to_array
 from IPython.display import display
 
 from deepface import DeepFace
 from retinaface import RetinaFace
 
 
+# Module-wide storage for the lazily loaded mask detection model
+mask_detection_model = None
+
+
 def facial_expression_analysis(img_path):
     result = {"filename": img_path}
 
@@ -18,10 +28,19 @@ def facial_expression_analysis(img_path):
     # Find the biggest face image in the detected ones
     maxface = max(faces, key=lambda f: f.shape[0] * f.shape[1])
 
+    # Determine whether the face wears a mask
+    result["wears_mask"] = wears_mask(maxface)
+
+    # Adapt the requested features depending on whether a mask is worn:
+    # masks interfere with race detection and make emotion detection useless.
+    actions = ["age", "gender"]
+    if not result["wears_mask"]:
+        actions = actions + ["race", "emotion"]
+
     # Run the full DeepFace analysis
     result["deepface_results"] = DeepFace.analyze(
         img_path=maxface,
-        actions=["age", "gender", "race", "emotion"],
+        actions=actions,
         prog_bar=False,
         detector_backend="skip",
     )
@@ -33,6 +52,31 @@
     return result
 
 
+def wears_mask(face):
+    global mask_detection_model
+
+    # Preprocess the face to match the assumptions of the face mask
+    # detection model
+    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
+    face = cv2.resize(face, (224, 224))
+    face = img_to_array(face)
+    face = preprocess_input(face)
+    face = np.expand_dims(face, axis=0)
+
+    # Lazily load the model
+    if mask_detection_model is None:
+        mask_detection_model = load_model(
+            os.path.join(os.path.split(__file__)[0], "models", "mask_detector.model")
+        )
+
+    # Run the model inside a widget context to suppress its console output
+    with ipywidgets.Output():
+        mask, withoutMask = mask_detection_model.predict(face)[0]
+
+    # Convert from np.bool_ to bool so that the result can be serialized later
+    return bool(mask > withoutMask)
+
+
 class JSONContainer:
     """Expose a Python dictionary as a JSON document in JupyterLab
     rich display rendering.
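
For context, a minimal usage sketch of the new mask-aware analysis follows. It assumes the misinformation package is installed with the bundled mask_detector.model; the image paths "example.jpg" and "face_crop.jpg" are hypothetical placeholders, not files touched by this change.

import cv2
from misinformation.faces import facial_expression_analysis, wears_mask

# Full pipeline: detect the largest face, check for a mask, and run the
# mask-aware DeepFace analysis on a placeholder image path.
result = facial_expression_analysis("example.jpg")
print(result.get("wears_mask"), result.get("deepface_results"))

# wears_mask() can also be called directly on a face crop in BGR channel
# order, as returned by cv2.imread().
face = cv2.imread("face_crop.jpg")
print(wears_mask(face))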