add disclaimer checks and tests (#202)

* add disclaimer checks and tests

* add changes to display

* webcolors library update changes

* disclosure instead of disclaimer

* change interface to disclosure

* update demo notebook

* improve explorer display of disclosure env variable, update docs notebook
This commit is contained in:
Inga Ulusoy 2024-06-12 09:49:35 +02:00 committed by GitHub
parent 4ac760e690
commit 894ad0938b
No known key found for this signature
GPG key ID: B5690EEEBB952194
9 changed files with 345 additions and 35 deletions

View file

@@ -5,7 +5,7 @@ except ImportError:
import importlib_metadata as metadata # type: ignore
from ammico.cropposts import crop_media_posts, crop_posts_from_refs
from ammico.display import AnalysisExplorer
from ammico.faces import EmotionDetector
from ammico.faces import EmotionDetector, ethical_disclosure
from ammico.multimodal_search import MultimodalSearch
from ammico.summary import SummaryDetector
from ammico.text import TextDetector, TextAnalyzer, PostprocessText
@@ -27,4 +27,5 @@ __all__ = [
"PostprocessText",
"find_files",
"get_dataframe",
"ethical_disclosure",
]
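With `ethical_disclosure` added to `__all__`, the check can be invoked straight from the package namespace ahead of any analysis. A minimal sketch, using the function as defined in faces.py below:

import ammico

# Prompts once for acceptance, or returns immediately if the environment
# variable is already set to "True" or "False".
accepted = ammico.ethical_disclosure(accept_disclosure="DISCLOSURE_AMMICO")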

View file

@@ -120,7 +120,7 @@ class ColorDetector(AnalysisMethod):
output_color = output_color.lower().replace("grey", "gray")
except ValueError:
delta_e_lst = []
filtered_colors = webcolors.CSS3_NAMES_TO_HEX
filtered_colors = webcolors._definitions._CSS3_NAMES_TO_HEX
for _, img_hex in filtered_colors.items():
cur_clr = webcolors.hex_to_rgb(img_hex)
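The switch to `webcolors._definitions._CSS3_NAMES_TO_HEX` tracks newer webcolors releases, which dropped the public constant. A hedged compatibility sketch, assuming these are the only two locations of the mapping across versions (the project instead pins webcolors>1.13 and uses the private name directly):

import webcolors

try:
    # webcolors <= 1.12 exposed the name-to-hex mapping publicly
    filtered_colors = webcolors.CSS3_NAMES_TO_HEX
except AttributeError:
    # newer releases keep it in the private _definitions module
    filtered_colors = webcolors._definitions._CSS3_NAMES_TO_HEX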

View file

@@ -101,6 +101,9 @@ class AnalysisExplorer:
State("setting_Text_revision_numbers", "value"),
State("setting_Emotion_emotion_threshold", "value"),
State("setting_Emotion_race_threshold", "value"),
State("setting_Emotion_gender_threshold", "value"),
State("setting_Emotion_age_threshold", "value"),
State("setting_Emotion_env_var", "value"),
State("setting_Color_delta_e_method", "value"),
State("setting_Summary_analysis_type", "value"),
State("setting_Summary_model", "value"),
@@ -200,6 +203,13 @@ class AnalysisExplorer:
style={"width": "100%"},
),
),
dbc.Col(
[
html.P(
"Select name of the environment variable to accept or reject the disclosure*:"
),
]
),
dbc.Col(
dcc.Input(
type="text",
@@ -246,6 +256,48 @@ class AnalysisExplorer:
],
align="start",
),
dbc.Col(
[
html.P("Gender threshold"),
dcc.Input(
type="number",
value=50,
max=100,
min=0,
id="setting_Emotion_gender_threshold",
style={"width": "100%"},
),
],
align="start",
),
dbc.Col(
[
html.P("Age threshold"),
dcc.Input(
type="number",
value=50,
max=100,
min=0,
id="setting_Emotion_age_threshold",
style={"width": "100%"},
),
],
align="start",
),
dbc.Col(
[
html.P(
"Disclosure acceptance environment variable"
),
dcc.Input(
type="text",
value="DISCLOSURE_AMMICO",
id="setting_Emotion_env_var",
style={"width": "100%"},
),
],
align="start",
),
],
style={"width": "100%"},
),
@@ -441,6 +493,9 @@ class AnalysisExplorer:
settings_text_revision_numbers: str,
setting_emotion_emotion_threshold: int,
setting_emotion_race_threshold: int,
setting_emotion_gender_threshold: int,
setting_emotion_age_threshold: int,
setting_emotion_env_var: str,
setting_color_delta_e_method: str,
setting_summary_analysis_type: str,
setting_summary_model: str,
@@ -493,8 +548,15 @@ class AnalysisExplorer:
elif detector_value == "EmotionDetector":
detector_class = identify_function(
image_copy,
race_threshold=setting_emotion_race_threshold,
emotion_threshold=setting_emotion_emotion_threshold,
race_threshold=setting_emotion_race_threshold,
gender_threshold=setting_emotion_gender_threshold,
age_threshold=setting_emotion_age_threshold,
accept_disclosure=(
setting_emotion_env_var
if setting_emotion_env_var
else "DISCLOSURE_AMMICO"
),
)
elif detector_value == "ColorDetector":
detector_class = identify_function(
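The three new `State` entries surface the gender and age thresholds plus the disclosure variable in the explorer UI. To try them interactively, the explorer is launched as before (port number arbitrary):

analysis_explorer = ammico.AnalysisExplorer(image_dict)
# Thresholds and the disclosure variable name can then be adjusted in the app.
analysis_explorer.run_server(port=8050)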

View file

@@ -80,12 +80,78 @@ retinaface_model = DownloadResource(
)
def ethical_disclosure(accept_disclosure: str = "DISCLOSURE_AMMICO"):
"""
Asks the user to accept the ethical disclosure.
Args:
accept_disclosure (str): The name of the disclosure variable (default: "DISCLOSURE_AMMICO").
"""
if not os.environ.get(accept_disclosure):
accepted = _ask_for_disclosure_acceptance(accept_disclosure)
elif os.environ.get(accept_disclosure) == "False":
accepted = False
elif os.environ.get(accept_disclosure) == "True":
accepted = True
else:
print(
"Could not determine disclosure - skipping \
race/ethnicity, gender and age detection."
)
accepted = False
return accepted
def _ask_for_disclosure_acceptance(accept_disclosure: str = "DISCLOSURE_AMMICO"):
"""
Asks the user to accept the disclosure.
"""
print("This analysis uses the DeepFace and RetinaFace libraries.")
print(
"""
DeepFace and RetinaFace provide wrappers to trained models in face recognition and
emotion detection. Age, gender and race / ethnicity models were trained
on the backbone of VGG-Face with transfer learning.
ETHICAL DISCLOSURE STATEMENT:
The Emotion Detector uses RetinaFace to probabilistically assess the gender, age and
race of the detected faces. Such assessments may not reflect how the individuals
identified by the tool view themselves. Additionally, the classification is carried
out in simplistic categories and contains only the most basic classes, for example
male and female for gender. By continuing to use the tool, you certify that you
understand the ethical implications such assessments have for the interpretation of
the results.
"""
)
answer = input("Do you accept the disclosure? (yes/no): ")
answer = answer.lower().strip()
if answer == "yes":
print("You have accepted the disclosure.")
print(
"""Age, gender, race/ethnicity detection will be performed based on the provided
confidence thresholds."""
)
os.environ[accept_disclosure] = "True"
accepted = True
elif answer == "no":
print("You have not accepted the disclosure.")
print("No age, gender, race/ethnicity detection will be performed.")
os.environ[accept_disclosure] = "False"
accepted = False
else:
print("Please answer with yes or no.")
accepted = _ask_for_disclosure_acceptance(accept_disclosure)
return accepted
class EmotionDetector(AnalysisMethod):
def __init__(
self,
subdict: dict,
emotion_threshold: float = 50.0,
race_threshold: float = 50.0,
gender_threshold: float = 50.0,
age_threshold: float = 50.0,
accept_disclosure: str = "DISCLOSURE_AMMICO",
) -> None:
"""
Initializes the EmotionDetector object.
@@ -94,6 +160,10 @@ class EmotionDetector(AnalysisMethod):
subdict (dict): The dictionary to store the analysis results.
emotion_threshold (float): The threshold for detecting emotions (default: 50.0).
race_threshold (float): The threshold for detecting race (default: 50.0).
gender_threshold (float): The threshold for detecting gender (default: 50.0).
age_threshold (float): The threshold for detecting age (default: 50.0).
accept_disclosure (str): The name of the disclosure variable, that is
set upon accepting the disclosure (default: "DISCLOSURE_AMMICO").
"""
super().__init__(subdict)
self.subdict.update(self.set_keys())
@@ -102,8 +172,14 @@ class EmotionDetector(AnalysisMethod):
raise ValueError("Emotion threshold must be between 0 and 100.")
if race_threshold < 0 or race_threshold > 100:
raise ValueError("Race threshold must be between 0 and 100.")
if gender_threshold < 0 or gender_threshold > 100:
raise ValueError("Gender threshold must be between 0 and 100.")
if age_threshold < 0 or age_threshold > 100:
raise ValueError("Age threshold must be between 0 and 100.")
self.emotion_threshold = emotion_threshold
self.race_threshold = race_threshold
self.gender_threshold = gender_threshold
self.age_threshold = age_threshold
self.emotion_categories = {
"angry": "Negative",
"disgust": "Negative",
@@ -113,6 +189,7 @@
"surprise": "Neutral",
"neutral": "Neutral",
}
self.accepted = ethical_disclosure(accept_disclosure)
def set_keys(self) -> dict:
"""
@@ -143,6 +220,44 @@
"""
return self.facial_expression_analysis()
def _define_actions(self, fresult: dict) -> list:
# Adapt the features we are looking for depending on whether a mask is worn.
# White masks screw race detection, emotion detection is useless.
# also, depending on the disclosure, we might not want to run the analysis
# for gender, age, ethnicity/race
conditional_actions = {
"all": ["age", "gender", "race", "emotion"],
"all_with_mask": ["age", "gender"],
"restricted_access": ["emotion"],
"restricted_access_with_mask": [],
}
if fresult["wears_mask"] and self.accepted:
actions = conditional_actions["all_with_mask"]
elif fresult["wears_mask"] and not self.accepted:
actions = conditional_actions["restricted_access_with_mask"]
elif not fresult["wears_mask"] and self.accepted:
actions = conditional_actions["all"]
elif not fresult["wears_mask"] and not self.accepted:
actions = conditional_actions["restricted_access"]
else:
raise ValueError(
"Invalid mask detection {} and disclosure \
acceptance {} result.".format(
fresult["wears_mask"], self.accepted
)
)
return actions
def _ensure_deepface_models(self, actions: list):
# Ensure that all data has been fetched by pooch
deepface_face_expression_model.get()
if "race" in actions:
deepface_race_model.get()
if "age" in actions:
deepface_age_model.get()
if "gender" in actions:
deepface_gender_model.get()
def analyze_single_face(self, face: np.ndarray) -> dict:
"""
Analyzes the features of a single face.
@@ -156,16 +271,8 @@ class EmotionDetector(AnalysisMethod):
fresult = {}
# Determine whether the face wears a mask
fresult["wears_mask"] = self.wears_mask(face)
# Adapt the features we are looking for depending on whether a mask is worn.
# White masks screw race detection, emotion detection is useless.
actions = ["age", "gender"]
if not fresult["wears_mask"]:
actions = actions + ["race", "emotion"]
# Ensure that all data has been fetched by pooch
deepface_age_model.get()
deepface_face_expression_model.get()
deepface_gender_model.get()
deepface_race_model.get()
actions = self._define_actions(fresult)
self._ensure_deepface_models(actions)
# Run the full DeepFace analysis
fresult.update(
DeepFace.analyze(
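Because `ethical_disclosure` consults the environment first, non-interactive runs (CI, batch jobs) can pre-set the variable and never reach the `input()` prompt — the same pattern the updated tests below use via `monkeypatch.setenv`. A minimal sketch (`mydict` stands in for your image sub-dictionary):

import os
from ammico.faces import EmotionDetector

# Pre-accept the disclosure; use "False" to restrict analysis to emotion only.
os.environ["DISCLOSURE_AMMICO"] = "True"
detector = EmotionDetector(mydict)  # no prompt fires; all actions enabled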

View file

@@ -166,7 +166,7 @@
"source": [
"image_dict = ammico.find_files(\n",
" # path=\"/content/drive/MyDrive/misinformation-data/\",\n",
" path=data_path.as_posix(),\n",
" path=str(data_path),\n",
" limit=15,\n",
")"
]
@@ -177,7 +177,30 @@
"source": [
"## Step 2: Inspect the input files using the graphical user interface\n",
"A Dash user interface is to select the most suitable options for the analysis, before running a complete analysis on the whole data set. The options for each detector module are explained below in the corresponding sections; for example, different models can be selected that will provide slightly different results. This way, the user can interactively explore which settings provide the most accurate results. In the interface, the nested `image_dict` is passed through the `AnalysisExplorer` class. The interface is run on a specific port which is passed using the `port` keyword; if a port is already in use, it will return an error message, in which case the user should select a different port number. \n",
"The interface opens a dash app inside the Jupyter Notebook and allows selection of the input file in the top left dropdown menu, as well as selection of the detector type in the top right, with options for each detector type as explained below. The output of the detector is shown directly on the right next to the image. This way, the user can directly inspect how updating the options for each detector changes the computed results, and find the best settings for a production run."
"The interface opens a dash app inside the Jupyter Notebook and allows selection of the input file in the top left dropdown menu, as well as selection of the detector type in the top right, with options for each detector type as explained below. The output of the detector is shown directly on the right next to the image. This way, the user can directly inspect how updating the options for each detector changes the computed results, and find the best settings for a production run.\n",
"\n",
"### Ethical disclosure statement\n",
"\n",
"If you want to run an analysis using the EmotionDetector detector type, you have first have to respond to an ethical disclosure statement. This disclosure statement ensures that you only use the full capabilities of the EmotionDetector after you have been made aware of its shortcomings.\n",
"\n",
"For this, answer \"yes\" or \"no\" to the below prompt. This will set an environment variable with the name given as in `accept_disclosure`. To re-run the disclosure prompt, unset the variable by uncommenting the line `os.environ.pop(accept_disclosure, None)`. To permanently set this envorinment variable, add it to your shell via your `.profile` or `.bashr` file.\n",
"\n",
"If the disclosure statement is accepted, the EmotionDetector will perform age, gender and race/ethnicity classification dependend on the provided thresholds. If the disclosure is rejected, only the presence of faces and emotion (if not wearing a mask) is detected."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# respond to the disclosure statement\n",
"# this will set an environment variable for you\n",
"# if you do not want to re-accept the disclosure every time, you can set this environment variable in your shell\n",
"# to re-set the environment variable, uncomment the below line\n",
"accept_disclosure = \"DISCLOSURE_AMMICO\"\n",
"# os.environ.pop(accept_disclosure, None)\n",
"_ = ammico.ethical_disclosure(accept_disclosure=accept_disclosure)"
]
},
{
@@ -843,7 +866,7 @@
"metadata": {},
"source": [
"## Detection of faces and facial expression analysis\n",
"Faces and facial expressions are detected and analyzed using the `EmotionDetector` class from the `faces` module. Initially, it is detected if faces are present on the image using RetinaFace, followed by analysis if face masks are worn (Face-Mask-Detection). The detection of age, gender, race, and emotions is carried out with deepface.\n",
"Faces and facial expressions are detected and analyzed using the `EmotionDetector` class from the `faces` module. Initially, it is detected if faces are present on the image using RetinaFace, followed by analysis if face masks are worn (Face-Mask-Detection). The detection of age, gender, race, and emotions is carried out with deepface, but only if the disclosure statement has been accepted (see above).\n",
"\n",
"<img src=\"../../docs/source/_static/emotion_detector.png\" width=\"800\" />\n",
"\n",
@@ -853,10 +876,11 @@
"\n",
"From the seven facial expressions, an overall dominating emotion category is identified: negative, positive, or neutral emotion. These are defined with the facial expressions angry, disgust, fear and sad for the negative category, happy for the positive category, and surprise and neutral for the neutral category.\n",
"\n",
"A similar threshold as for the emotion recognition is set for the race detection, `race_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
"A similar threshold as for the emotion recognition is set for the race/ethnicity, gender and age detection, `race_threshold`, `gender_threshold`, `age_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
"\n",
"Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold` and \n",
"`race_threshold` are optional:"
"You may also pass the name of the environment variable that determines if you accept or reject the ethical disclosure statement. By default, the variable is named `DISCLOSURE_AMMICO`.\n",
"\n",
"Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `gender_threshold`, `age_threshold` are optional:"
]
},
{
@@ -866,7 +890,9 @@
"outputs": [],
"source": [
"for key in image_dict.keys():\n",
" image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50).analyse_image()"
" image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50,\n",
" gender_threshold=50, age_threshold=50, \n",
" accept_disclosure=\"DISCLOSURE_AMMICO\").analyse_image()"
]
},
{
@@ -1371,7 +1397,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
"version": "3.9.16"
}
},
"nbformat": 4,

View file

@@ -42,7 +42,8 @@ def test_AnalysisExplorer(get_AE, get_options):
assert get_AE.update_picture(None) is None
def test_right_output_analysis_summary(get_AE, get_options):
def test_right_output_analysis_summary(get_AE, get_options, monkeypatch):
monkeypatch.setenv("OTHER_VAR", "True")
get_AE._right_output_analysis(
2,
get_options[3],
@@ -53,6 +54,9 @@ def test_right_output_analysis_summary(get_AE, get_options):
None,
50,
50,
50,
50,
"OTHER_VAR",
"CIE 1976",
"summary_and_questions",
"base",
@@ -60,7 +64,8 @@
)
def test_right_output_analysis_emotions(get_AE, get_options):
def test_right_output_analysis_emotions(get_AE, get_options, monkeypatch):
monkeypatch.setenv("OTHER_VAR", "True")
get_AE._right_output_analysis(
2,
get_options[3],
@@ -71,6 +76,9 @@ def test_right_output_analysis_emotions(get_AE, get_options):
None,
50,
50,
50,
50,
"OTHER_VAR",
"CIE 1976",
"summary_and_questions",
"base",

View file

@@ -1,14 +1,51 @@
import ammico.faces as fc
import json
import pytest
import os
def test_set_keys():
ed = fc.EmotionDetector({})
def test_init_EmotionDetector(monkeypatch):
# standard input
monkeypatch.setattr("builtins.input", lambda _: "yes")
ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
assert ed.subdict["face"] == "No"
assert ed.subdict["multiple_faces"] == "No"
assert ed.subdict["wears_mask"] == ["No"]
assert ed.subdict["emotion"] == [None]
assert ed.subdict["age"] == [None]
assert ed.emotion_threshold == 50
assert ed.age_threshold == 50
assert ed.gender_threshold == 50
assert ed.race_threshold == 50
assert ed.emotion_categories["angry"] == "Negative"
assert ed.emotion_categories["happy"] == "Positive"
assert ed.emotion_categories["surprise"] == "Neutral"
assert ed.accept_disclosure == "OTHER_VAR"
assert os.environ.get(ed.accept_disclosure) == "True"
assert ed.accepted
monkeypatch.delenv(ed.accept_disclosure, raising=False)
# different thresholds
ed = fc.EmotionDetector(
{},
emotion_threshold=80,
race_threshold=30,
gender_threshold=70,
age_threshold=90,
accept_disclosure="OTHER_VAR",
)
assert ed.emotion_threshold == 80
assert ed.race_threshold == 30
assert ed.gender_threshold == 70
assert ed.age_threshold == 90
monkeypatch.delenv(ed.accept_disclosure, raising=False)
# do not accept disclosure
monkeypatch.setattr("builtins.input", lambda _: "no")
ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
assert os.environ.get(ed.accept_disclosure) == "False"
assert not ed.accepted
monkeypatch.delenv(ed.accept_disclosure, raising=False)
# now test the exceptions: thresholds
monkeypatch.setattr("builtins.input", lambda _: "yes")
with pytest.raises(ValueError):
fc.EmotionDetector({}, emotion_threshold=150)
with pytest.raises(ValueError):
@@ -17,13 +54,56 @@ def test_set_keys():
fc.EmotionDetector({}, race_threshold=150)
with pytest.raises(ValueError):
fc.EmotionDetector({}, race_threshold=-50)
with pytest.raises(ValueError):
fc.EmotionDetector({}, gender_threshold=150)
with pytest.raises(ValueError):
fc.EmotionDetector({}, gender_threshold=-50)
with pytest.raises(ValueError):
fc.EmotionDetector({}, age_threshold=150)
with pytest.raises(ValueError):
fc.EmotionDetector({}, age_threshold=-50)
# test pre-set variables: disclosure
monkeypatch.delattr("builtins.input", raising=False)
monkeypatch.setenv("OTHER_VAR", "something")
ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
assert not ed.accepted
monkeypatch.setenv("OTHER_VAR", "False")
ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
assert not ed.accepted
monkeypatch.setenv("OTHER_VAR", "True")
ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
assert ed.accepted
def test_analyse_faces(get_path):
def test_define_actions(monkeypatch):
monkeypatch.setenv("OTHER_VAR", "True")
ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
actions = ed._define_actions({"wears_mask": True})
assert actions == ["age", "gender"]
actions = ed._define_actions({"wears_mask": False})
assert actions == ["age", "gender", "race", "emotion"]
monkeypatch.setenv("OTHER_VAR", "False")
ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
actions = ed._define_actions({"wears_mask": True})
assert actions == []
actions = ed._define_actions({"wears_mask": False})
assert actions == ["emotion"]
def test_ensure_deepface_models(monkeypatch):
monkeypatch.setenv("OTHER_VAR", "True")
ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
ed._ensure_deepface_models(["age", "gender", "race", "emotion"])
def test_analyse_faces(get_path, monkeypatch):
mydict = {
"filename": get_path + "pexels-pixabay-415829.jpg",
}
mydict.update(fc.EmotionDetector(mydict).analyse_image())
monkeypatch.setenv("OTHER_VAR", "True")
mydict.update(
fc.EmotionDetector(mydict, accept_disclosure="OTHER_VAR").analyse_image()
)
with open(get_path + "example_faces.json", "r") as file:
out_dict = json.load(file)
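The same environment pattern also isolates the prompt-free path of `ethical_disclosure` itself; a sketch (test name hypothetical):

import ammico.faces as fc

def test_preset_env_accepts(monkeypatch):
    # "True" short-circuits the interactive prompt entirely.
    monkeypatch.setenv("OTHER_VAR", "True")
    assert fc.ethical_disclosure(accept_disclosure="OTHER_VAR")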

View file

@@ -166,7 +166,7 @@
"source": [
"image_dict = ammico.find_files(\n",
" # path=\"/content/drive/MyDrive/misinformation-data/\",\n",
" path=data_path.as_posix(),\n",
" path=str(data_path),\n",
" limit=15,\n",
")"
]
@@ -177,7 +177,30 @@
"source": [
"## Step 2: Inspect the input files using the graphical user interface\n",
"A Dash user interface is to select the most suitable options for the analysis, before running a complete analysis on the whole data set. The options for each detector module are explained below in the corresponding sections; for example, different models can be selected that will provide slightly different results. This way, the user can interactively explore which settings provide the most accurate results. In the interface, the nested `image_dict` is passed through the `AnalysisExplorer` class. The interface is run on a specific port which is passed using the `port` keyword; if a port is already in use, it will return an error message, in which case the user should select a different port number. \n",
"The interface opens a dash app inside the Jupyter Notebook and allows selection of the input file in the top left dropdown menu, as well as selection of the detector type in the top right, with options for each detector type as explained below. The output of the detector is shown directly on the right next to the image. This way, the user can directly inspect how updating the options for each detector changes the computed results, and find the best settings for a production run."
"The interface opens a dash app inside the Jupyter Notebook and allows selection of the input file in the top left dropdown menu, as well as selection of the detector type in the top right, with options for each detector type as explained below. The output of the detector is shown directly on the right next to the image. This way, the user can directly inspect how updating the options for each detector changes the computed results, and find the best settings for a production run.\n",
"\n",
"### Ethical disclosure statement\n",
"\n",
"If you want to run an analysis using the EmotionDetector detector type, you have first have to respond to an ethical disclosure statement. This disclosure statement ensures that you only use the full capabilities of the EmotionDetector after you have been made aware of its shortcomings.\n",
"\n",
"For this, answer \"yes\" or \"no\" to the below prompt. This will set an environment variable with the name given as in `accept_disclosure`. To re-run the disclosure prompt, unset the variable by uncommenting the line `os.environ.pop(accept_disclosure, None)`. To permanently set this envorinment variable, add it to your shell via your `.profile` or `.bashr` file.\n",
"\n",
"If the disclosure statement is accepted, the EmotionDetector will perform age, gender and race/ethnicity classification dependend on the provided thresholds. If the disclosure is rejected, only the presence of faces and emotion (if not wearing a mask) is detected."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# respond to the disclosure statement\n",
"# this will set an environment variable for you\n",
"# if you do not want to re-accept the disclosure every time, you can set this environment variable in your shell\n",
"# to re-set the environment variable, uncomment the below line\n",
"accept_disclosure = \"DISCLOSURE_AMMICO\"\n",
"# os.environ.pop(accept_disclosure, None)\n",
"_ = ammico.ethical_disclosure(accept_disclosure=accept_disclosure)"
]
},
{
@@ -822,7 +845,7 @@
"metadata": {},
"source": [
"## Detection of faces and facial expression analysis\n",
"Faces and facial expressions are detected and analyzed using the `EmotionDetector` class from the `faces` module. Initially, it is detected if faces are present on the image using RetinaFace, followed by analysis if face masks are worn (Face-Mask-Detection). The detection of age, gender, race, and emotions is carried out with deepface.\n",
"Faces and facial expressions are detected and analyzed using the `EmotionDetector` class from the `faces` module. Initially, it is detected if faces are present on the image using RetinaFace, followed by analysis if face masks are worn (Face-Mask-Detection). The detection of age, gender, race, and emotions is carried out with deepface, but only if the disclosure statement has been accepted (see above).\n",
"\n",
"<img src=\"../_static/emotion_detector.png\" width=\"800\" />\n",
"\n",
@@ -832,10 +855,11 @@
"\n",
"From the seven facial expressions, an overall dominating emotion category is identified: negative, positive, or neutral emotion. These are defined with the facial expressions angry, disgust, fear and sad for the negative category, happy for the positive category, and surprise and neutral for the neutral category.\n",
"\n",
"A similar threshold as for the emotion recognition is set for the race detection, `race_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
"A similar threshold as for the emotion recognition is set for the race/ethnicity, gender and age detection, `race_threshold`, `gender_threshold`, `age_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
"\n",
"Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold` and \n",
"`race_threshold` are optional:"
"You may also pass the name of the environment variable that determines if you accept or reject the ethical disclosure statement. By default, the variable is named `DISCLOSURE_AMMICO`.\n",
"\n",
"Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `gender_threshold`, `age_threshold` are optional:"
]
},
{
@@ -845,7 +869,9 @@
"outputs": [],
"source": [
"for key in image_dict.keys():\n",
" image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50).analyse_image()"
" image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50,\n",
" gender_threshold=50, age_threshold=50, \n",
" accept_disclosure=\"DISCLOSURE_AMMICO\").analyse_image()"
]
},
{

View file

@@ -52,7 +52,7 @@ dependencies = [
"google-cloud-vision",
"dash_bootstrap_components",
"colorgram.py",
"webcolors",
"webcolors>1.13",
"colour-science",
"scikit-learn>1.3.0",
"tqdm"