Mirror of
https://github.com/ssciwr/AMMICO.git
synced 2025-10-29 13:06:04 +02:00
Update deepface requirement from <=0.0.75 to <=0.0.92 (#203)
* Update deepface requirement from <=0.0.75 to <=0.0.92

  ---
  updated-dependencies:
  - dependency-name: deepface
    dependency-type: direct:production
  ...

  Signed-off-by: dependabot[bot] <support@github.com>

* more extensive testing for faces, adapt changes from deepface
* include gender threshold in display and notebook
* update documentation
* increase detection threshold for tests
* update handling of missing dict keys
* update notebook
* make sure text was found on image before analysing

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Inga Ulusoy <inga.ulusoy@uni-heidelberg.de>
This commit is contained in:
parent
17846fbe71
Commit
42b6732308
@@ -24,7 +24,7 @@ Use pre-processed image files such as social media posts with comments and proce
 1. Question answering
 1. Performing person and face recognition in images
 1. Face mask detection
-1. Probabilistic detection of age, gender and race detection
+1. Probabilistic detection of age, gender and race
 1. Emotion recognition
 1. Color analysis
 1. Analyse hue and percentage of color on image
@@ -101,6 +101,7 @@ class AnalysisExplorer:
             State("setting_Text_revision_numbers", "value"),
             State("setting_Emotion_emotion_threshold", "value"),
             State("setting_Emotion_race_threshold", "value"),
+            State("setting_Emotion_gender_threshold", "value"),
             State("setting_Emotion_env_var", "value"),
             State("setting_Color_delta_e_method", "value"),
             State("setting_Summary_analysis_type", "value"),
@@ -201,13 +202,6 @@ class AnalysisExplorer:
                                 style={"width": "100%"},
                             ),
-                        ),
-                        dbc.Col(
-                            [
-                                html.P(
-                                    "Select name of the environment variable to accept or reject the disclosure*:"
-                                ),
-                            ]
                         ),
                         dbc.Col(
                             dcc.Input(
                                 type="text",
@@ -254,6 +248,20 @@ class AnalysisExplorer:
                             ],
                             align="start",
                         ),
+                        dbc.Col(
+                            [
+                                html.P("Gender threshold"),
+                                dcc.Input(
+                                    type="number",
+                                    value=50,
+                                    max=100,
+                                    min=0,
+                                    id="setting_Emotion_gender_threshold",
+                                    style={"width": "100%"},
+                                ),
+                            ],
+                            align="start",
+                        ),
                         dbc.Col(
                             [
                                 html.P(
@@ -463,6 +471,7 @@ class AnalysisExplorer:
         settings_text_revision_numbers: str,
         setting_emotion_emotion_threshold: int,
         setting_emotion_race_threshold: int,
+        setting_emotion_gender_threshold: int,
         setting_emotion_env_var: str,
         setting_color_delta_e_method: str,
         setting_summary_analysis_type: str,
@@ -518,6 +527,7 @@ class AnalysisExplorer:
                 image_copy,
                 emotion_threshold=setting_emotion_emotion_threshold,
                 race_threshold=setting_emotion_race_threshold,
+                gender_threshold=setting_emotion_gender_threshold,
                 accept_disclosure=(
                     setting_emotion_env_var
                     if setting_emotion_env_var
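For readers unfamiliar with Dash, the new `setting_Emotion_gender_threshold` value reaches the callback as a `State` argument: it is read when the callback fires but does not itself trigger the callback. A minimal, self-contained sketch of that pattern (the ids and layout here are illustrative, not the actual AMMICO interface):

```python
# Minimal sketch of the Dash State pattern used above; ids and layout
# are illustrative placeholders, not the AMMICO AnalysisExplorer.
from dash import Dash, Input, Output, State, dcc, html

app = Dash(__name__)
app.layout = html.Div(
    [
        dcc.Input(id="gender_threshold", type="number", value=50, min=0, max=100),
        html.Button("Run", id="run"),
        html.Div(id="out"),
    ]
)

@app.callback(
    Output("out", "children"),
    Input("run", "n_clicks"),
    # State is read when the Input fires, without triggering the callback itself
    State("gender_threshold", "value"),
)
def run_analysis(n_clicks, gender_threshold):
    if not n_clicks:
        return "Press run"
    return f"Would analyze with gender_threshold={gender_threshold}"

if __name__ == "__main__":
    app.run_server(debug=True)
```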
@@ -149,6 +149,7 @@ class EmotionDetector(AnalysisMethod):
         subdict: dict,
         emotion_threshold: float = 50.0,
         race_threshold: float = 50.0,
+        gender_threshold: float = 50.0,
         accept_disclosure: str = "DISCLOSURE_AMMICO",
     ) -> None:
         """
@@ -158,6 +159,7 @@ class EmotionDetector(AnalysisMethod):
             subdict (dict): The dictionary to store the analysis results.
             emotion_threshold (float): The threshold for detecting emotions (default: 50.0).
             race_threshold (float): The threshold for detecting race (default: 50.0).
+            gender_threshold (float): The threshold for detecting gender (default: 50.0).
             accept_disclosure (str): The name of the disclosure variable, that is
                 set upon accepting the disclosure (default: "DISCLOSURE_AMMICO").
         """
@@ -168,8 +170,11 @@ class EmotionDetector(AnalysisMethod):
             raise ValueError("Emotion threshold must be between 0 and 100.")
         if race_threshold < 0 or race_threshold > 100:
             raise ValueError("Race threshold must be between 0 and 100.")
+        if gender_threshold < 0 or gender_threshold > 100:
+            raise ValueError("Gender threshold must be between 0 and 100.")
         self.emotion_threshold = emotion_threshold
         self.race_threshold = race_threshold
+        self.gender_threshold = gender_threshold
         self.emotion_categories = {
             "angry": "Negative",
             "disgust": "Negative",
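The gender threshold is validated exactly like the existing emotion and race thresholds. A hedged usage sketch (the `ammico.faces` import alias follows the test files later in this diff; the env var is set up front so the disclosure prompt does not fire, though exact prompt behavior may differ):

```python
# Sketch: out-of-range thresholds raise ValueError in the constructor.
import os
import ammico.faces as fc

# accept the ethical disclosure up front so the constructor does not prompt
os.environ["DISCLOSURE_AMMICO"] = "True"

try:
    fc.EmotionDetector({}, gender_threshold=150)
except ValueError as err:
    print(err)  # Gender threshold must be between 0 and 100.

ed = fc.EmotionDetector({}, gender_threshold=60)
assert ed.gender_threshold == 60
```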
@@ -193,11 +198,6 @@ class EmotionDetector(AnalysisMethod):
             "multiple_faces": "No",
             "no_faces": 0,
             "wears_mask": ["No"],
-            "age": [None],
-            "gender": [None],
-            "race": [None],
-            "emotion": [None],
-            "emotion (category)": [None],
         }
         return params
@@ -217,7 +217,7 @@ class EmotionDetector(AnalysisMethod):
         # for gender, age, ethnicity/race
         conditional_actions = {
             "all": ["age", "gender", "race", "emotion"],
-            "all_with_mask": ["age", "gender"],
+            "all_with_mask": ["age"],
             "restricted_access": ["emotion"],
             "restricted_access_with_mask": [],
         }
@@ -239,7 +239,8 @@ class EmotionDetector(AnalysisMethod):
 
     def _ensure_deepface_models(self):
         # Ensure that all data has been fetched by pooch
-        deepface_face_expression_model.get()
+        if "emotion" in self.actions:
+            deepface_face_expression_model.get()
         if "race" in self.actions:
             deepface_race_model.get()
         if "age" in self.actions:
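Model weights are now fetched lazily, only for the actions that will actually run. The `deepface_*_model` objects are AMMICO's own pooch-backed wrappers; a rough sketch of the same idea with plain pooch follows (the URLs and hashes are placeholders, not the real weight locations):

```python
# Action-gated lazy fetching, sketched with plain pooch.
# URLs/hashes are placeholders, not the real deepface weight locations.
import pooch

def ensure_models(actions: list) -> None:
    if "emotion" in actions:
        pooch.retrieve(
            url="https://example.com/facial_expression_model_weights.h5",
            known_hash=None,  # placeholder; pin a real hash in practice
        )
    if "race" in actions:
        pooch.retrieve(
            url="https://example.com/race_model_single_batch.h5",
            known_hash=None,
        )

ensure_models(["age", "gender"])  # downloads nothing for these actions
```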
@@ -249,7 +250,7 @@ class EmotionDetector(AnalysisMethod):
 
     def analyze_single_face(self, face: np.ndarray) -> dict:
         """
-        Analyzes the features of a single face.
+        Analyzes the features of a single face on the image.
 
         Args:
             face (np.ndarray): The face image array.
@@ -263,17 +264,15 @@ class EmotionDetector(AnalysisMethod):
         self._define_actions(fresult)
         self._ensure_deepface_models()
-        # Run the full DeepFace analysis
-        fresult.update(
-            DeepFace.analyze(
-                img_path=face,
-                actions=self.actions,
-                prog_bar=False,
-                detector_backend="skip",
-            )
-        )
-        # We remove the region, as the data is not correct - after all we are
-        # running the analysis on a subimage.
-        del fresult["region"]
+        # this returns a list of dictionaries
+        # one dictionary per face that is detected in the image
+        # since we are only passing a subregion of the image
+        # that contains one face, the list will only contain one dict
+        fresult["result"] = DeepFace.analyze(
+            img_path=face,
+            actions=self.actions,
+            silent=True,
+        )
         return fresult
 
     def facial_expression_analysis(self) -> dict:
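This change tracks the upstream deepface API: newer releases return a list of result dictionaries from `DeepFace.analyze` (one per detected face) and replace the old `prog_bar` keyword with `silent`. A hedged sketch of consuming the new return type (the input array and keyword choices here are illustrative):

```python
# Sketch: newer deepface's DeepFace.analyze returns a list of dicts,
# one per detected face. The input array is an illustrative placeholder.
import numpy as np
from deepface import DeepFace

face = np.zeros((224, 224, 3), dtype=np.uint8)  # placeholder face crop
results = DeepFace.analyze(
    img_path=face,
    actions=["emotion"],
    enforce_detection=False,  # do not error out on the synthetic input
    silent=True,
)
# only one region was passed in, so the list holds a single dict
first = results[0]
print(first["dominant_emotion"], first["emotion"])
```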
@@ -294,10 +293,11 @@ class EmotionDetector(AnalysisMethod):
         faces = list(reversed(sorted(faces, key=lambda f: f.shape[0] * f.shape[1])))
         self.subdict["face"] = "Yes"
         self.subdict["multiple_faces"] = "Yes" if len(faces) > 1 else "No"
+        # number of faces only counted up to 15, after that set to 99
+        self.subdict["no_faces"] = len(faces) if len(faces) <= 15 else 99
         # note number of faces being identified
+        # We limit ourselves to identify emotion on max three faces per image
         result = {"number_faces": len(faces) if len(faces) <= 3 else 3}
-        # We limit ourselves to three faces
         for i, face in enumerate(faces[:3]):
             result[f"person{i+1}"] = self.analyze_single_face(face)
         self.clean_subdict(result)
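The new bookkeeping reports up to 15 faces exactly and collapses anything larger to the sentinel value 99, while the detailed analysis still covers only the three largest faces. A small standalone sketch of that selection logic:

```python
# Standalone sketch of the face-count bookkeeping introduced above.
import numpy as np

def summarize_faces(faces: list) -> dict:
    # largest faces first, by pixel area of each crop
    faces = sorted(faces, key=lambda f: f.shape[0] * f.shape[1], reverse=True)
    return {
        # counts above 15 are collapsed to the sentinel 99
        "no_faces": len(faces) if len(faces) <= 15 else 99,
        # detailed analysis is limited to the three largest faces
        "analyzed": min(len(faces), 3),
    }

crops = [np.zeros((s, s, 3), dtype=np.uint8) for s in (40, 120, 80, 60)]
print(summarize_faces(crops))  # {'no_faces': 4, 'analyzed': 3}
```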
@@ -314,8 +314,8 @@ class EmotionDetector(AnalysisMethod):
         """
         # Each person subdict converted into list for keys
         self.subdict["wears_mask"] = []
-        self.subdict["emotion"] = []
-        self.subdict["emotion (category)"] = []
+        if "emotion" in self.actions:
+            self.subdict["emotion (category)"] = []
         for key in self.actions:
             self.subdict[key] = []
         # now iterate over the number of faces
@@ -328,32 +328,44 @@ class EmotionDetector(AnalysisMethod):
             person = "person{}".format(i + 1)
             wears_mask = result[person]["wears_mask"]
             self.subdict["wears_mask"].append("Yes" if wears_mask else "No")
+            # actually the actions dict should take care of
+            # the person wearing a mask or not
             for key in self.actions:
+                resultdict = result[person]["result"][0]
                 if key == "emotion":
-                    classified_emotion = result[person]["dominant_emotion"]
-                    confidence_value = result[person][key][classified_emotion]
+                    classified_emotion = resultdict["dominant_emotion"]
+                    confidence_value = resultdict[key][classified_emotion]
                     outcome = (
                         classified_emotion
                         if confidence_value > self.emotion_threshold and not wears_mask
                         else None
                     )
+                    print("emotion confidence", confidence_value, outcome)
                     # also set the emotion category
-                    self.subdict["emotion (category)"].append(
-                        self.emotion_categories[outcome] if outcome else None
-                    )
+                    if outcome:
+                        self.subdict["emotion (category)"].append(
+                            self.emotion_categories[outcome]
+                        )
+                    else:
+                        self.subdict["emotion (category)"].append(None)
                 elif key == "race":
-                    classified_race = result[person]["dominant_race"]
-                    confidence_value = result[person][key][classified_race]
+                    classified_race = resultdict["dominant_race"]
+                    confidence_value = resultdict[key][classified_race]
                     outcome = (
                         classified_race
                         if confidence_value > self.race_threshold and not wears_mask
                         else None
                     )
                 elif key == "age":
-                    outcome = result[person]["age"] if not wears_mask else None
+                    outcome = resultdict[key]
                 elif key == "gender":
-                    outcome = result[person]["gender"] if not wears_mask else None
+                    classified_gender = resultdict["dominant_gender"]
+                    confidence_value = resultdict[key][classified_gender]
+                    outcome = (
+                        classified_gender
+                        if confidence_value > self.gender_threshold and not wears_mask
+                        else None
+                    )
                 self.subdict[key].append(outcome)
         return self.subdict
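Emotion, race, and gender now share one gating rule: report the dominant class only if its confidence clears the corresponding threshold and no mask is worn; age is passed through as-is. A distilled sketch of that rule (the function name and the fake result dict are illustrative):

```python
# Distilled sketch of the confidence gating applied above.
def gate(resultdict: dict, key: str, threshold: float, wears_mask: bool):
    dominant = resultdict[f"dominant_{key}"]
    confidence = resultdict[key][dominant]
    return dominant if confidence > threshold and not wears_mask else None

fake = {"dominant_gender": "Woman", "gender": {"Woman": 62.0, "Man": 38.0}}
print(gate(fake, "gender", 50.0, wears_mask=False))  # Woman
print(gate(fake, "gender", 70.0, wears_mask=False))  # None
print(gate(fake, "gender", 50.0, wears_mask=True))   # None
```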
@@ -29,9 +29,10 @@
    " # install setuptools\n",
    " # %pip install setuptools==61 -qqq\n",
    " # uninstall some pre-installed packages due to incompatibility\n",
-   " %pip uninstall --yes tensorflow-probability dopamine-rl lida pandas-gbq torchaudio torchdata torchtext orbax-checkpoint flex-y -qqq\n",
+   " %pip uninstall --yes tensorflow-probability dopamine-rl lida pandas-gbq torchaudio torchdata torchtext orbax-checkpoint flex-y jax jaxlib -qqq\n",
    " # install ammico\n",
    " %pip install git+https://github.com/ssciwr/ammico.git -qqq\n",
+   " # install older version of jax to support transformers use of diffusers\n",
    " # mount google drive for data and API key\n",
    " from google.colab import drive\n",
    "\n",
@@ -95,6 +96,9 @@
   "outputs": [],
   "source": [
    "import os\n",
+   "# jax also sometimes leads to problems on google colab\n",
+   "# if this is the case, try restarting the kernel and executing this \n",
+   "# and the above two code cells again\n",
    "import ammico\n",
    "# for displaying a progress bar\n",
    "from tqdm import tqdm"
@@ -255,7 +259,6 @@
   "source": [
    "for num, key in tqdm(enumerate(image_dict.keys()), total=len(image_dict)): # loop through all images\n",
    "    image_dict[key] = ammico.EmotionDetector(image_dict[key]).analyse_image() # analyse image with EmotionDetector and update dict\n",
-   "    \n",
    "    if num % dump_every == 0 or num == len(image_dict) - 1: # save results every dump_every to dump_file\n",
    "        image_df = ammico.get_dataframe(image_dict)\n",
    "        image_df.to_csv(dump_file)"
@@ -277,16 +280,6 @@
    "`TextDetector`:"
   ]
  },
- {
-  "cell_type": "code",
-  "execution_count": null,
-  "metadata": {},
-  "outputs": [],
-  "source": [
-   "os.environ.pop(\"DISCLOSURE_AMMICO\")\n",
-   "os.environ.get(\"DISCLOSURE_AMMICO\")"
-  ]
- },
  {
   "cell_type": "code",
   "execution_count": null,
@@ -885,7 +878,7 @@
   "metadata": {},
   "source": [
    "## Detection of faces and facial expression analysis\n",
-   "Faces and facial expressions are detected and analyzed using the `EmotionDetector` class from the `faces` module. Initially, it is detected if faces are present on the image using RetinaFace, followed by analysis if face masks are worn (Face-Mask-Detection). The detection of age, gender, race, and emotions is carried out with deepface, but only if the disclosure statement has been accepted (see above).\n",
+   "Faces and facial expressions are detected and analyzed using the `EmotionDetector` class from the `faces` module. Initially, it is detected if faces are present on the image using RetinaFace, followed by analysis if face masks are worn (Face-Mask-Detection). The probabilistic detection of age, gender, race, and emotions is carried out with deepface, but only if the disclosure statement has been accepted (see above).\n",
    "\n",
    "<img src=\"../../docs/source/_static/emotion_detector.png\" width=\"800\" />\n",
    "\n",
@@ -895,13 +888,13 @@
    "\n",
    "From the seven facial expressions, an overall dominating emotion category is identified: negative, positive, or neutral emotion. These are defined with the facial expressions angry, disgust, fear and sad for the negative category, happy for the positive category, and surprise and neutral for the neutral category.\n",
    "\n",
-   "A similar threshold as for the emotion recognition is set for the race/ethnicity detection, `race_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
+   "A similar threshold as for the emotion recognition is set for the race/ethnicity and gender detection, `race_threshold` and `gender_threshold`, with the default set to 50% so that a confidence for race / gender above 0.5 only will return a value in the analysis. \n",
    "\n",
-   "For age and gender, unfortunately no confidence value is accessible so that no threshold values can be set for this type of analysis.\n",
+   "For age unfortunately no confidence value is accessible so that no threshold values can be set for this type of analysis. The [reported MAE of the model is ± 4.65](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/).\n",
    "\n",
    "You may also pass the name of the environment variable that determines if you accept or reject the ethical disclosure statement. By default, the variable is named `DISCLOSURE_AMMICO`.\n",
    "\n",
-   "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `accept_disclosure` are optional:"
+   "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `gender_threshold`, `accept_disclosure` are optional:"
   ]
  },
  {
@@ -912,6 +905,7 @@
   "source": [
    "for key in image_dict.keys():\n",
    "    image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50,\n",
+   "                                             gender_threshold=50,\n",
    "                                             accept_disclosure=\"DISCLOSURE_AMMICO\").analyse_image()"
   ]
  },
@@ -1417,7 +1411,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.9.16"
+  "version": "3.11.5"
  }
 },
 "nbformat": 4,
@@ -1,4 +1,6 @@
 {
+  "pexels-pixabay-415829":
+  {
     "face": "Yes",
     "multiple_faces": "No",
     "no_faces": 1,
@@ -7,4 +9,25 @@
     "race": ["asian"],
     "emotion": ["happy"],
     "emotion (category)": ["Positive"]
-}
+  },
+  "pexels-1000990-1954659":
+  {
+    "face": "Yes",
+    "multiple_faces": "Yes",
+    "no_faces": 2,
+    "wears_mask": ["No", "No"],
+    "gender": ["Man", "Man"],
+    "race": ["asian", "white"],
+    "emotion": [null, null],
+    "emotion (category)": [null, null]
+  },
+  "pexels-maksgelatin-4750169":
+  {
+    "face": "Yes",
+    "multiple_faces": "No",
+    "no_faces": 1,
+    "wears_mask": ["Yes"]
+  }
+}
Binary data
ammico/test/data/pexels-1000990-1954659.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 1.7 MiB

Binary data
ammico/test/data/pexels-maksgelatin-4750169.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 1.3 MiB
@@ -54,6 +54,7 @@ def test_right_output_analysis_summary(get_AE, get_options, monkeypatch):
         None,
         50,
         50,
+        50,
         "OTHER_VAR",
         "CIE 1976",
         "summary_and_questions",
@@ -74,6 +75,7 @@ def test_right_output_analysis_emotions(get_AE, get_options, monkeypatch):
         None,
         50,
         50,
+        50,
         "OTHER_VAR",
         "CIE 1976",
         "summary_and_questions",
@@ -11,10 +11,9 @@ def test_init_EmotionDetector(monkeypatch):
     assert ed.subdict["face"] == "No"
     assert ed.subdict["multiple_faces"] == "No"
     assert ed.subdict["wears_mask"] == ["No"]
-    assert ed.subdict["emotion"] == [None]
-    assert ed.subdict["age"] == [None]
     assert ed.emotion_threshold == 50
     assert ed.race_threshold == 50
+    assert ed.gender_threshold == 50
     assert ed.emotion_categories["angry"] == "Negative"
     assert ed.emotion_categories["happy"] == "Positive"
     assert ed.emotion_categories["surprise"] == "Neutral"
@@ -25,10 +24,12 @@ def test_init_EmotionDetector(monkeypatch):
         {},
         emotion_threshold=80,
         race_threshold=30,
+        gender_threshold=60,
         accept_disclosure="OTHER_VAR",
     )
     assert ed.emotion_threshold == 80
     assert ed.race_threshold == 30
+    assert ed.gender_threshold == 60
     monkeypatch.delenv("OTHER_VAR", raising=False)
     # do not accept disclosure
     monkeypatch.setattr("builtins.input", lambda _: "no")
@@ -46,6 +47,10 @@ def test_init_EmotionDetector(monkeypatch):
         fc.EmotionDetector({}, race_threshold=150)
     with pytest.raises(ValueError):
         fc.EmotionDetector({}, race_threshold=-50)
+    with pytest.raises(ValueError):
+        fc.EmotionDetector({}, gender_threshold=150)
+    with pytest.raises(ValueError):
+        fc.EmotionDetector({}, gender_threshold=-50)
     # test pre-set variables: disclosure
     monkeypatch.delattr("builtins.input", raising=False)
     monkeypatch.setenv("OTHER_VAR", "something")
@@ -63,7 +68,7 @@ def test_define_actions(monkeypatch):
     monkeypatch.setenv("OTHER_VAR", "True")
     ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
     ed._define_actions({"wears_mask": True})
-    assert ed.actions == ["age", "gender"]
+    assert ed.actions == ["age"]
     ed._define_actions({"wears_mask": False})
     assert ed.actions == ["age", "gender", "race", "emotion"]
     monkeypatch.setenv("OTHER_VAR", "False")
@@ -83,18 +88,30 @@ def test_ensure_deepface_models(monkeypatch):
 
 def test_analyse_faces(get_path, monkeypatch):
     mydict = {
-        "filename": get_path + "pexels-pixabay-415829.jpg",
+        # one face, no mask
+        "pexels-pixabay-415829": {"filename": get_path + "pexels-pixabay-415829.jpg"},
+        # two faces, no mask
+        "pexels-1000990-1954659": {"filename": get_path + "pexels-1000990-1954659.jpg"},
+        # one face, mask
+        "pexels-maksgelatin-4750169": {
+            "filename": get_path + "pexels-maksgelatin-4750169.jpg"
+        },
     }
     monkeypatch.setenv("OTHER_VAR", "True")
-    mydict.update(
-        fc.EmotionDetector(mydict, accept_disclosure="OTHER_VAR").analyse_image()
-    )
+    for key in mydict.keys():
+        mydict[key].update(
+            fc.EmotionDetector(
+                mydict[key], emotion_threshold=80, accept_disclosure="OTHER_VAR"
+            ).analyse_image()
+        )
 
     with open(get_path + "example_faces.json", "r") as file:
         out_dict = json.load(file)
-    # delete the filename key
-    mydict.pop("filename", None)
-    # do not test for age, as this is not a reliable metric
-    mydict.pop("age", None)
 
     for key in mydict.keys():
-        assert mydict[key] == out_dict[key]
+        # delete the filename key
+        mydict[key].pop("filename", None)
+        # do not test for age, as this is not a reliable metric
+        mydict[key].pop("age", None)
+        for subkey in mydict[key].keys():
+            assert mydict[key][subkey] == out_dict[key][subkey]
@@ -90,30 +90,19 @@ def test_check_for_missing_keys():
         "file2": {"faces": "No", "text_english": "Otherthing"},
     }
     # check that dict is not changed
-    mydict2 = ut.check_for_missing_keys(mydict)
+    mydict2 = ut._check_for_missing_keys(mydict)
     assert mydict2 == mydict
     # check that dict is updated if key is missing
     mydict = {
         "file1": {"faces": "Yes", "text_english": "Something"},
         "file2": {"faces": "No"},
     }
-    mydict2 = ut.check_for_missing_keys(mydict)
+    mydict2 = ut._check_for_missing_keys(mydict)
     assert mydict2["file2"] == {"faces": "No", "text_english": None}
     # check that dict is updated if more than one key is missing
     mydict = {"file1": {"faces": "Yes", "text_english": "Something"}, "file2": {}}
-    mydict2 = ut.check_for_missing_keys(mydict)
+    mydict2 = ut._check_for_missing_keys(mydict)
     assert mydict2["file2"] == {"faces": None, "text_english": None}
-    # now test the exceptions
-    with pytest.raises(ValueError):
-        ut.check_for_missing_keys({"File": "path"})
-    with pytest.raises(ValueError):
-        ut.check_for_missing_keys({"File": {}})
-    mydict = {
-        "file1": {"faces": "Yes"},
-        "file2": {"faces": "No", "text_english": "Something"},
-    }
-    with pytest.raises(ValueError):
-        ut.check_for_missing_keys(mydict)
 
 
 def test_append_data_to_dict(get_path):
@@ -170,14 +170,18 @@ class TextDetector(AnalysisMethod):
         """
         if not self.skip_extraction:
             self.get_text_from_image()
-        self.translate_text()
-        self.remove_linebreaks()
-        if self.analyse_text:
-            self._run_spacy()
-            self.clean_text()
-            self.text_summary()
-            self.text_sentiment_transformers()
-            self.text_ner()
+        # check that text was found
+        if not self.subdict["text"]:
+            print("No text found - skipping analysis.")
+        else:
+            self.translate_text()
+            self.remove_linebreaks()
+            if self.analyse_text:
+                self._run_spacy()
+                self.clean_text()
+                self.text_summary()
+                self.text_sentiment_transformers()
+                self.text_ner()
         return self.subdict
 
     def get_text_from_image(self):
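The commit message's "make sure text was found on image before analysing" lands here: translation and NLP steps now only run when OCR actually produced text, so images without text no longer break the pipeline. A reduced sketch of the guard, with hypothetical stand-ins for the detector methods:

```python
# Reduced sketch of the new guard in analyse_image; translate() is a
# hypothetical stand-in for the TextDetector pipeline steps.
def translate(text: str) -> str:
    return text  # placeholder for the real translation step

def analyse_image(subdict: dict) -> dict:
    if not subdict.get("text"):  # OCR may have set "text" to None
        print("No text found - skipping analysis.")
        return subdict
    subdict["text_english"] = translate(subdict["text"])
    return subdict

print(analyse_image({"text": None}))     # skipped, dict unchanged
print(analyse_image({"text": "Hello"}))  # processed
```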
@@ -203,6 +207,9 @@ class TextDetector(AnalysisMethod):
         if response:
             texts = response.text_annotations[0].description
             self.subdict["text"] = texts
+        else:
+            print("No text found on image.")
+            self.subdict["text"] = None
         if response.error.message:
             print("Google Cloud Vision Error")
             raise ValueError(
@@ -156,7 +156,7 @@ def initialize_dict(filelist: list) -> dict:
     return mydict
 
 
-def check_for_missing_keys(mydict: dict) -> dict:
+def _check_for_missing_keys(mydict: dict) -> dict:
     """Check the nested dictionary for any missing keys in the subdicts.
 
     Args:
@@ -164,44 +164,23 @@ def check_for_missing_keys(mydict: dict) -> dict:
     Returns:
         dict: The dictionary with keys appended."""
-    # check that we actually got a nested dict
-    if not isinstance(mydict[next(iter(mydict))], dict):
-        raise ValueError(
-            "Please provide a nested dictionary - you provided {}".format(
-                next(iter(mydict))
-            )
-        )
-    # gather all existing keys of first item in a list
-    subdict = mydict[next(iter(mydict))]
-    if len(list(subdict.keys())) < 1:
-        raise ValueError(
-            "Could not get any keys to compare to - please check if your nested dict is empty!"
-        )
+    # also get all keys for all items
+    # currently we go through the whole dictionary twice
+    # however, compared to the rest of the code this is negligible
+    keylist = []
     for key in mydict.keys():
-        # compare keys of next item with first item
-        if subdict.keys() != mydict[key].keys():
-            # print a warning if key is not found and set to None
-            keys_a = set(subdict.keys())
-            keys_b = set(mydict[key].keys())
-            missing_keys_in_b = keys_a - keys_b
-            if missing_keys_in_b:
-                print(
-                    "Found missing key(s) {} in subdict {} - setting to None.".format(
-                        missing_keys_in_b, key
-                    )
-                )
-            for missing_key in missing_keys_in_b:
-                mydict[key][missing_key] = None
-            # check that there are no other keys in the subdicts -
-            # this would only happen if there is a key missing in the first subdict
-            # then we would need to start over so best to
-            # abort if this happens - this is a very unlikely case
-            missing_keys_in_a = keys_b - keys_a
-            if missing_keys_in_a:
-                raise ValueError(
-                    "Could not update missing keys - first item already missing {}".format(
-                        missing_keys_in_a
-                    )
-                )
+        if not isinstance(mydict[key], dict):
+            raise ValueError(
+                "Please provide a nested dictionary - you provided {}".format(key)
+            )
+        keylist.append(list(mydict[key].keys()))
+    # find the longest list of keys
+    max_keys = max(keylist, key=len)
+    # now generate missing keys
+    for key in mydict.keys():
+        for mkey in max_keys:
+            if mkey not in mydict[key].keys():
+                mydict[key][mkey] = None
     return mydict
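The rewritten (and now private) helper no longer compares every subdict against the first item; it collects all key lists, takes the longest as the reference, and fills gaps with `None`. A hedged sketch of the new behavior, mirroring the updated tests above:

```python
# Sketch mirroring test_check_for_missing_keys: the longest key list is
# the reference, and missing entries are filled with None.
mydict = {
    "file1": {"faces": "Yes", "text_english": "Something"},
    "file2": {"faces": "No"},
}

keylist = [list(sub.keys()) for sub in mydict.values()]
max_keys = max(keylist, key=len)  # ['faces', 'text_english']
for sub in mydict.values():
    for mkey in max_keys:
        sub.setdefault(mkey, None)

print(mydict["file2"])  # {'faces': 'No', 'text_english': None}
```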
@@ -223,7 +202,7 @@ def dump_df(mydict: dict) -> DataFrame:
 
 
 def get_dataframe(mydict: dict) -> DataFrame:
-    check_for_missing_keys(mydict)
+    _check_for_missing_keys(mydict)
     outdict = append_data_to_dict(mydict)
     return dump_df(outdict)
@@ -845,7 +845,7 @@
   "metadata": {},
   "source": [
    "## Detection of faces and facial expression analysis\n",
-   "Faces and facial expressions are detected and analyzed using the `EmotionDetector` class from the `faces` module. Initially, it is detected if faces are present on the image using RetinaFace, followed by analysis if face masks are worn (Face-Mask-Detection). The detection of age, gender, race, and emotions is carried out with deepface, but only if the disclosure statement has been accepted (see above).\n",
+   "Faces and facial expressions are detected and analyzed using the `EmotionDetector` class from the `faces` module. Initially, it is detected if faces are present on the image using RetinaFace, followed by analysis if face masks are worn (Face-Mask-Detection). The probabilistic detection of age, gender, race, and emotions is carried out with deepface, but only if the disclosure statement has been accepted (see above).\n",
    "\n",
    "<img src=\"../_static/emotion_detector.png\" width=\"800\" />\n",
    "\n",
@@ -855,13 +855,13 @@
    "\n",
    "From the seven facial expressions, an overall dominating emotion category is identified: negative, positive, or neutral emotion. These are defined with the facial expressions angry, disgust, fear and sad for the negative category, happy for the positive category, and surprise and neutral for the neutral category.\n",
    "\n",
-   "A similar threshold as for the emotion recognition is set for the race/ethnicity detection, `race_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
+   "A similar threshold as for the emotion recognition is set for the race/ethnicity and gender detection, `race_threshold` and `gender_threshold`, with the default set to 50% so that a confidence for race / gender above 0.5 only will return a value in the analysis.\n",
    "\n",
-   "Gender and age probabilistic detection do not allow access to the confidence value, so that no threshold can be set for these.\n",
+   "For age unfortunately no confidence value is accessible so that no threshold values can be set for this type of analysis. The [reported MAE of the model is ± 4.65](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/).\n",
    "\n",
    "You may also pass the name of the environment variable that determines if you accept or reject the ethical disclosure statement. By default, the variable is named `DISCLOSURE_AMMICO`.\n",
    "\n",
-   "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `accept_disclosure` are optional:"
+   "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `gender_threshold`, `accept_disclosure` are optional:"
   ]
  },
  {
@@ -872,6 +872,7 @@
   "source": [
    "for key in image_dict.keys():\n",
    "    image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50,\n",
+   "                                             gender_threshold=50,\n",
    "                                             accept_disclosure=\"DISCLOSURE_AMMICO\").analyse_image()"
   ]
  },
@@ -25,7 +25,7 @@ dependencies = [
     "bertopic<=0.14.1",
     "dash>=2.11.0",
     "datasets",
-    "deepface<=0.0.75",
+    "deepface<=0.0.92",
     "googletrans==3.1.0a0",
     "google-cloud-vision",
     "grpcio",