Mirror of https://github.com/ssciwr/AMMICO.git, synced 2025-10-29 05:04:14 +02:00

Update lavis dependencies (#204)

Parent commit: 03faa12f77
This commit:  17846fbe71
.github/workflows/ci.yml (11 changed lines)
@@ -16,7 +16,7 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-22.04,windows-latest,macos-latest]
-        python-version: [3.9]
+        python-version: [3.11]
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
@@ -24,10 +24,15 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
+      - name: install ffmpeg on macos
+        if: matrix.os == 'macos-latest'
+        run: |
+          brew install ffmpeg
      - name: Install dependencies
        run: |
-          python -m pip install uv
-          uv pip install --system -e .
+          # python -m pip install uv
+          pip install -e .
+          # uv pip install --system -e .
      - name: Run pytest test_colors
        run: |
          cd ammico
@@ -24,7 +24,7 @@ Use pre-processed image files such as social media posts with comments and proce
 1. Question answering
 1. Performing person and face recognition in images
 1. Face mask detection
-1. Age, gender and race detection
+1. Probabilistic detection of age, gender and race detection
 1. Emotion recognition
 1. Color analysis
 1. Analyse hue and percentage of color on image
@@ -101,8 +101,6 @@ class AnalysisExplorer:
             State("setting_Text_revision_numbers", "value"),
             State("setting_Emotion_emotion_threshold", "value"),
             State("setting_Emotion_race_threshold", "value"),
-            State("setting_Emotion_gender_threshold", "value"),
-            State("setting_Emotion_age_threshold", "value"),
             State("setting_Emotion_env_var", "value"),
             State("setting_Color_delta_e_method", "value"),
             State("setting_Summary_analysis_type", "value"),
@@ -256,34 +254,6 @@ class AnalysisExplorer:
                             ],
                             align="start",
                         ),
-                        dbc.Col(
-                            [
-                                html.P("Gender threshold"),
-                                dcc.Input(
-                                    type="number",
-                                    value=50,
-                                    max=100,
-                                    min=0,
-                                    id="setting_Emotion_gender_threshold",
-                                    style={"width": "100%"},
-                                ),
-                            ],
-                            align="start",
-                        ),
-                        dbc.Col(
-                            [
-                                html.P("Age threshold"),
-                                dcc.Input(
-                                    type="number",
-                                    value=50,
-                                    max=100,
-                                    min=0,
-                                    id="setting_Emotion_age_threshold",
-                                    style={"width": "100%"},
-                                ),
-                            ],
-                            align="start",
-                        ),
                         dbc.Col(
                             [
                                 html.P(
@@ -493,8 +463,6 @@ class AnalysisExplorer:
         settings_text_revision_numbers: str,
         setting_emotion_emotion_threshold: int,
         setting_emotion_race_threshold: int,
-        setting_emotion_gender_threshold: int,
-        setting_emotion_age_threshold: int,
         setting_emotion_env_var: str,
         setting_color_delta_e_method: str,
         setting_summary_analysis_type: str,
@@ -550,8 +518,6 @@ class AnalysisExplorer:
                 image_copy,
                 emotion_threshold=setting_emotion_emotion_threshold,
                 race_threshold=setting_emotion_race_threshold,
-                gender_threshold=setting_emotion_gender_threshold,
-                age_threshold=setting_emotion_age_threshold,
                 accept_disclosure=(
                     setting_emotion_env_var
                     if setting_emotion_env_var
ammico/faces.py (107 changed lines)
@@ -149,8 +149,6 @@ class EmotionDetector(AnalysisMethod):
         subdict: dict,
         emotion_threshold: float = 50.0,
         race_threshold: float = 50.0,
-        gender_threshold: float = 50.0,
-        age_threshold: float = 50.0,
         accept_disclosure: str = "DISCLOSURE_AMMICO",
     ) -> None:
         """
@@ -160,8 +158,6 @@ class EmotionDetector(AnalysisMethod):
             subdict (dict): The dictionary to store the analysis results.
             emotion_threshold (float): The threshold for detecting emotions (default: 50.0).
             race_threshold (float): The threshold for detecting race (default: 50.0).
-            gender_threshold (float): The threshold for detecting gender (default: 50.0).
-            age_threshold (float): The threshold for detecting age (default: 50.0).
             accept_disclosure (str): The name of the disclosure variable, that is
                 set upon accepting the disclosure (default: "DISCLOSURE_AMMICO").
         """
@@ -172,14 +168,8 @@ class EmotionDetector(AnalysisMethod):
             raise ValueError("Emotion threshold must be between 0 and 100.")
         if race_threshold < 0 or race_threshold > 100:
             raise ValueError("Race threshold must be between 0 and 100.")
-        if gender_threshold < 0 or gender_threshold > 100:
-            raise ValueError("Gender threshold must be between 0 and 100.")
-        if age_threshold < 0 or age_threshold > 100:
-            raise ValueError("Age threshold must be between 0 and 100.")
         self.emotion_threshold = emotion_threshold
         self.race_threshold = race_threshold
-        self.gender_threshold = gender_threshold
-        self.age_threshold = age_threshold
         self.emotion_categories = {
             "angry": "Negative",
             "disgust": "Negative",
@@ -232,13 +222,13 @@ class EmotionDetector(AnalysisMethod):
             "restricted_access_with_mask": [],
         }
         if fresult["wears_mask"] and self.accepted:
-            actions = conditional_actions["all_with_mask"]
+            self.actions = conditional_actions["all_with_mask"]
         elif fresult["wears_mask"] and not self.accepted:
-            actions = conditional_actions["restricted_access_with_mask"]
+            self.actions = conditional_actions["restricted_access_with_mask"]
         elif not fresult["wears_mask"] and self.accepted:
-            actions = conditional_actions["all"]
+            self.actions = conditional_actions["all"]
         elif not fresult["wears_mask"] and not self.accepted:
-            actions = conditional_actions["restricted_access"]
+            self.actions = conditional_actions["restricted_access"]
         else:
             raise ValueError(
                 "Invalid mask detection {} and disclosure \
@@ -246,16 +236,15 @@ class EmotionDetector(AnalysisMethod):
                     fresult["wears_mask"], self.accepted
                 )
             )
-        return actions

-    def _ensure_deepface_models(self, actions: list):
+    def _ensure_deepface_models(self):
         # Ensure that all data has been fetched by pooch
         deepface_face_expression_model.get()
-        if "race" in actions:
+        if "race" in self.actions:
             deepface_race_model.get()
-        if "age" in actions:
+        if "age" in self.actions:
             deepface_age_model.get()
-        if "gender" in actions:
+        if "gender" in self.actions:
             deepface_gender_model.get()

     def analyze_single_face(self, face: np.ndarray) -> dict:
@@ -271,13 +260,13 @@ class EmotionDetector(AnalysisMethod):
         fresult = {}
         # Determine whether the face wears a mask
         fresult["wears_mask"] = self.wears_mask(face)
-        actions = self._define_actions(fresult)
-        self._ensure_deepface_models(actions)
+        self._define_actions(fresult)
+        self._ensure_deepface_models()
         # Run the full DeepFace analysis
         fresult.update(
             DeepFace.analyze(
                 img_path=face,
-                actions=actions,
+                actions=self.actions,
                 prog_bar=False,
                 detector_backend="skip",
             )
@@ -325,49 +314,47 @@ class EmotionDetector(AnalysisMethod):
         """
         # Each person subdict converted into list for keys
         self.subdict["wears_mask"] = []
-        self.subdict["age"] = []
-        self.subdict["gender"] = []
-        self.subdict["race"] = []
-        self.subdict["emotion"] = []
         self.subdict["emotion (category)"] = []
+        for key in self.actions:
+            self.subdict[key] = []
         # now iterate over the number of faces
         # and check thresholds
         # the results for each person are returned as a nested dict
         # race and emotion are given as dict with confidence values
         # gender and age are given as one value with no confidence
         # being passed
         for i in range(result["number_faces"]):
             person = "person{}".format(i + 1)
-            self.subdict["wears_mask"].append(
-                "Yes" if result[person]["wears_mask"] else "No"
-            )
-            self.subdict["age"].append(result[person]["age"])
-            # Gender is now reported as a list of dictionaries.
-            # Each dict represents one face.
-            # Each dict contains probability for Woman and Man.
-            # We take only the higher probability result for each dict.
-            self.subdict["gender"].append(result[person]["gender"])
-            # Race and emotion are only detected if a person does not wear a mask
-            if result[person]["wears_mask"]:
-                self.subdict["race"].append(None)
-                self.subdict["emotion"].append(None)
-                self.subdict["emotion (category)"].append(None)
-            elif not result[person]["wears_mask"]:
-                # Check whether the race threshold was exceeded
-                if (
-                    result[person]["race"][result[person]["dominant_race"]]
-                    > self.race_threshold
-                ):
-                    self.subdict["race"].append(result[person]["dominant_race"])
-                else:
-                    self.subdict["race"].append(None)
-
-                # Check whether the emotion threshold was exceeded
-                if (
-                    result[person]["emotion"][result[person]["dominant_emotion"]]
-                    > self.emotion_threshold
-                ):
-                    self.subdict["emotion"].append(result[person]["dominant_emotion"])
-                    self.subdict["emotion (category)"].append(
-                        self.emotion_categories[result[person]["dominant_emotion"]]
+            wears_mask = result[person]["wears_mask"]
+            self.subdict["wears_mask"].append("Yes" if wears_mask else "No")
+            for key in self.actions:
+                if key == "emotion":
+                    classified_emotion = result[person]["dominant_emotion"]
+                    confidence_value = result[person][key][classified_emotion]
+                    outcome = (
+                        classified_emotion
+                        if confidence_value > self.emotion_threshold and not wears_mask
+                        else None
                     )
-                else:
-                    self.subdict["emotion"].append(None)
-                    self.subdict["emotion (category)"].append(None)
+                    # also set the emotion category
+                    self.emotion_categories[outcome]
+                    self.subdict["emotion (category)"].append(
+                        self.emotion_categories[outcome] if outcome else None
+                    )
+                elif key == "race":
+                    classified_race = result[person]["dominant_race"]
+                    confidence_value = result[person][key][classified_race]
+                    outcome = (
+                        classified_race
+                        if confidence_value > self.race_threshold and not wears_mask
+                        else None
+                    )
+                elif key == "age":
+                    outcome = result[person]["age"] if not wears_mask else None
+                elif key == "gender":
+                    outcome = result[person]["gender"] if not wears_mask else None
+                self.subdict[key].append(outcome)
         return self.subdict

     def wears_mask(self, face: np.ndarray) -> bool:
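Side note on usage: after this refactor the detector takes only the emotion and race/ethnicity thresholds, since gender and age are reported without a confidence value and no longer accept a threshold. A minimal usage sketch (illustrative only; the dictionary entry and file path are invented for the example, not taken from the commit):

import ammico

# hypothetical single-entry dict; in practice this comes from ammico's file-discovery step
image_dict = {"IMG_3757": {"filename": "ammico/test/data/IMG_3757.jpg"}}

for key in image_dict.keys():
    image_dict[key] = ammico.EmotionDetector(
        image_dict[key],
        emotion_threshold=50,  # report an emotion only above 50% confidence
        race_threshold=50,     # same rule for race/ethnicity
        accept_disclosure="DISCLOSURE_AMMICO",  # env var holding the disclosure answer
    ).analyse_image()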
@@ -261,6 +261,15 @@
     "    image_df.to_csv(dump_file)"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "image_df.head()"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -268,6 +277,16 @@
     "`TextDetector`:"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "os.environ.pop(\"DISCLOSURE_AMMICO\")\n",
+    "os.environ.get(\"DISCLOSURE_AMMICO\")"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -876,11 +895,13 @@
     "\n",
     "From the seven facial expressions, an overall dominating emotion category is identified: negative, positive, or neutral emotion. These are defined with the facial expressions angry, disgust, fear and sad for the negative category, happy for the positive category, and surprise and neutral for the neutral category.\n",
     "\n",
-    "A similar threshold as for the emotion recognition is set for the race/ethnicity, gender and age detection, `race_threshold`, `gender_threshold`, `age_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
+    "A similar threshold as for the emotion recognition is set for the race/ethnicity detection, `race_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
     "\n",
+    "For age and gender, unfortunately no confidence value is accessible so that no threshold values can be set for this type of analysis.\n",
+    "\n",
     "You may also pass the name of the environment variable that determines if you accept or reject the ethical disclosure statement. By default, the variable is named `DISCLOSURE_AMMICO`.\n",
     "\n",
-    "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `gender_threshold`, `age_threshold` are optional:"
+    "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `accept_disclosure` are optional:"
    ]
   },
   {
@@ -891,7 +912,6 @@
    "source": [
     "for key in image_dict.keys():\n",
     "    image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50,\n",
-    "                                             gender_threshold=50, age_threshold=50, \n",
     "                                             accept_disclosure=\"DISCLOSURE_AMMICO\").analyse_image()"
    ]
   },
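The notebook markdown changed above describes the thresholding rule: a dominant label is reported only when its confidence exceeds the chosen threshold, otherwise None is stored. A small illustrative sketch of that rule (not AMMICO's exact implementation; the confidence values are invented):

def apply_threshold(confidences: dict, threshold: float = 50.0):
    # keep the dominant label only if its confidence clears the threshold
    label, confidence = max(confidences.items(), key=lambda kv: kv[1])
    return label if confidence > threshold else None

print(apply_threshold({"happy": 80.2, "neutral": 15.1, "sad": 4.7}))   # -> happy
print(apply_threshold({"happy": 45.0, "neutral": 40.0, "sad": 15.0}))  # -> None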
Binary data: ammico/test/data/IMG_3757.jpg (binary file not shown)
Before: size 42 KiB · After: size 42 KiB
@@ -1,10 +1 @@
-THE
-ALGEBRAIC
-EIGENVALUE
-PROBLEM
-DOM
-NVS TIO
-MINA
-Monographs
-on Numerical Analysis
-J.. H. WILKINSON
+THE ALGEBRAIC EIGENVALUE PROBLEM
@@ -1 +1 @@
-algebraic eigenvalue problem monographs numerical analysis
+the algebraic eigenvalue problem
@@ -54,8 +54,6 @@ def test_right_output_analysis_summary(get_AE, get_options, monkeypatch):
         None,
         50,
         50,
-        50,
-        50,
         "OTHER_VAR",
         "CIE 1976",
         "summary_and_questions",
@@ -76,8 +74,6 @@ def test_right_output_analysis_emotions(get_AE, get_options, monkeypatch):
         None,
         50,
         50,
-        50,
-        50,
         "OTHER_VAR",
         "CIE 1976",
         "summary_and_questions",
@@ -14,36 +14,28 @@ def test_init_EmotionDetector(monkeypatch):
     assert ed.subdict["emotion"] == [None]
     assert ed.subdict["age"] == [None]
     assert ed.emotion_threshold == 50
-    assert ed.age_threshold == 50
-    assert ed.gender_threshold == 50
     assert ed.race_threshold == 50
     assert ed.emotion_categories["angry"] == "Negative"
     assert ed.emotion_categories["happy"] == "Positive"
     assert ed.emotion_categories["surprise"] == "Neutral"
     assert ed.accept_disclosure == "OTHER_VAR"
     assert os.environ.get(ed.accept_disclosure) == "True"
     assert ed.accepted
-    monkeypatch.delenv(ed.accept_disclosure, raising=False)
+    monkeypatch.delenv("OTHER_VAR", raising=False)
     # different thresholds
     ed = fc.EmotionDetector(
         {},
         emotion_threshold=80,
         race_threshold=30,
-        gender_threshold=70,
-        age_threshold=90,
         accept_disclosure="OTHER_VAR",
     )
     assert ed.emotion_threshold == 80
     assert ed.race_threshold == 30
-    assert ed.gender_threshold == 70
-    assert ed.age_threshold == 90
-    monkeypatch.delenv(ed.accept_disclosure, raising=False)
+    monkeypatch.delenv("OTHER_VAR", raising=False)
     # do not accept disclosure
     monkeypatch.setattr("builtins.input", lambda _: "no")
     ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
-    assert os.environ.get(ed.accept_disclosure) == "False"
+    assert os.environ.get("OTHER_VAR") == "False"
     assert not ed.accepted
-    monkeypatch.delenv(ed.accept_disclosure, raising=False)
+    monkeypatch.delenv("OTHER_VAR", raising=False)
     # now test the exceptions: thresholds
     monkeypatch.setattr("builtins.input", lambda _: "yes")
     with pytest.raises(ValueError):
@@ -54,14 +46,6 @@ def test_init_EmotionDetector(monkeypatch):
         fc.EmotionDetector({}, race_threshold=150)
     with pytest.raises(ValueError):
         fc.EmotionDetector({}, race_threshold=-50)
-    with pytest.raises(ValueError):
-        fc.EmotionDetector({}, gender_threshold=150)
-    with pytest.raises(ValueError):
-        fc.EmotionDetector({}, gender_threshold=-50)
-    with pytest.raises(ValueError):
-        fc.EmotionDetector({}, age_threshold=150)
-    with pytest.raises(ValueError):
-        fc.EmotionDetector({}, age_threshold=-50)
     # test pre-set variables: disclosure
     monkeypatch.delattr("builtins.input", raising=False)
     monkeypatch.setenv("OTHER_VAR", "something")
@@ -78,22 +62,23 @@ def test_init_EmotionDetector(monkeypatch):
 def test_define_actions(monkeypatch):
     monkeypatch.setenv("OTHER_VAR", "True")
     ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
-    actions = ed._define_actions({"wears_mask": True})
-    assert actions == ["age", "gender"]
-    actions = ed._define_actions({"wears_mask": False})
-    assert actions == ["age", "gender", "race", "emotion"]
+    ed._define_actions({"wears_mask": True})
+    assert ed.actions == ["age", "gender"]
+    ed._define_actions({"wears_mask": False})
+    assert ed.actions == ["age", "gender", "race", "emotion"]
     monkeypatch.setenv("OTHER_VAR", "False")
     ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
-    actions = ed._define_actions({"wears_mask": True})
-    assert actions == []
-    actions = ed._define_actions({"wears_mask": False})
-    assert actions == ["emotion"]
+    ed._define_actions({"wears_mask": True})
+    assert ed.actions == []
+    ed._define_actions({"wears_mask": False})
+    assert ed.actions == ["emotion"]


 def test_ensure_deepface_models(monkeypatch):
     monkeypatch.setenv("OTHER_VAR", "True")
     ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
-    ed._ensure_deepface_models(["age", "gender", "race", "emotion"])
+    ed.actions = ["age", "gender", "race", "emotion"]
+    ed._ensure_deepface_models()


 def test_analyse_faces(get_path, monkeypatch):
@@ -127,8 +127,8 @@ def test_get_text_from_image(set_testdict, get_path, set_environ):
         test_obj.get_text_from_image()
         ref_file = get_path + "text_" + item + ".txt"
         with open(ref_file, "r", encoding="utf8") as file:
-            reference_text = file.read()
-        assert test_obj.subdict["text"] == reference_text
+            reference_text = file.read().replace("\n", " ")
+        assert test_obj.subdict["text"].replace("\n", " ") == reference_text


 def test_translate_text(set_testdict, get_path):
@@ -137,13 +137,13 @@ def test_translate_text(set_testdict, get_path):
         ref_file = get_path + "text_" + item + ".txt"
         trans_file = get_path + "text_translated_" + item + ".txt"
         with open(ref_file, "r", encoding="utf8") as file:
-            reference_text = file.read()
+            reference_text = file.read().replace("\n", " ")
         with open(trans_file, "r", encoding="utf8") as file:
-            true_translated_text = file.read()
+            true_translated_text = file.read().replace("\n", " ")
         test_obj.subdict["text"] = reference_text
         test_obj.translate_text()
         assert test_obj.subdict["text_language"] == lang
-        translated_text = test_obj.subdict["text_english"].lower()
+        translated_text = test_obj.subdict["text_english"].lower().replace("\n", " ")
         for word in true_translated_text.lower():
             assert word in translated_text
@@ -233,7 +233,7 @@ def test_read_csv(get_path):


 def test_PostprocessText(set_testdict, get_path):
-    reference_dict = "THE\nALGEBRAIC\nEIGENVALUE\nPROBLEM\nDOM\nNVS TIO\nMINA\nMonographs\non Numerical Analysis\nJ.. H. WILKINSON"
+    reference_dict = "THE ALGEBRAIC EIGENVALUE PROBLEM"
     reference_df = "Mathematische Formelsammlung\nfür Ingenieure und Naturwissenschaftler\nMit zahlreichen Abbildungen und Rechenbeispielen\nund einer ausführlichen Integraltafel\n3., verbesserte Auflage"
     img_numbers = ["IMG_3755", "IMG_3756", "IMG_3757"]
     for image_ref in img_numbers:
@@ -855,11 +855,13 @@
     "\n",
     "From the seven facial expressions, an overall dominating emotion category is identified: negative, positive, or neutral emotion. These are defined with the facial expressions angry, disgust, fear and sad for the negative category, happy for the positive category, and surprise and neutral for the neutral category.\n",
     "\n",
-    "A similar threshold as for the emotion recognition is set for the race/ethnicity, gender and age detection, `race_threshold`, `gender_threshold`, `age_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
+    "A similar threshold as for the emotion recognition is set for the race/ethnicity detection, `race_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
     "\n",
+    "Gender and age probabilistic detection do not allow access to the confidence value, so that no threshold can be set for these.\n",
+    "\n",
     "You may also pass the name of the environment variable that determines if you accept or reject the ethical disclosure statement. By default, the variable is named `DISCLOSURE_AMMICO`.\n",
     "\n",
-    "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `gender_threshold`, `age_threshold` are optional:"
+    "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `accept_disclosure` are optional:"
    ]
   },
   {
@@ -870,7 +872,6 @@
    "source": [
     "for key in image_dict.keys():\n",
     "    image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50,\n",
-    "                                             gender_threshold=50, age_threshold=50, \n",
     "                                             accept_disclosure=\"DISCLOSURE_AMMICO\").analyse_image()"
    ]
   },
@@ -43,7 +43,7 @@ dependencies = [
     "pytest-cov",
     "Requests",
     "retina_face",
-    "ammico-lavis",
+    "ammico-lavis>=1.0.2.3",
     "setuptools",
     "spacy",
     "tensorflow>=2.13.0",
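The dependency change raises the lower bound on ammico-lavis. A quick way to sanity-check an existing environment against that bound (a sketch; it assumes the distribution is installed under the name "ammico-lavis" and that the packaging library is available):

from importlib.metadata import version
from packaging.version import Version

# compare the installed ammico-lavis release with the new lower bound from pyproject.toml
assert Version(version("ammico-lavis")) >= Version("1.0.2.3"), "ammico-lavis is too old"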