Mirror of https://github.com/ssciwr/AMMICO.git (synced 2025-10-29 21:16:06 +02:00)

Update lavis dependencies (#204)
parent 03faa12f77
commit 17846fbe71
.github/workflows/ci.yml (11 lines changed)

@@ -16,7 +16,7 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-22.04,windows-latest,macos-latest]
-        python-version: [3.9]
+        python-version: [3.11]
     steps:
       - name: Checkout repository
        uses: actions/checkout@v4
@@ -24,10 +24,15 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
+      - name: install ffmpeg on macos
+        if: matrix.os == 'macos-latest'
+        run: |
+          brew install ffmpeg
      - name: Install dependencies
        run: |
-          python -m pip install uv
-          uv pip install --system -e .
+          # python -m pip install uv
+          pip install -e .
+          # uv pip install --system -e .
      - name: Run pytest test_colors
        run: |
          cd ammico
@@ -24,7 +24,7 @@ Use pre-processed image files such as social media posts with comments and proce
 1. Question answering
 1. Performing person and face recognition in images
 1. Face mask detection
-1. Age, gender and race detection
+1. Probabilistic detection of age, gender and race detection
 1. Emotion recognition
 1. Color analysis
 1. Analyse hue and percentage of color on image
@@ -101,8 +101,6 @@ class AnalysisExplorer:
            State("setting_Text_revision_numbers", "value"),
            State("setting_Emotion_emotion_threshold", "value"),
            State("setting_Emotion_race_threshold", "value"),
-            State("setting_Emotion_gender_threshold", "value"),
-            State("setting_Emotion_age_threshold", "value"),
            State("setting_Emotion_env_var", "value"),
            State("setting_Color_delta_e_method", "value"),
            State("setting_Summary_analysis_type", "value"),
@@ -256,34 +254,6 @@ class AnalysisExplorer:
                    ],
                    align="start",
                ),
-                dbc.Col(
-                    [
-                        html.P("Gender threshold"),
-                        dcc.Input(
-                            type="number",
-                            value=50,
-                            max=100,
-                            min=0,
-                            id="setting_Emotion_gender_threshold",
-                            style={"width": "100%"},
-                        ),
-                    ],
-                    align="start",
-                ),
-                dbc.Col(
-                    [
-                        html.P("Age threshold"),
-                        dcc.Input(
-                            type="number",
-                            value=50,
-                            max=100,
-                            min=0,
-                            id="setting_Emotion_age_threshold",
-                            style={"width": "100%"},
-                        ),
-                    ],
-                    align="start",
-                ),
                dbc.Col(
                    [
                        html.P(
@@ -493,8 +463,6 @@ class AnalysisExplorer:
        settings_text_revision_numbers: str,
        setting_emotion_emotion_threshold: int,
        setting_emotion_race_threshold: int,
-        setting_emotion_gender_threshold: int,
-        setting_emotion_age_threshold: int,
        setting_emotion_env_var: str,
        setting_color_delta_e_method: str,
        setting_summary_analysis_type: str,
@@ -550,8 +518,6 @@ class AnalysisExplorer:
                image_copy,
                emotion_threshold=setting_emotion_emotion_threshold,
                race_threshold=setting_emotion_race_threshold,
-                gender_threshold=setting_emotion_gender_threshold,
-                age_threshold=setting_emotion_age_threshold,
                accept_disclosure=(
                    setting_emotion_env_var
                    if setting_emotion_env_var
ammico/faces.py (107 lines changed)

@@ -149,8 +149,6 @@ class EmotionDetector(AnalysisMethod):
        subdict: dict,
        emotion_threshold: float = 50.0,
        race_threshold: float = 50.0,
-        gender_threshold: float = 50.0,
-        age_threshold: float = 50.0,
        accept_disclosure: str = "DISCLOSURE_AMMICO",
    ) -> None:
        """
@@ -160,8 +158,6 @@ class EmotionDetector(AnalysisMethod):
            subdict (dict): The dictionary to store the analysis results.
            emotion_threshold (float): The threshold for detecting emotions (default: 50.0).
            race_threshold (float): The threshold for detecting race (default: 50.0).
-            gender_threshold (float): The threshold for detecting gender (default: 50.0).
-            age_threshold (float): The threshold for detecting age (default: 50.0).
            accept_disclosure (str): The name of the disclosure variable, that is
                set upon accepting the disclosure (default: "DISCLOSURE_AMMICO").
        """
@@ -172,14 +168,8 @@ class EmotionDetector(AnalysisMethod):
            raise ValueError("Emotion threshold must be between 0 and 100.")
        if race_threshold < 0 or race_threshold > 100:
            raise ValueError("Race threshold must be between 0 and 100.")
-        if gender_threshold < 0 or gender_threshold > 100:
-            raise ValueError("Gender threshold must be between 0 and 100.")
-        if age_threshold < 0 or age_threshold > 100:
-            raise ValueError("Age threshold must be between 0 and 100.")
        self.emotion_threshold = emotion_threshold
        self.race_threshold = race_threshold
-        self.gender_threshold = gender_threshold
-        self.age_threshold = age_threshold
        self.emotion_categories = {
            "angry": "Negative",
            "disgust": "Negative",
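
With `gender_threshold` and `age_threshold` removed, only the emotion and race thresholds plus the disclosure variable remain configurable. A minimal usage sketch of the slimmed-down constructor, assuming an `image_dict` entry prepared as in the demo notebook (the key "pic1" is illustrative only):

    import ammico

    # emotion_threshold and race_threshold are percentages between 0 and 100;
    # age and gender no longer take thresholds because no confidence value is
    # accessible for them (see the notebook text further down in this diff).
    image_dict["pic1"] = ammico.EmotionDetector(
        image_dict["pic1"],
        emotion_threshold=50,
        race_threshold=50,
        accept_disclosure="DISCLOSURE_AMMICO",
    ).analyse_image()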
@@ -232,13 +222,13 @@ class EmotionDetector(AnalysisMethod):
            "restricted_access_with_mask": [],
        }
        if fresult["wears_mask"] and self.accepted:
-            actions = conditional_actions["all_with_mask"]
+            self.actions = conditional_actions["all_with_mask"]
        elif fresult["wears_mask"] and not self.accepted:
-            actions = conditional_actions["restricted_access_with_mask"]
+            self.actions = conditional_actions["restricted_access_with_mask"]
        elif not fresult["wears_mask"] and self.accepted:
-            actions = conditional_actions["all"]
+            self.actions = conditional_actions["all"]
        elif not fresult["wears_mask"] and not self.accepted:
-            actions = conditional_actions["restricted_access"]
+            self.actions = conditional_actions["restricted_access"]
        else:
            raise ValueError(
                "Invalid mask detection {} and disclosure \
@@ -246,16 +236,15 @@ class EmotionDetector(AnalysisMethod):
                    fresult["wears_mask"], self.accepted
                )
            )
-        return actions

-    def _ensure_deepface_models(self, actions: list):
+    def _ensure_deepface_models(self):
        # Ensure that all data has been fetched by pooch
        deepface_face_expression_model.get()
-        if "race" in actions:
+        if "race" in self.actions:
            deepface_race_model.get()
-        if "age" in actions:
+        if "age" in self.actions:
            deepface_age_model.get()
-        if "gender" in actions:
+        if "gender" in self.actions:
            deepface_gender_model.get()

    def analyze_single_face(self, face: np.ndarray) -> dict:
@@ -271,13 +260,13 @@ class EmotionDetector(AnalysisMethod):
        fresult = {}
        # Determine whether the face wears a mask
        fresult["wears_mask"] = self.wears_mask(face)
-        actions = self._define_actions(fresult)
-        self._ensure_deepface_models(actions)
+        self._define_actions(fresult)
+        self._ensure_deepface_models()
        # Run the full DeepFace analysis
        fresult.update(
            DeepFace.analyze(
                img_path=face,
-                actions=actions,
+                actions=self.actions,
                prog_bar=False,
                detector_backend="skip",
            )
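
The action list is now kept on the instance: `_define_actions` fills `self.actions` from the mask result and the disclosure answer, `_ensure_deepface_models` fetches only the models those actions need, and `DeepFace.analyze` receives `actions=self.actions`. A sketch of the four possible outcomes, mirroring the expectations in the updated `test_define_actions` further down (`ed` stands for any `EmotionDetector` instance):

    # wears_mask   disclosure accepted   resulting ed.actions
    # ----------   -------------------   ------------------------------------
    # True         True                  ["age", "gender"]
    # False        True                  ["age", "gender", "race", "emotion"]
    # True         False                 []
    # False        False                 ["emotion"]
    ed._define_actions({"wears_mask": False})
    ed._ensure_deepface_models()  # downloads only what ed.actions requires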
@@ -325,49 +314,47 @@ class EmotionDetector(AnalysisMethod):
        """
        # Each person subdict converted into list for keys
        self.subdict["wears_mask"] = []
-        self.subdict["age"] = []
-        self.subdict["gender"] = []
-        self.subdict["race"] = []
        self.subdict["emotion"] = []
        self.subdict["emotion (category)"] = []
+        for key in self.actions:
+            self.subdict[key] = []
+        # now iterate over the number of faces
+        # and check thresholds
+        # the results for each person are returned as a nested dict
+        # race and emotion are given as dict with confidence values
+        # gender and age are given as one value with no confidence
+        # being passed
        for i in range(result["number_faces"]):
            person = "person{}".format(i + 1)
-            self.subdict["wears_mask"].append(
-                "Yes" if result[person]["wears_mask"] else "No"
-            )
-            self.subdict["age"].append(result[person]["age"])
-            # Gender is now reported as a list of dictionaries.
-            # Each dict represents one face.
-            # Each dict contains probability for Woman and Man.
-            # We take only the higher probability result for each dict.
-            self.subdict["gender"].append(result[person]["gender"])
-            # Race and emotion are only detected if a person does not wear a mask
-            if result[person]["wears_mask"]:
-                self.subdict["race"].append(None)
-                self.subdict["emotion"].append(None)
-                self.subdict["emotion (category)"].append(None)
-            elif not result[person]["wears_mask"]:
-                # Check whether the race threshold was exceeded
-                if (
-                    result[person]["race"][result[person]["dominant_race"]]
-                    > self.race_threshold
-                ):
-                    self.subdict["race"].append(result[person]["dominant_race"])
-                else:
-                    self.subdict["race"].append(None)
-
-                # Check whether the emotion threshold was exceeded
-                if (
-                    result[person]["emotion"][result[person]["dominant_emotion"]]
-                    > self.emotion_threshold
-                ):
-                    self.subdict["emotion"].append(result[person]["dominant_emotion"])
-                    self.subdict["emotion (category)"].append(
-                        self.emotion_categories[result[person]["dominant_emotion"]]
-                    )
-                else:
-                    self.subdict["emotion"].append(None)
-                    self.subdict["emotion (category)"].append(None)
+            wears_mask = result[person]["wears_mask"]
+            self.subdict["wears_mask"].append("Yes" if wears_mask else "No")
+            for key in self.actions:
+                if key == "emotion":
+                    classified_emotion = result[person]["dominant_emotion"]
+                    confidence_value = result[person][key][classified_emotion]
+                    outcome = (
+                        classified_emotion
+                        if confidence_value > self.emotion_threshold and not wears_mask
+                        else None
+                    )
+                    # also set the emotion category
+                    self.emotion_categories[outcome]
+                    self.subdict["emotion (category)"].append(
+                        self.emotion_categories[outcome] if outcome else None
+                    )
+                elif key == "race":
+                    classified_race = result[person]["dominant_race"]
+                    confidence_value = result[person][key][classified_race]
+                    outcome = (
+                        classified_race
+                        if confidence_value > self.race_threshold and not wears_mask
+                        else None
+                    )
+                elif key == "age":
+                    outcome = result[person]["age"] if not wears_mask else None
+                elif key == "gender":
+                    outcome = result[person]["gender"] if not wears_mask else None
+                self.subdict[key].append(outcome)
        return self.subdict

    def wears_mask(self, face: np.ndarray) -> bool:
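
Per-face results are now filled from `self.actions`, with `None` whenever the face wears a mask or the confidence stays below the corresponding threshold. A sketch of the keys that end up in `self.subdict` after the detector runs (one list entry per detected face; age, gender, race and emotion keys are only present when they are listed in `self.actions`):

    # "wears_mask"         -> "Yes" / "No"
    # "emotion"            -> dominant emotion, or None if masked or below emotion_threshold
    # "emotion (category)" -> "Positive" / "Negative" / "Neutral", or None
    # "race"               -> dominant race, or None if masked or below race_threshold
    # "age", "gender"      -> DeepFace value passed through, or None if masked (no threshold)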
@@ -261,6 +261,15 @@
     " image_df.to_csv(dump_file)"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "image_df.head()"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -268,6 +277,16 @@
     "`TextDetector`:"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "os.environ.pop(\"DISCLOSURE_AMMICO\")\n",
+    "os.environ.get(\"DISCLOSURE_AMMICO\")"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
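
The second added cell clears the disclosure variable so that a later `EmotionDetector` run asks for consent again. A short sketch of how that environment variable behaves, assuming the default name `DISCLOSURE_AMMICO` (the tests below use `OTHER_VAR` instead):

    import os

    os.environ["DISCLOSURE_AMMICO"] = "True"   # accepted: full age/gender/race/emotion analysis allowed
    os.environ["DISCLOSURE_AMMICO"] = "False"  # rejected: only emotion is analysed on unmasked faces
    os.environ.pop("DISCLOSURE_AMMICO", None)  # forget the answer; the detector prompts again next time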
@@ -876,11 +895,13 @@
    "\n",
    "From the seven facial expressions, an overall dominating emotion category is identified: negative, positive, or neutral emotion. These are defined with the facial expressions angry, disgust, fear and sad for the negative category, happy for the positive category, and surprise and neutral for the neutral category.\n",
    "\n",
-    "A similar threshold as for the emotion recognition is set for the race/ethnicity, gender and age detection, `race_threshold`, `gender_threshold`, `age_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
+    "A similar threshold as for the emotion recognition is set for the race/ethnicity detection, `race_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
+    "\n",
+    "For age and gender, unfortunately no confidence value is accessible so that no threshold values can be set for this type of analysis.\n",
    "\n",
    "You may also pass the name of the environment variable that determines if you accept or reject the ethical disclosure statement. By default, the variable is named `DISCLOSURE_AMMICO`.\n",
    "\n",
-    "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `gender_threshold`, `age_threshold` are optional:"
+    "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `accept_disclosure` are optional:"
   ]
  },
  {
@@ -891,7 +912,6 @@
   "source": [
    "for key in image_dict.keys():\n",
    "    image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50,\n",
-    "    gender_threshold=50, age_threshold=50, \n",
    "    accept_disclosure=\"DISCLOSURE_AMMICO\").analyse_image()"
   ]
  },
ammico/test/data/IMG_3757.jpg (binary data)
Binary file not shown (size before: 42 KiB, after: 42 KiB).
@@ -1,10 +1 @@
-THE
-ALGEBRAIC
-EIGENVALUE
-PROBLEM
-DOM
-NVS TIO
-MINA
-Monographs
-on Numerical Analysis
-J.. H. WILKINSON
+THE ALGEBRAIC EIGENVALUE PROBLEM

@@ -1 +1 @@
-algebraic eigenvalue problem monographs numerical analysis
+the algebraic eigenvalue problem
@@ -54,8 +54,6 @@ def test_right_output_analysis_summary(get_AE, get_options, monkeypatch):
        None,
        50,
        50,
-        50,
-        50,
        "OTHER_VAR",
        "CIE 1976",
        "summary_and_questions",
@@ -76,8 +74,6 @@ def test_right_output_analysis_emotions(get_AE, get_options, monkeypatch):
        None,
        50,
        50,
-        50,
-        50,
        "OTHER_VAR",
        "CIE 1976",
        "summary_and_questions",
@@ -14,36 +14,28 @@ def test_init_EmotionDetector(monkeypatch):
    assert ed.subdict["emotion"] == [None]
    assert ed.subdict["age"] == [None]
    assert ed.emotion_threshold == 50
-    assert ed.age_threshold == 50
-    assert ed.gender_threshold == 50
    assert ed.race_threshold == 50
    assert ed.emotion_categories["angry"] == "Negative"
    assert ed.emotion_categories["happy"] == "Positive"
    assert ed.emotion_categories["surprise"] == "Neutral"
-    assert ed.accept_disclosure == "OTHER_VAR"
-    assert os.environ.get(ed.accept_disclosure) == "True"
    assert ed.accepted
-    monkeypatch.delenv(ed.accept_disclosure, raising=False)
+    monkeypatch.delenv("OTHER_VAR", raising=False)
    # different thresholds
    ed = fc.EmotionDetector(
        {},
        emotion_threshold=80,
        race_threshold=30,
-        gender_threshold=70,
-        age_threshold=90,
        accept_disclosure="OTHER_VAR",
    )
    assert ed.emotion_threshold == 80
    assert ed.race_threshold == 30
-    assert ed.gender_threshold == 70
-    assert ed.age_threshold == 90
-    monkeypatch.delenv(ed.accept_disclosure, raising=False)
+    monkeypatch.delenv("OTHER_VAR", raising=False)
    # do not accept disclosure
    monkeypatch.setattr("builtins.input", lambda _: "no")
    ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
-    assert os.environ.get(ed.accept_disclosure) == "False"
+    assert os.environ.get("OTHER_VAR") == "False"
    assert not ed.accepted
-    monkeypatch.delenv(ed.accept_disclosure, raising=False)
+    monkeypatch.delenv("OTHER_VAR", raising=False)
    # now test the exceptions: thresholds
    monkeypatch.setattr("builtins.input", lambda _: "yes")
    with pytest.raises(ValueError):
@@ -54,14 +46,6 @@ def test_init_EmotionDetector(monkeypatch):
        fc.EmotionDetector({}, race_threshold=150)
    with pytest.raises(ValueError):
        fc.EmotionDetector({}, race_threshold=-50)
-    with pytest.raises(ValueError):
-        fc.EmotionDetector({}, gender_threshold=150)
-    with pytest.raises(ValueError):
-        fc.EmotionDetector({}, gender_threshold=-50)
-    with pytest.raises(ValueError):
-        fc.EmotionDetector({}, age_threshold=150)
-    with pytest.raises(ValueError):
-        fc.EmotionDetector({}, age_threshold=-50)
    # test pre-set variables: disclosure
    monkeypatch.delattr("builtins.input", raising=False)
    monkeypatch.setenv("OTHER_VAR", "something")
@@ -78,22 +62,23 @@
 def test_define_actions(monkeypatch):
    monkeypatch.setenv("OTHER_VAR", "True")
    ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
-    actions = ed._define_actions({"wears_mask": True})
-    assert actions == ["age", "gender"]
-    actions = ed._define_actions({"wears_mask": False})
-    assert actions == ["age", "gender", "race", "emotion"]
+    ed._define_actions({"wears_mask": True})
+    assert ed.actions == ["age", "gender"]
+    ed._define_actions({"wears_mask": False})
+    assert ed.actions == ["age", "gender", "race", "emotion"]
    monkeypatch.setenv("OTHER_VAR", "False")
    ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
-    actions = ed._define_actions({"wears_mask": True})
-    assert actions == []
-    actions = ed._define_actions({"wears_mask": False})
-    assert actions == ["emotion"]
+    ed._define_actions({"wears_mask": True})
+    assert ed.actions == []
+    ed._define_actions({"wears_mask": False})
+    assert ed.actions == ["emotion"]


 def test_ensure_deepface_models(monkeypatch):
    monkeypatch.setenv("OTHER_VAR", "True")
    ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
-    ed._ensure_deepface_models(["age", "gender", "race", "emotion"])
+    ed.actions = ["age", "gender", "race", "emotion"]
+    ed._ensure_deepface_models()


 def test_analyse_faces(get_path, monkeypatch):
@@ -127,8 +127,8 @@ def test_get_text_from_image(set_testdict, get_path, set_environ):
        test_obj.get_text_from_image()
        ref_file = get_path + "text_" + item + ".txt"
        with open(ref_file, "r", encoding="utf8") as file:
-            reference_text = file.read()
-        assert test_obj.subdict["text"] == reference_text
+            reference_text = file.read().replace("\n", " ")
+        assert test_obj.subdict["text"].replace("\n", " ") == reference_text


 def test_translate_text(set_testdict, get_path):
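
Both sides of the comparison are now normalised to single spaces, which matches the flattened reference file earlier in this diff. A small illustration of the normalisation:

    reference_text = "THE\nALGEBRAIC\nEIGENVALUE\nPROBLEM".replace("\n", " ")
    detected_text = "THE ALGEBRAIC EIGENVALUE PROBLEM"
    assert detected_text.replace("\n", " ") == reference_text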
@@ -137,13 +137,13 @@ def test_translate_text(set_testdict, get_path):
        ref_file = get_path + "text_" + item + ".txt"
        trans_file = get_path + "text_translated_" + item + ".txt"
        with open(ref_file, "r", encoding="utf8") as file:
-            reference_text = file.read()
+            reference_text = file.read().replace("\n", " ")
        with open(trans_file, "r", encoding="utf8") as file:
-            true_translated_text = file.read()
+            true_translated_text = file.read().replace("\n", " ")
        test_obj.subdict["text"] = reference_text
        test_obj.translate_text()
        assert test_obj.subdict["text_language"] == lang
-        translated_text = test_obj.subdict["text_english"].lower()
+        translated_text = test_obj.subdict["text_english"].lower().replace("\n", " ")
        for word in true_translated_text.lower():
            assert word in translated_text

@@ -233,7 +233,7 @@ def test_read_csv(get_path):


 def test_PostprocessText(set_testdict, get_path):
-    reference_dict = "THE\nALGEBRAIC\nEIGENVALUE\nPROBLEM\nDOM\nNVS TIO\nMINA\nMonographs\non Numerical Analysis\nJ.. H. WILKINSON"
+    reference_dict = "THE ALGEBRAIC EIGENVALUE PROBLEM"
    reference_df = "Mathematische Formelsammlung\nfür Ingenieure und Naturwissenschaftler\nMit zahlreichen Abbildungen und Rechenbeispielen\nund einer ausführlichen Integraltafel\n3., verbesserte Auflage"
    img_numbers = ["IMG_3755", "IMG_3756", "IMG_3757"]
    for image_ref in img_numbers:
@@ -855,11 +855,13 @@
    "\n",
    "From the seven facial expressions, an overall dominating emotion category is identified: negative, positive, or neutral emotion. These are defined with the facial expressions angry, disgust, fear and sad for the negative category, happy for the positive category, and surprise and neutral for the neutral category.\n",
    "\n",
-    "A similar threshold as for the emotion recognition is set for the race/ethnicity, gender and age detection, `race_threshold`, `gender_threshold`, `age_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
+    "A similar threshold as for the emotion recognition is set for the race/ethnicity detection, `race_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
+    "\n",
+    "Gender and age probabilistic detection do not allow access to the confidence value, so that no threshold can be set for these.\n",
    "\n",
    "You may also pass the name of the environment variable that determines if you accept or reject the ethical disclosure statement. By default, the variable is named `DISCLOSURE_AMMICO`.\n",
    "\n",
-    "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `gender_threshold`, `age_threshold` are optional:"
+    "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `accept_disclosure` are optional:"
   ]
  },
  {
@@ -870,7 +872,6 @@
   "source": [
    "for key in image_dict.keys():\n",
    "    image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50,\n",
-    "    gender_threshold=50, age_threshold=50, \n",
    "    accept_disclosure=\"DISCLOSURE_AMMICO\").analyse_image()"
   ]
  },
@@ -43,7 +43,7 @@ dependencies = [
    "pytest-cov",
    "Requests",
    "retina_face",
-    "ammico-lavis",
+    "ammico-lavis>=1.0.2.3",
    "setuptools",
    "spacy",
    "tensorflow>=2.13.0",
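
The only dependency change is the new lower bound on `ammico-lavis`. A quick, optional way to check which version actually got resolved in an environment (standard library only):

    from importlib.metadata import version

    print(version("ammico-lavis"))  # expected to be >= 1.0.2.3 after this change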