From f89fa4e519f01b7926f6c7e10d2b55c3e8240a74 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Thu, 30 Mar 2023 19:39:21 +0200
Subject: [PATCH] [pre-commit.ci] pre-commit autoupdate (#46)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* [pre-commit.ci] pre-commit autoupdate

updates:
- [github.com/psf/black: 22.12.0 → 23.1.0](https://github.com/psf/black/compare/22.12.0...23.1.0)
- [github.com/psf/black: 22.12.0 → 23.1.0](https://github.com/psf/black/compare/22.12.0...23.1.0)
- [github.com/s-weigand/flake8-nb: v0.5.0 → v0.5.2](https://github.com/s-weigand/flake8-nb/compare/v0.5.0...v0.5.2)

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update .pre-commit-config.yaml

* run hooks

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Inga Ulusoy
---
 .pre-commit-config.yaml                       | 24 ++++++++-----------
 misinformation/faces.py                       |  1 -
 misinformation/multimodal_search.py           |  8 ++++---
 misinformation/summary.py                     |  1 -
 misinformation/test/test_multimodal_search.py |  1 -
 notebooks/colors_expression.ipynb             |  4 ++--
 6 files changed, 17 insertions(+), 22 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 486a2dc..10fe702 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -2,21 +2,17 @@ repos:
   - repo: https://github.com/kynan/nbstripout
     rev: 0.6.1
     hooks:
-    - id: nbstripout
-      files: ".ipynb"
+      - id: nbstripout
+        files: ".ipynb"
   - repo: https://github.com/psf/black
-    rev: 22.12.0
+    rev: 23.1.0
     hooks:
       - id: black
-  - repo: https://github.com/psf/black
-    rev: 22.12.0
+  - repo: https://github.com/pycqa/flake8
+    rev: 6.0.0
     hooks:
-      - id: black-jupyter
-  - repo: https://github.com/pycqa/flake8
-    rev: 6.0.0
-    hooks:
-      - id: flake8
-  - repo: https://github.com/s-weigand/flake8-nb
-    rev: v0.5.0
-    hooks:
-      - id: flake8-nb
+      - id: flake8
+  - repo: https://github.com/s-weigand/flake8-nb
+    rev: v0.5.0
+    hooks:
+      - id: flake8-nb
diff --git a/misinformation/faces.py b/misinformation/faces.py
index a82da44..5f604e0 100644
--- a/misinformation/faces.py
+++ b/misinformation/faces.py
@@ -153,7 +153,6 @@ class EmotionDetector(utils.AnalysisMethod):
         return fresult
 
     def facial_expression_analysis(self) -> dict:
-
         # Find (multiple) faces in the image and cut them
         retinaface_model.get()
         faces = RetinaFace.extract_faces(self.subdict["filename"])
diff --git a/misinformation/multimodal_search.py b/misinformation/multimodal_search.py
index ad4ea8f..7b47a0b 100644
--- a/misinformation/multimodal_search.py
+++ b/misinformation/multimodal_search.py
@@ -137,7 +137,6 @@ class MultimodalSearch(AnalysisMethod):
         return features_text
 
     def parsing_images(self, model_type, path_to_saved_tensors=None):
-
         if model_type in ("clip_base", "clip_vitl14_336", "clip_vitl14"):
             path_to_lib = lavis.__file__[:-11] + "models/clip_models/"
             url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/lavis/models/clip_models/bpe_simple_vocab_16e6.txt.gz"
@@ -166,7 +165,11 @@ class MultimodalSearch(AnalysisMethod):
         }
 
         if model_type in select_model.keys():
-            (model, vis_processors, txt_processors,) = select_model[
+            (
+                model,
+                vis_processors,
+                txt_processors,
+            ) = select_model[
                 model_type
             ](self, MultimodalSearch.multimodal_device)
         else:
@@ -200,7 +203,6 @@ class MultimodalSearch(AnalysisMethod):
     def querys_processing(
         self, search_query, model, txt_processors, vis_processors, model_type
     ):
-
         select_extract_image_features = {
             "blip2": MultimodalSearch.extract_image_features_blip2,
             "blip": MultimodalSearch.extract_image_features_basic,
diff --git a/misinformation/summary.py b/misinformation/summary.py
index 32cd7f4..18117c7 100644
--- a/misinformation/summary.py
+++ b/misinformation/summary.py
@@ -37,7 +37,6 @@ class SummaryDetector(AnalysisMethod):
         return summary_model, summary_vis_processors
 
     def analyse_image(self, summary_model=None, summary_vis_processors=None):
-
         if summary_model is None and summary_vis_processors is None:
             summary_model, summary_vis_processors = self.load_model_base()
 
diff --git a/misinformation/test/test_multimodal_search.py b/misinformation/test/test_multimodal_search.py
index ca5f162..4aaaccf 100644
--- a/misinformation/test/test_multimodal_search.py
+++ b/misinformation/test/test_multimodal_search.py
@@ -355,7 +355,6 @@ def test_parsing_images(
     pre_simularity,
     pre_sorted,
 ):
-
     ms.MultimodalSearch.multimodal_device = pre_multimodal_device
     (
         model,
diff --git a/notebooks/colors_expression.ipynb b/notebooks/colors_expression.ipynb
index d058a25..0c5f5bb 100644
--- a/notebooks/colors_expression.ipynb
+++ b/notebooks/colors_expression.ipynb
@@ -48,7 +48,7 @@
     "    startX = 0\n",
     "    # loop over the percentage of each cluster and the color of\n",
     "    # each cluster\n",
-    "    for (percent, color) in zip(hist, centroids):\n",
+    "    for percent, color in zip(hist, centroids):\n",
     "        # plot the relative percentage of each cluster\n",
     "        endX = startX + (percent * 300)\n",
     "        cv2.rectangle(\n",
@@ -123,7 +123,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "for (percent, color) in zip(hist, clt.cluster_centers_):\n",
+    "for percent, color in zip(hist, clt.cluster_centers_):\n",
     "    print(\"color:\", color, \" percentage:\", percent)"
   ]
  },