AMMICO/misinformation/summary.py
Petr Andriushchenko 2891c8a6ed
add image summary notebook (#57)
* add image summary notebook

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* pin deepface version to avoid bug with progress bar after update

* update actions version for checkout and python

* test ci without lavis

* no lavis for ci test

* merging

* return lavis

* change lavis to salesforce-lavis

* change pycocotools install method

* change pycocotools install method

* fix_pycocotools

* Downgrade Python

* back to 3.9 and remove pycocotools dependency

* instructions for windows

* missing comma after merge

* lavis only for ubuntu

* use lavis package name in install instead of git

* adding multimodal searching py and notebook

* exclude lavis on windows

* skip import on windows

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* reactivate lavis

* Revert "reactivate lavis"

This reverts commit ecdaf9d316e4b08816ba62da5e0482c8ff15b14e.

* Change input format for multimodal search

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* fix clip models

* account for new interface in init imports

* changed imports because of lavis/windows

* fix if-else, added clip ViT-L-14-336 model

* fix code smells

* add model change function to summary

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* fixed new model in summary.py

* fixed summary widget

* moved some functions to utils

* fixed torch import in utils

* added test_summary.py

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* fixed opencv version

* added first test of multimodal_search.py

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* fixed test

* removed windows in CI and added test in multimodal search

* change lavis dependency from pip to git

* fixed blip2 model in test_multimodal_search.py

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* fixed test multimodal search on cpu and gpu machines

* added test, fixed dependencies

* add -vv to pytest command in CI

* added test_multimodal_search tests

* fixed tests in test_multimodal_search.py

* fixed tests in test_summary

* changed CI and fixed test_multimodal_search

* fixed ci

* fixed error in test multimodal search, changed ci

* added multimodal search test, added windows CI, added picture in test data

* CI debugging

* fixing tests in CI

* fixing test in CI 2

* fixing CI 3

* fixing CI

* added filtering function

* Brought back all tests after CI fixing

* changed CI: replaced single pytest run with individual tests

* fixed opencv problem

* fix path for text, adjust result for new gcv

* remove opencv

* fixing cv2 error

* added opencv-contrib, changed objects_cvlib

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* fixing tests in CI

* fixing CI testing

* cleanup objects

* fixing codecov in CI

* fixing codecov in CI

* run tests together; install opencv last

* update requirements for opencv dependencies

* moved lavis functions from utils to summary

* Remove lavis from utils.py

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* add missing jupyter

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: iulusoy <inga.ulusoy@uni-heidelberg.de>
2023-03-22 10:28:09 +01:00

105 lines
3.7 KiB
Python

from misinformation.utils import AnalysisMethod
from torch import device, cuda, no_grad
from PIL import Image
from lavis.models import load_model_and_preprocess


class SummaryDetector(AnalysisMethod):
    def __init__(self, subdict: dict) -> None:
        super().__init__(subdict)

    # Default captioning model, loaded once at class level and shared by all
    # instances: the BLIP "base_coco" captioning model from LAVIS, placed on
    # the GPU if one is available.
    summary_device = device("cuda" if cuda.is_available() else "cpu")
    summary_model, summary_vis_processors, _ = load_model_and_preprocess(
        name="blip_caption",
        model_type="base_coco",
        is_eval=True,
        device=summary_device,
    )
    def load_model_base(self):
        # Load the BLIP "base_coco" captioning model and its visual preprocessors.
        summary_device = device("cuda" if cuda.is_available() else "cpu")
        summary_model, summary_vis_processors, _ = load_model_and_preprocess(
            name="blip_caption",
            model_type="base_coco",
            is_eval=True,
            device=summary_device,
        )
        return summary_model, summary_vis_processors

    def load_model_large(self):
        # Load the larger BLIP "large_coco" captioning model and its visual preprocessors.
        summary_device = device("cuda" if cuda.is_available() else "cpu")
        summary_model, summary_vis_processors, _ = load_model_and_preprocess(
            name="blip_caption",
            model_type="large_coco",
            is_eval=True,
            device=summary_device,
        )
        return summary_model, summary_vis_processors

    def load_model(self, model_type):
        # Dispatch to the requested captioning model ("base" or "large").
        select_model = {
            "base": SummaryDetector.load_model_base,
            "large": SummaryDetector.load_model_large,
        }
        summary_model, summary_vis_processors = select_model[model_type](self)
        return summary_model, summary_vis_processors
    def analyse_image(self, summary_model=None, summary_vis_processors=None):
        # Fall back to the class-level base model if no model was passed in.
        if summary_model is None and summary_vis_processors is None:
            summary_model = SummaryDetector.summary_model
            summary_vis_processors = SummaryDetector.summary_vis_processors
        path = self.subdict["filename"]
        raw_image = Image.open(path).convert("RGB")
        image = (
            summary_vis_processors["eval"](raw_image)
            .unsqueeze(0)
            .to(self.summary_device)
        )
        with no_grad():
            # One deterministic caption plus three nucleus-sampled variants.
            self.subdict["const_image_summary"] = summary_model.generate(
                {"image": image}
            )[0]
            self.subdict["3_non-deterministic summary"] = summary_model.generate(
                {"image": image}, use_nucleus_sampling=True, num_captions=3
            )
        return self.subdict
    # Class-level visual question answering (VQA) model: BLIP fine-tuned on VQAv2.
    (
        summary_VQA_model,
        summary_VQA_vis_processors,
        summary_VQA_txt_processors,
    ) = load_model_and_preprocess(
        name="blip_vqa", model_type="vqav2", is_eval=True, device=summary_device
    )
    def analyse_questions(self, list_of_questions):
        if len(list_of_questions) > 0:
            path = self.subdict["filename"]
            raw_image = Image.open(path).convert("RGB")
            image = (
                self.summary_VQA_vis_processors["eval"](raw_image)
                .unsqueeze(0)
                .to(self.summary_device)
            )
            # Preprocess every question and repeat the image tensor so that a
            # single batched forward pass answers all questions at once.
            question_batch = []
            for quest in list_of_questions:
                question_batch.append(self.summary_VQA_txt_processors["eval"](quest))
            batch_size = len(list_of_questions)
            image_batch = image.repeat(batch_size, 1, 1, 1)
            with no_grad():
                answers_batch = self.summary_VQA_model.predict_answers(
                    samples={"image": image_batch, "text_input": question_batch},
                    inference_method="generate",
                )
            # Store each answer under its question as the key.
            for q, a in zip(list_of_questions, answers_batch):
                self.subdict[q] = a
        else:
            print("Please enter a list of questions.")
        return self.subdict
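

# Usage sketch (illustrative only, not part of the original module): shows how one
# might drive SummaryDetector for captioning and visual question answering. The
# image path "example.jpg" and the question text below are placeholder assumptions;
# replace them with your own data.
if __name__ == "__main__":
    detector = SummaryDetector({"filename": "example.jpg"})
    # Optionally swap in the larger captioning model instead of the class-level default.
    model, vis_processors = detector.load_model("large")
    results = detector.analyse_image(
        summary_model=model, summary_vis_processors=vis_processors
    )
    print(results["const_image_summary"])
    print(results["3_non-deterministic summary"])
    # Ask a free-form question about the same image with the BLIP VQA model.
    results = detector.analyse_questions(["How many people are in the picture?"])
    print(results["How many people are in the picture?"])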