diff --git a/misinformation/multimodal_search.py b/misinformation/multimodal_search.py
index 678bf91..e7d7cdf 100644
--- a/misinformation/multimodal_search.py
+++ b/misinformation/multimodal_search.py
@@ -154,17 +154,12 @@ class MultimodalSearch(AnalysisMethod):
 
         return features_text
 
-
     def parsing_images(
         self,
         model_type,
         path_to_saved_tensors="./saved_tensors/",
         path_to_load_tensors=None,
     ):
-
-
-
-
         if model_type in ("clip_base", "clip_vitl14_336", "clip_vitl14"):
             path_to_lib = lavis.__file__[:-11] + "models/clip_models/"
             url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/lavis/models/clip_models/bpe_simple_vocab_16e6.txt.gz"
@@ -470,7 +465,6 @@ class MultimodalSearch(AnalysisMethod):
         return att_map
 
     def upload_model_blip2_coco(self):
-
         itm_model = load_model(
             "blip2_image_text_matching",
             "coco",
@@ -481,7 +475,6 @@ class MultimodalSearch(AnalysisMethod):
         return itm_model, vis_processor
 
     def upload_model_blip_base(self):
-
         itm_model = load_model(
             "blip_image_text_matching",
             "base",
@@ -492,7 +485,6 @@ class MultimodalSearch(AnalysisMethod):
         return itm_model, vis_processor
 
     def upload_model_blip_large(self):
-
         itm_model = load_model(
             "blip_image_text_matching",
             "large",